diff --git a/pengine/group.c b/pengine/group.c
index 2733ef797c..8fd1152c22 100644
--- a/pengine/group.c
+++ b/pengine/group.c
@@ -1,511 +1,526 @@
 /* 
  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  * 
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  * 
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  * 
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <pengine.h>
 #include <crm/msg_xml.h>
 
 #include <allocate.h>
 #include <utils.h>
 
 #define VARIANT_GROUP 1
 #include <lib/pengine/variant.h>
 
 node_t *
 group_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
 {
     node_t *node = NULL;
     node_t *group_node = NULL;
     GListPtr gIter = NULL;
     group_variant_data_t *group_data = NULL;
 
     get_group_variant_data(group_data, rsc);
 
     if (is_not_set(rsc->flags, pe_rsc_provisional)) {
         return rsc->allocated_to;
     }
     pe_rsc_trace(rsc, "Processing %s", rsc->id);
     if (is_set(rsc->flags, pe_rsc_allocating)) {
         pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
         return NULL;
     }
 
     if (group_data->first_child == NULL) {
         /* nothing to allocate */
         clear_bit(rsc->flags, pe_rsc_provisional);
         return NULL;
     }
 
     set_bit(rsc->flags, pe_rsc_allocating);
     rsc->role = group_data->first_child->role;
 
     group_data->first_child->rsc_cons =
         g_list_concat(group_data->first_child->rsc_cons, rsc->rsc_cons);
     rsc->rsc_cons = NULL;
 
     group_data->first_child->rsc_cons_lhs =
         g_list_concat(group_data->first_child->rsc_cons_lhs, rsc->rsc_cons_lhs);
     rsc->rsc_cons_lhs = NULL;
 
     gIter = rsc->rsc_tickets;
     for (; gIter != NULL; gIter = gIter->next) {
         rsc_ticket_t *rsc_ticket = (rsc_ticket_t *) gIter->data;
 
         if (rsc_ticket->ticket->granted == FALSE || rsc_ticket->ticket->standby) {
             rsc_ticket_constraint(rsc, rsc_ticket, data_set);
         }
     }
 
     dump_node_scores(show_scores ? 0 : scores_log_level, rsc, __PRETTY_FUNCTION__,
                      rsc->allowed_nodes);
 
     gIter = rsc->children;
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         node = child_rsc->cmds->allocate(child_rsc, prefer, data_set);
         if (group_node == NULL) {
             group_node = node;
         }
     }
 
     rsc->next_role = group_data->first_child->next_role;
     clear_bit(rsc->flags, pe_rsc_allocating);
     clear_bit(rsc->flags, pe_rsc_provisional);
 
     if (group_data->colocated) {
         return group_node;
     }
     return NULL;
 }
 
 void group_update_pseudo_status(resource_t * parent, resource_t * child);
 
 void
 group_create_actions(resource_t * rsc, pe_working_set_t * data_set)
 {
     action_t *op = NULL;
     const char *value = NULL;
     GListPtr gIter = rsc->children;
 
     pe_rsc_trace(rsc, "Creating actions for %s", rsc->id);
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         child_rsc->cmds->create_actions(child_rsc, data_set);
         group_update_pseudo_status(rsc, child_rsc);
     }
 
     op = start_action(rsc, NULL, TRUE /* !group_data->child_starting */ );
     set_bit(op->flags, pe_action_pseudo | pe_action_runnable);
 
     op = custom_action(rsc, started_key(rsc),
                        RSC_STARTED, NULL, TRUE /* !group_data->child_starting */ , TRUE, data_set);
     set_bit(op->flags, pe_action_pseudo | pe_action_runnable);
 
     op = stop_action(rsc, NULL, TRUE /* !group_data->child_stopping */ );
     set_bit(op->flags, pe_action_pseudo | pe_action_runnable);
 
     op = custom_action(rsc, stopped_key(rsc),
                        RSC_STOPPED, NULL, TRUE /* !group_data->child_stopping */ , TRUE, data_set);
     set_bit(op->flags, pe_action_pseudo | pe_action_runnable);
 
     value = g_hash_table_lookup(rsc->meta, "stateful");
     if (crm_is_true(value)) {
         op = custom_action(rsc, demote_key(rsc), RSC_DEMOTE, NULL, TRUE, TRUE, data_set);
         set_bit(op->flags, pe_action_pseudo);
         set_bit(op->flags, pe_action_runnable);
         op = custom_action(rsc, demoted_key(rsc), RSC_DEMOTED, NULL, TRUE, TRUE, data_set);
         set_bit(op->flags, pe_action_pseudo);
         set_bit(op->flags, pe_action_runnable);
 
         op = custom_action(rsc, promote_key(rsc), RSC_PROMOTE, NULL, TRUE, TRUE, data_set);
         set_bit(op->flags, pe_action_pseudo);
         set_bit(op->flags, pe_action_runnable);
         op = custom_action(rsc, promoted_key(rsc), RSC_PROMOTED, NULL, TRUE, TRUE, data_set);
         set_bit(op->flags, pe_action_pseudo);
         set_bit(op->flags, pe_action_runnable);
     }
 }
 
 void
 group_update_pseudo_status(resource_t * parent, resource_t * child)
 {
     GListPtr gIter = child->actions;
     group_variant_data_t *group_data = NULL;
 
     get_group_variant_data(group_data, parent);
 
     if (group_data->ordered == FALSE) {
         /* If this group is not ordered, then leave the meta-actions as optional */
         return;
     }
 
     if (group_data->child_stopping && group_data->child_starting) {
         return;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         action_t *action = (action_t *) gIter->data;
 
         if (is_set(action->flags, pe_action_optional)) {
             continue;
         }
         if (safe_str_eq(RSC_STOP, action->task) && is_set(action->flags, pe_action_runnable)) {
             group_data->child_stopping = TRUE;
             pe_rsc_trace(action->rsc, "Based on %s the group is stopping", action->uuid);
 
         } else if (safe_str_eq(RSC_START, action->task)
                    && is_set(action->flags, pe_action_runnable)) {
             group_data->child_starting = TRUE;
             pe_rsc_trace(action->rsc, "Based on %s the group is starting", action->uuid);
         }
     }
 }
 
 void
 group_internal_constraints(resource_t * rsc, pe_working_set_t * data_set)
 {
     GListPtr gIter = rsc->children;
     resource_t *last_rsc = NULL;
+    resource_t *last_active = NULL;
     resource_t *top = uber_parent(rsc);
     group_variant_data_t *group_data = NULL;
 
     get_group_variant_data(group_data, rsc);
 
     new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional, data_set);
     new_rsc_order(rsc, RSC_START, rsc, RSC_STARTED, pe_order_runnable_left, data_set);
     new_rsc_order(rsc, RSC_STOP, rsc, RSC_STOPPED, pe_order_runnable_left, data_set);
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
         int stop = pe_order_none;
         int stopped = pe_order_implies_then_printed;
         int start = pe_order_implies_then | pe_order_runnable_left;
         int started =
             pe_order_runnable_left | pe_order_implies_then | pe_order_implies_then_printed;
 
         child_rsc->cmds->internal_constraints(child_rsc, data_set);
 
         if (last_rsc == NULL) {
             if (group_data->ordered) {
                 stop |= pe_order_optional;
                 stopped = pe_order_implies_then;
             }
 
         } else if (group_data->colocated) {
             rsc_colocation_new("group:internal_colocation", NULL, INFINITY,
                                child_rsc, last_rsc, NULL, NULL, data_set);
         }
 
         if (top->variant == pe_master) {
             new_rsc_order(rsc, RSC_DEMOTE, child_rsc, RSC_DEMOTE,
                           stop | pe_order_implies_first_printed, data_set);
 
             new_rsc_order(child_rsc, RSC_DEMOTE, rsc, RSC_DEMOTED, stopped, data_set);
 
             new_rsc_order(child_rsc, RSC_PROMOTE, rsc, RSC_PROMOTED, started, data_set);
 
             new_rsc_order(rsc, RSC_PROMOTE, child_rsc, RSC_PROMOTE,
                           pe_order_implies_first_printed, data_set);
 
         }
 
         order_start_start(rsc, child_rsc, pe_order_implies_first_printed);
         order_stop_stop(rsc, child_rsc, stop | pe_order_implies_first_printed);
 
         new_rsc_order(child_rsc, RSC_STOP, rsc, RSC_STOPPED, stopped, data_set);
 
         new_rsc_order(child_rsc, RSC_START, rsc, RSC_STARTED, started, data_set);
 
         if (group_data->ordered == FALSE) {
             order_start_start(rsc, child_rsc, start | pe_order_implies_first_printed);
             if (top->variant == pe_master) {
                 new_rsc_order(rsc, RSC_PROMOTE, child_rsc, RSC_PROMOTE,
                               start | pe_order_implies_first_printed, data_set);
             }
 
         } else if (last_rsc != NULL) {
             child_rsc->restart_type = pe_restart_restart;
 
             order_start_start(last_rsc, child_rsc, start);
             order_stop_stop(child_rsc, last_rsc, pe_order_optional|pe_order_restart);
 
             if (top->variant == pe_master) {
                 new_rsc_order(last_rsc, RSC_PROMOTE, child_rsc, RSC_PROMOTE, start, data_set);
                 new_rsc_order(child_rsc, RSC_DEMOTE, last_rsc, RSC_DEMOTE, pe_order_optional,
                               data_set);
             }
 
         } else {
             /* If anyone in the group is starting, then
              *  pe_order_implies_then will cause _everyone_ in the group
              *  to be sent a start action
              * But this is safe since starting something that is already
              *  started is required to be "safe"
              */
             int flags = pe_order_none;
 
             order_start_start(rsc, child_rsc, flags);
             if (top->variant == pe_master) {
                 new_rsc_order(rsc, RSC_PROMOTE, child_rsc, RSC_PROMOTE, flags, data_set);
             }
 
         }
 
+        /* Look for partially active groups
+         * Make sure they still shut down in sequence
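+         * e.g. if the second and fourth members are active but the third
+         * is not, the fourth must still be stopped before the second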
+         */
+        if(child_rsc->running_on) {
+            if(group_data->ordered
+               && last_rsc
+               && last_rsc->running_on == NULL
+               && last_active
+               && last_active->running_on) {
+                order_stop_stop(child_rsc, last_active, pe_order_optional);
+            }
+            last_active = child_rsc;
+        }
+
         last_rsc = child_rsc;
     }
 
     if (group_data->ordered && last_rsc != NULL) {
         int stop_stop_flags = pe_order_implies_then;
         int stop_stopped_flags = pe_order_optional;
 
         order_stop_stop(rsc, last_rsc, stop_stop_flags);
         new_rsc_order(last_rsc, RSC_STOP, rsc, RSC_STOPPED, stop_stopped_flags, data_set);
 
         if (top->variant == pe_master) {
             new_rsc_order(rsc, RSC_DEMOTE, last_rsc, RSC_DEMOTE, stop_stop_flags, data_set);
             new_rsc_order(last_rsc, RSC_DEMOTE, rsc, RSC_DEMOTED, stop_stopped_flags, data_set);
         }
     }
 }
 
 void
 group_rsc_colocation_lh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
 {
     GListPtr gIter = NULL;
     group_variant_data_t *group_data = NULL;
 
     if (rsc_lh == NULL) {
         pe_err("rsc_lh was NULL for %s", constraint->id);
         return;
 
     } else if (rsc_rh == NULL) {
         pe_err("rsc_rh was NULL for %s", constraint->id);
         return;
     }
 
     gIter = rsc_lh->children;
     pe_rsc_trace(rsc_lh, "Processing constraints from %s", rsc_lh->id);
 
     get_group_variant_data(group_data, rsc_lh);
 
     if (group_data->colocated) {
         group_data->first_child->cmds->rsc_colocation_lh(group_data->first_child, rsc_rh,
                                                          constraint);
         return;
 
     } else if (constraint->score >= INFINITY) {
         crm_config_err("%s: Cannot perform manditory colocation"
                        " between non-colocated group and %s", rsc_lh->id, rsc_rh->id);
         return;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         child_rsc->cmds->rsc_colocation_lh(child_rsc, rsc_rh, constraint);
     }
 }
 
 void
 group_rsc_colocation_rh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
 {
     GListPtr gIter = rsc_rh->children;
     group_variant_data_t *group_data = NULL;
 
     get_group_variant_data(group_data, rsc_rh);
     CRM_CHECK(rsc_lh->variant == pe_native, return);
 
     pe_rsc_trace(rsc_rh, "Processing RH of constraint %s", constraint->id);
     print_resource(LOG_DEBUG_3, "LHS", rsc_lh, TRUE);
 
     if (is_set(rsc_rh->flags, pe_rsc_provisional)) {
         return;
 
     } else if (group_data->colocated && group_data->first_child) {
         if (constraint->score >= INFINITY) {
             /* Ensure RHS is _fully_ up before LHS can start */
             group_data->last_child->cmds->rsc_colocation_rh(rsc_lh, group_data->last_child,
                                                             constraint);
         } else {
             /* A partially active RHS is fine */
             group_data->first_child->cmds->rsc_colocation_rh(rsc_lh, group_data->first_child,
                                                              constraint);
         }
 
         return;
 
     } else if (constraint->score >= INFINITY) {
         crm_config_err("%s: Cannot perform manditory colocation with"
                        " non-colocated group: %s", rsc_lh->id, rsc_rh->id);
         return;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         child_rsc->cmds->rsc_colocation_rh(rsc_lh, child_rsc, constraint);
     }
 }
 
 enum pe_action_flags
 group_action_flags(action_t * action, node_t * node)
 {
     GListPtr gIter = NULL;
     enum pe_action_flags flags = (pe_action_optional | pe_action_runnable | pe_action_pseudo);
 
     for (gIter = action->rsc->children; gIter != NULL; gIter = gIter->next) {
         resource_t *child = (resource_t *) gIter->data;
         enum action_tasks task = get_complex_task(child, action->task, TRUE);
         const char *task_s = task2text(task);
         action_t *child_action = find_first_action(child->actions, NULL, task_s, node);
 
         if (child_action) {
             enum pe_action_flags child_flags = child->cmds->action_flags(child_action, node);
 
             if (is_set(flags, pe_action_optional)
                 && is_set(child_flags, pe_action_optional) == FALSE) {
                 pe_rsc_trace(action->rsc, "%s is manditory because of %s", action->uuid, child_action->uuid);
                 clear_bit(flags, pe_action_optional);
                 pe_clear_action_bit(action, pe_action_optional);
             }
             if (safe_str_neq(task_s, action->task)
                 && is_set(flags, pe_action_runnable)
                 && is_set(child_flags, pe_action_runnable) == FALSE) {
                 pe_rsc_trace(action->rsc, "%s is not runnable because of %s", action->uuid, child_action->uuid);
                 clear_bit(flags, pe_action_runnable);
                 pe_clear_action_bit(action, pe_action_runnable);
             }
 
         } else if (task != stop_rsc) {
             pe_rsc_trace(action->rsc, "%s is not runnable because of %s (not found in %s)", action->uuid, task_s,
                       child->id);
             clear_bit(flags, pe_action_runnable);
         }
     }
 
     return flags;
 }
 
 enum pe_graph_flags
 group_update_actions(action_t * first, action_t * then, node_t * node, enum pe_action_flags flags,
                      enum pe_action_flags filter, enum pe_ordering type)
 {
     GListPtr gIter = then->rsc->children;
     enum pe_graph_flags changed = pe_graph_none;
 
     CRM_ASSERT(then->rsc != NULL);
     changed |= native_update_actions(first, then, node, flags, filter, type);
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child = (resource_t *) gIter->data;
         action_t *child_action = find_first_action(child->actions, NULL, then->task, node);
 
         if (child_action) {
             changed |= child->cmds->update_actions(first, child_action, node, flags, filter, type);
         }
     }
 
     return changed;
 }
 
 void
 group_rsc_location(resource_t * rsc, rsc_to_node_t * constraint)
 {
     GListPtr gIter = rsc->children;
     GListPtr saved = constraint->node_list_rh;
     GListPtr zero = node_list_dup(constraint->node_list_rh, TRUE, FALSE);
     gboolean reset_scores = TRUE;
     group_variant_data_t *group_data = NULL;
 
     get_group_variant_data(group_data, rsc);
 
     pe_rsc_debug(rsc, "Processing rsc_location %s for %s", constraint->id, rsc->id);
 
     native_rsc_location(rsc, constraint);
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         child_rsc->cmds->rsc_location(child_rsc, constraint);
         if (group_data->colocated && reset_scores) {
             reset_scores = FALSE;
             constraint->node_list_rh = zero;
         }
     }
 
     constraint->node_list_rh = saved;
     g_list_free_full(zero, free);
 }
 
 void
 group_expand(resource_t * rsc, pe_working_set_t * data_set)
 {
     GListPtr gIter = rsc->children;
 
     pe_rsc_trace(rsc, "Processing actions from %s", rsc->id);
 
     CRM_CHECK(rsc != NULL, return);
     native_expand(rsc, data_set);
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         child_rsc->cmds->expand(child_rsc, data_set);
     }
 }
 
 GHashTable *
 group_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr,
                     float factor, enum pe_weights flags)
 {
     GListPtr gIter = rsc->rsc_cons_lhs;
     group_variant_data_t *group_data = NULL;
 
     get_group_variant_data(group_data, rsc);
 
     if (is_set(rsc->flags, pe_rsc_merging)) {
         pe_rsc_info(rsc, "Breaking dependency loop with %s at %s", rsc->id, rhs);
         return nodes;
     }
 
     set_bit(rsc->flags, pe_rsc_merging);
 
     nodes =
         group_data->first_child->cmds->merge_weights(group_data->first_child, rhs, nodes, attr,
                                                      factor, flags);
 
     for (; gIter != NULL; gIter = gIter->next) {
         rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
 
         nodes = native_merge_weights(constraint->rsc_lh, rsc->id, nodes,
                                      constraint->node_attribute,
                                      (float) constraint->score / INFINITY, flags);
     }
 
     clear_bit(rsc->flags, pe_rsc_merging);
     return nodes;
 }
 
 void
 group_append_meta(resource_t * rsc, xmlNode * xml)
 {
 }
diff --git a/pengine/regression.sh b/pengine/regression.sh
index 637042a36b..f4a47783a7 100755
--- a/pengine/regression.sh
+++ b/pengine/regression.sh
@@ -1,657 +1,658 @@
 #!/bin/bash
 
  # Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  # 
  # This program is free software; you can redistribute it and/or
  # modify it under the terms of the GNU General Public
  # License as published by the Free Software Foundation; either
  # version 2 of the License, or (at your option) any later version.
  # 
  # This software is distributed in the hope that it will be useful,
  # but WITHOUT ANY WARRANTY; without even the implied warranty of
  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  # General Public License for more details.
  # 
  # You should have received a copy of the GNU General Public
  # License along with this library; if not, write to the Free Software
  # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  #
 
 core=`dirname $0`
 . $core/regression.core.sh
 
 create_mode="true"
 info Generating test outputs for these tests...
 # do_test file description
 
 info Done.
 echo ""
 
 info Performing the following tests from $io_dir
 create_mode="false"
 
 echo ""
 do_test simple1 "Offline     "
 do_test simple2 "Start       "
 do_test simple3 "Start 2     "
 do_test simple4 "Start Failed"
 do_test simple6 "Stop Start  "
 do_test simple7 "Shutdown    "
 #do_test simple8 "Stonith	"
 #do_test simple9 "Lower version"
 #do_test simple10 "Higher version"
 do_test simple11 "Priority (ne)"
 do_test simple12 "Priority (eq)"
 do_test simple8 "Stickiness"
 
 echo ""
 do_test group1 "Group		"
 do_test group2 "Group + Native	"
 do_test group3 "Group + Group	"
 do_test group4 "Group + Native (nothing)"
 do_test group5 "Group + Native (move)   "
 do_test group6 "Group + Group (move)    "
 do_test group7 "Group colocation"
 do_test group13 "Group colocation (cant run)"
 do_test group8 "Group anti-colocation"
 do_test group9 "Group recovery"
 do_test group10 "Group partial recovery"
 do_test group11 "Group target_role"
 do_test group14 "Group stop (graph terminated)"
 do_test group15 "-ve group colocation"
 do_test bug-1573 "Partial stop of a group with two children"
 do_test bug-1718 "Mandatory group ordering - Stop group_FUN"
 do_test bug-lf-2613 "Move group on failure"
 do_test bug-lf-2619 "Move group on clone failure"
+do_test group-fail "Ensure stop order is preserved for partially active groups"
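+# group-fail: only rsc2 and rsc4 are active, yet rsc4 must still stop before rsc2 (see group-fail.dot)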
 
 echo ""
 do_test rsc_dep1 "Must not     "
 do_test rsc_dep3 "Must         "
 do_test rsc_dep5 "Must not 3   "
 do_test rsc_dep7 "Must 3       "
 do_test rsc_dep10 "Must (but cant)"
 do_test rsc_dep2  "Must (running) "
 do_test rsc_dep8  "Must (running : alt) "
 do_test rsc_dep4  "Must (running + move)"
 do_test asymmetric "Asymmetric - require explicit location constraints"
 
 echo ""
 do_test orphan-0 "Orphan ignore"
 do_test orphan-1 "Orphan stop"
 do_test orphan-2 "Orphan stop, remove failcount"
 
 echo ""
 do_test params-0 "Params: No change"
 do_test params-1 "Params: Changed"
 do_test params-2 "Params: Resource definition"
 do_test params-4 "Params: Reload"
 do_test params-5 "Params: Restart based on probe digest"
 do_test novell-251689 "Resource definition change + target_role=stopped"
 do_test bug-lf-2106 "Restart all anonymous clone instances after config change"
 do_test params-6 "Params: Detect reload in previously migrated resource"
 
 echo ""
 do_test target-0 "Target Role : baseline"
 do_test target-1 "Target Role : master"
 do_test target-2 "Target Role : invalid"
 
 echo ""
 do_test domain "Failover domains"
 do_test base-score "Set a node's default score for all nodes"
 
 echo ""
 do_test date-1 "Dates" -t "2005-020"
 do_test date-2 "Date Spec - Pass" -t "2005-020T12:30"
 do_test date-3 "Date Spec - Fail" -t "2005-020T11:30"
 do_test probe-0 "Probe (anon clone)"
 do_test probe-1 "Pending Probe"
 do_test probe-2 "Correctly re-probe cloned groups"
 do_test probe-3 "Probe (pending node)"
 do_test probe-4 "Probe (pending node + stopped resource)" --rc 4
 do_test standby "Standby"
 do_test comments "Comments"
 
 echo ""
 do_test one-or-more-0 "Everything starts"
 do_test one-or-more-1 "Nothing starts because of A"
 do_test one-or-more-2 "D can start because of C"
 do_test one-or-more-3 "D cannot start because of B and C"
 do_test one-or-more-4 "D cannot start because of target-role"
 do_test one-or-more-5 "Start A and F even though C and D are stopped"
 do_test one-or-more-6 "Leave A running even though B is stopped"
 do_test one-or-more-7 "Leave A running even though C is stopped"
 
 echo ""
 do_test order1 "Order start 1     "
 do_test order2 "Order start 2     "
 do_test order3 "Order stop	  "
 do_test order4 "Order (multiple)  "
 do_test order5 "Order (move)  "
 do_test order6 "Order (move w/ restart)  "
 do_test order7 "Order (mandatory)  "
 do_test order-optional "Order (score=0)  "
 do_test order-required "Order (score=INFINITY)  "
 do_test bug-lf-2171 "Prevent group start when clone is stopped"
 do_test order-clone "Clone ordering should be able to prevent startup of dependent clones"
 do_test order-sets "Ordering for resource sets"
 do_test order-serialize "Serialize resources without inhibiting migration"
 do_test order-serialize-set "Serialize a set of resources without inhibiting migration"
 do_test clone-order-primitive "Order clone start after a primitive"
 do_test order-optional-keyword "Order (optional keyword)"
 do_test order-mandatory "Order (mandatory keyword)"
 do_test bug-lf-2493 "Don't imply colocation requirements when applying ordering constraints with clones"
 do_test ordered-set-basic-startup "Constraint set with default order settings."
 do_test order-wrong-kind "Order (error)"
 
 echo ""
 do_test coloc-loop "Colocation - loop"
 do_test coloc-many-one "Colocation - many-to-one"
 do_test coloc-list "Colocation - many-to-one with list"
 do_test coloc-group "Colocation - groups"
 do_test coloc-slave-anti "Anti-colocation with slave shouldn't prevent master colocation"
 do_test coloc-attr "Colocation based on node attributes"
 do_test coloc-negative-group "Negative colocation with a group"
 do_test coloc-intra-set "Intra-set colocation"
 do_test bug-lf-2435 "Colocation sets with a negative score"
 do_test coloc-clone-stays-active "Ensure clones don't get stopped/demoted because a dependent must stop"
 do_test coloc_fp_logic "Verify floating point calculations in colocation are working"
 do_test colo_master_w_native "cl#5070 - Verify promotion order is affected when colocating master to native rsc."
 do_test colo_slave_w_native  "cl#5070 - Verify promotion order is affected when colocating slave to native rsc."
 
 echo ""
 do_test rsc-sets-seq-true "Resource Sets - sequential=false"
 do_test rsc-sets-seq-false "Resource Sets - sequential=true"
 do_test rsc-sets-clone "Resource Sets - Clone"
 do_test rsc-sets-master "Resource Sets - Master"
 do_test rsc-sets-clone-1 "Resource Sets - Clone (lf#2404)"
 
 #echo ""
 #do_test agent1 "version: lt (empty)"
 #do_test agent2 "version: eq	"
 #do_test agent3 "version: gt	"
 
 echo ""
 do_test attrs1 "string: eq (and)     "
 do_test attrs2 "string: lt / gt (and)"
 do_test attrs3 "string: ne (or)      "
 do_test attrs4 "string: exists       "
 do_test attrs5 "string: not_exists   "
 do_test attrs6 "is_dc: true          "
 do_test attrs7 "is_dc: false         "
 do_test attrs8 "score_attribute      "
 
 echo ""
 do_test mon-rsc-1 "Schedule Monitor - start"
 do_test mon-rsc-2 "Schedule Monitor - move "
 do_test mon-rsc-3 "Schedule Monitor - pending start     "
 do_test mon-rsc-4 "Schedule Monitor - move/pending start"
 
 echo ""
 do_test rec-rsc-0 "Resource Recover - no start     "
 do_test rec-rsc-1 "Resource Recover - start        "
 do_test rec-rsc-2 "Resource Recover - monitor      "
 do_test rec-rsc-3 "Resource Recover - stop - ignore"
 do_test rec-rsc-4 "Resource Recover - stop - block "
 do_test rec-rsc-5 "Resource Recover - stop - fence "
 do_test rec-rsc-6 "Resource Recover - multiple - restart"
 do_test rec-rsc-7 "Resource Recover - multiple - stop   "
 do_test rec-rsc-8 "Resource Recover - multiple - block  "
 do_test rec-rsc-9 "Resource Recover - group/group"
 
 echo ""
 do_test quorum-1 "No quorum - ignore"
 do_test quorum-2 "No quorum - freeze"
 do_test quorum-3 "No quorum - stop  "
 do_test quorum-4 "No quorum - start anyway"
 do_test quorum-5 "No quorum - start anyway (group)"
 do_test quorum-6 "No quorum - start anyway (clone)"
 
 echo ""
 do_test rec-node-1 "Node Recover - Startup   - no fence"
 do_test rec-node-2 "Node Recover - Startup   - fence   "
 do_test rec-node-3 "Node Recover - HA down   - no fence"
 do_test rec-node-4 "Node Recover - HA down   - fence   "
 do_test rec-node-5 "Node Recover - CRM down  - no fence"
 do_test rec-node-6 "Node Recover - CRM down  - fence   "
 do_test rec-node-7 "Node Recover - no quorum - ignore  "
 do_test rec-node-8 "Node Recover - no quorum - freeze  "
 do_test rec-node-9 "Node Recover - no quorum - stop    "
 do_test rec-node-10 "Node Recover - no quorum - stop w/fence"
 do_test rec-node-11 "Node Recover - CRM down w/ group - fence   "
 do_test rec-node-12 "Node Recover - nothing active - fence   "
 do_test rec-node-13 "Node Recover - failed resource + shutdown - fence   "
 do_test rec-node-15 "Node Recover - unknown lrm section"
 do_test rec-node-14 "Serialize all stonith's"
 
 echo ""
 do_test multi1 "Multiple Active (stop/start)"
 
 echo ""
 do_test migrate-begin     "Normal migration"
 do_test migrate-success   "Completed migration"
 do_test migrate-partial-1 "Completed migration, missing stop on source"
 do_test migrate-partial-2 "Successful migrate_to only"
 do_test migrate-partial-3 "Successful migrate_to only, target down"
 do_test migrate-partial-4 "Migrate from the correct host after migrate_to+migrate_from"
 
 do_test migrate-fail-2 "Failed migrate_from"
 do_test migrate-fail-3 "Failed migrate_from + stop on source"
 do_test migrate-fail-4 "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target"
 do_test migrate-fail-5 "Failed migrate_from + stop on source and target"
 
 do_test migrate-fail-6 "Failed migrate_to"
 do_test migrate-fail-7 "Failed migrate_to + stop on source"
 do_test migrate-fail-8 "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target"
 do_test migrate-fail-9 "Failed migrate_to + stop on source and target"
 
 do_test migrate-stop "Migration in a stopping stack"
 do_test migrate-start "Migration in a starting stack"
 do_test migrate-stop_start "Migration in a restarting stack"
 do_test migrate-stop-complex "Migration in a complex stopping stack"
 do_test migrate-start-complex "Migration in a complex starting stack"
 do_test migrate-stop-start-complex "Migration in a complex moving stack"
 do_test migrate-shutdown "Order the post-migration 'stop' before node shutdown"
 
 do_test migrate-1 "Migrate (migrate)"
 do_test migrate-2 "Migrate (stable)"
 do_test migrate-3 "Migrate (failed migrate_to)"
 do_test migrate-4 "Migrate (failed migrate_from)"
 do_test novell-252693 "Migration in a stopping stack"
 do_test novell-252693-2 "Migration in a starting stack"
 do_test novell-252693-3 "Non-Migration in a starting and stopping stack"
 do_test bug-1820 "Migration in a group"
 do_test bug-1820-1 "Non-migration in a group"
 do_test migrate-5 "Primitive migration with a clone"
 do_test migrate-fencing "Migration after Fencing"
 
 #echo ""
 #do_test complex1 "Complex	"
 
 do_test bug-lf-2422 "Dependency on partially active group - stop ocfs:*"
 
 echo ""
 do_test clone-anon-probe-1 "Probe the correct (anonymous) clone instance for each node"
 do_test clone-anon-probe-2 "Avoid needless re-probing of anonymous clones"
 do_test clone-anon-failcount "Merge failcounts for anonymous clones"
 do_test inc0 "Incarnation start" 
 do_test inc1 "Incarnation start order" 
 do_test inc2 "Incarnation silent restart, stop, move"
 do_test inc3 "Inter-incarnation ordering, silent restart, stop, move"
 do_test inc4 "Inter-incarnation ordering, silent restart, stop, move (ordered)"
 do_test inc5 "Inter-incarnation ordering, silent restart, stop, move (restart 1)"
 do_test inc6 "Inter-incarnation ordering, silent restart, stop, move (restart 2)"
 do_test inc7 "Clone colocation"
 do_test inc8 "Clone anti-colocation"
 do_test inc9 "Non-unique clone"
 do_test inc10 "Non-unique clone (stop)"
 do_test inc11 "Primitive colocation with clones" 
 do_test inc12 "Clone shutdown" 
 do_test cloned-group "Make sure only the correct number of cloned groups are started"
 do_test clone-no-shuffle "Dont prioritize allocation of instances that must be moved"
 do_test clone-max-zero "Orphan processing with clone-max=0"
 do_test clone-anon-dup "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node"
 do_test bug-lf-2160 "Dont shuffle clones due to colocation"
 do_test bug-lf-2213 "clone-node-max enforcement for cloned groups"
 do_test bug-lf-2153 "Clone ordering constraints"
 do_test bug-lf-2361 "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable"
 do_test bug-lf-2317 "Avoid needless restart of primitive depending on a clone"
 do_test clone-colocate-instance-1 "Colocation with a specific clone instance (negative example)"
 do_test clone-colocate-instance-2 "Colocation with a specific clone instance"
 do_test clone-order-instance "Ordering with specific clone instances"
 do_test bug-lf-2453 "Enforce mandatory clone ordering without colocation"
 do_test bug-lf-2508 "Correctly reconstruct the status of anonymous cloned groups" 
 do_test bug-lf-2544 "Balanced clone placement"
 do_test bug-lf-2445 "Redistribute clones with node-max > 1 and stickiness = 0"
 do_test bug-lf-2574 "Avoid clone shuffle"
 do_test bug-lf-2581 "Avoid group restart due to unrelated clone (re)start"
 
 echo ""
 do_test master-0 "Stopped -> Slave"
 do_test master-1 "Stopped -> Promote"
 do_test master-2 "Stopped -> Promote : notify"
 do_test master-3 "Stopped -> Promote : master location"
 do_test master-4 "Started -> Promote : master location"
 do_test master-5 "Promoted -> Promoted"
 do_test master-6 "Promoted -> Promoted (2)"
 do_test master-7 "Promoted -> Fenced"
 do_test master-8 "Promoted -> Fenced -> Moved"
 do_test master-9 "Stopped + Promotable + No quorum"
 do_test master-10 "Stopped -> Promotable : notify with monitor"
 do_test master-11 "Stopped -> Promote : colocation"
 do_test novell-239082 "Demote/Promote ordering"
 do_test novell-239087 "Stable master placement"
 do_test master-12 "Promotion based solely on rsc_location constraints"
 do_test master-13 "Include preferences of colocated resources when placing master"
 do_test master-demote "Ordering when actions depend on demoting a slave resource"
 do_test master-ordering "Prevent resources from starting that need a master"
 do_test bug-1765 "Master-Master Colocation (dont stop the slaves)"
 do_test master-group "Promotion of cloned groups"
 do_test bug-lf-1852 "Don't shuffle master/slave instances unnecessarily"
 do_test master-failed-demote "Dont retry failed demote actions"
 do_test master-failed-demote-2 "Dont retry failed demote actions (notify=false)"
 do_test master-depend "Ensure resources that depend on the master don't get allocated until the master does"
 do_test master-reattach "Re-attach to a running master"
 do_test master-allow-start "Don't include master score if it would prevent allocation"
 do_test master-colocation "Allow master instance placement to be influenced by colocation constraints"
 do_test master-pseudo "Make sure promote/demote pseudo actions are created correctly"
 do_test master-role "Prevent target-role from promoting more than master-max instances"
 do_test bug-lf-2358 "Master-Master anti-colocation"
 do_test master-promotion-constraint "Mandatory master colocation constraints"
 do_test unmanaged-master "Ensure role is preserved for unmanaged resources"
 do_test master-unmanaged-monitor "Start the correct monitor operation for unmanaged masters"
 do_test master-demote-2 "Demote does not clear past failure"
 do_test master-move "Move master based on failure of colocated group"   
 do_test master-probed-score "Observe the promotion score of probed resources"
 do_test colocation_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by colocation constraint"
 do_test colocation_constraint_stops_slave  "cl#5054 - Ensure slave is not demoted when stopped by colocation constraint"
 do_test order_constraint_stops_master      "cl#5054 - Ensure master is demoted when stopped by order constraint"
 do_test order_constraint_stops_slave       "cl#5054 - Ensure slave is not demoted when stopped by order constraint"
 do_test master_monitor_restart "cl#5072 - Ensure master monitor operation will start after promotion."
 
 echo ""
 do_test history-1 "Correctly parse stateful-1 resource state"
 
 echo ""
 do_test managed-0 "Managed (reference)"
 do_test managed-1 "Not managed - down "
 do_test managed-2 "Not managed - up   "
 do_test bug-5028 "Shutdown should block if anything depends on an unmanaged resource"
 do_test bug-5028-detach "Ensure detach still works"
 do_test bug-5028-bottom "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack"
 
 echo ""
 do_test interleave-0 "Interleave (reference)"
 do_test interleave-1 "coloc - not interleaved"
 do_test interleave-2 "coloc - interleaved   "
 do_test interleave-3 "coloc - interleaved (2)"
 do_test interleave-pseudo-stop "Interleaved clone during stonith"
 do_test interleave-stop "Interleaved clone during stop"
 do_test interleave-restart "Interleaved clone during dependency restart"
 
 echo ""
 do_test notify-0 "Notify reference"
 do_test notify-1 "Notify simple"
 do_test notify-2 "Notify simple, confirm"
 do_test notify-3 "Notify move, confirm"
 do_test novell-239079 "Notification priority"
 #do_test notify-2 "Notify - 764"
 
 echo ""
 do_test 594 "OSDL #594 - Unrunnable actions scheduled in transition"
 do_test 662 "OSDL #662 - Two resources start on one node when incarnation_node_max = 1"
 do_test 696 "OSDL #696 - CRM starts stonith RA without monitor"
 do_test 726 "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop"
 do_test 735 "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3"
 do_test 764 "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1"
 do_test 797 "OSDL #797 - Assert triggered: task_id_i > max_call_id"
 do_test 829 "OSDL #829"
 do_test 994 "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted"
 do_test 994-2 "OSDL #994 - with a dependent resource"
 do_test 1360 "OSDL #1360 - Clone stickiness"
 do_test 1484 "OSDL #1484 - on_fail=stop"
 do_test 1494 "OSDL #1494 - Clone stability"
 do_test unrunnable-1 "Unrunnable"
 do_test stonith-0 "Stonith loop - 1"
 do_test stonith-1 "Stonith loop - 2"
 do_test stonith-2 "Stonith loop - 3"
 do_test stonith-3 "Stonith startup"
 do_test stonith-4 "Stonith node state" --rc 4
 do_test bug-1572-1 "Recovery of groups depending on master/slave"
 do_test bug-1572-2 "Recovery of groups depending on master/slave when the master is never re-promoted"
 do_test bug-1685 "Depends-on-master ordering"
 do_test bug-1822 "Dont promote partially active groups"
 do_test bug-pm-11 "New resource added to a m/s group"
 do_test bug-pm-12 "Recover only the failed portion of a cloned group"
 do_test bug-n-387749 "Don't shuffle clone instances"
 do_test bug-n-385265 "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped"
 do_test bug-n-385265-2 "Ensure groups are migrated instead of remaining partially active on the current node"
 do_test bug-lf-1920 "Correctly handle probes that find active resources"
 do_test bnc-515172 "Location constraint with multiple expressions"
 do_test colocate-primitive-with-clone "Optional colocation with a clone"
 do_test use-after-free-merge "Use-after-free in native_merge_weights"
 do_test bug-lf-2551 "STONITH ordering for stop"
 do_test bug-lf-2606 "Stonith implies demote"
 do_test bug-lf-2474 "Ensure resource op timeout takes precedence over op_defaults"
 do_test bug-suse-707150 "Prevent vm-01 from starting due to colocation/ordering"
 do_test bug-5014-A-start-B-start "Verify when A starts B starts using symmetrical=false"
 do_test bug-5014-A-stop-B-started "Verify when A stops B does not stop if it has already started using symmetric=false"
 do_test bug-5014-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using symmetric=false"
 do_test bug-5014-CthenAthenB-C-stopped "Verify when C then A is symmetrical=true, A then B is symmetric=false, and C is stopped that nothing starts."
 do_test bug-5014-CLONE-A-start-B-start "Verify when A starts B starts using clone resources with symmetric=false"
 do_test bug-5014-CLONE-A-stop-B-started "Verify when A stops B does not stop if it has already started using clone resources with symmetric=false."
 do_test bug-5014-GROUP-A-start-B-start "Verify when A starts B starts when using group resources with symmetric=false."
 do_test bug-5014-GROUP-A-stopped-B-started "Verify when A stops B does not stop if it has already started using group resources with symmetric=false."
 do_test bug-5014-GROUP-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetric=false."
 do_test bug-5014-ordered-set-symmetrical-false "Verify ordered sets work with symmetrical=false"
 do_test bug-5014-ordered-set-symmetrical-true "Verify ordered sets work with symmetrical=true"
 do_test bug-5007-masterslave_colocation "Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources."
 do_test bug-5038 "Prevent restart of anonymous clones when clone-max decreases"
 do_test bug-5025-1 "Automatically clean up failcount after resource config change with reload"
 do_test bug-5025-2 "Make sure clear failcount action isn't set when config does not change."
 do_test bug-5025-3 "Automatically clean up failcount after resource config change with restart"
 do_test failcount "Ensure failcounts are correctly expired"
 do_test monitor-onfail-restart "bug-5058 - Monitor failure with on-fail set to restart"
 do_test monitor-onfail-stop    "bug-5058 - Monitor failure with on-fail set to stop"
 do_test bug-5059 "No need to restart p_stateful1:*"
 do_test bug-5069-op-enabled  "Test on-fail=ignore with failure when monitor is enabled."
 do_test bug-5069-op-disabled "Test on-fail=ignore with failure when monitor is disabled."
 
 do_test ignore_stonith_rsc_order1 "cl#5056- Ignore order constraint between stonith and non-stonith rsc."
 do_test ignore_stonith_rsc_order2 "cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith."
 do_test ignore_stonith_rsc_order3 "cl#5056- Ignore order constraint, stonith clone and mixed group"
 do_test ignore_stonith_rsc_order4 "cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group"
 do_test honor_stonith_rsc_order1 "cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)."
 do_test honor_stonith_rsc_order2 "cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)"
 do_test honor_stonith_rsc_order3 "cl#5056- Honor order constraint, stonith clones with nested pure stonith group."
 do_test honor_stonith_rsc_order4 "cl#5056- Honor order constraint, between two native stonith rscs."
 
 echo ""
 do_test systemhealth1  "System Health ()               #1"
 do_test systemhealth2  "System Health ()               #2"
 do_test systemhealth3  "System Health ()               #3"
 do_test systemhealthn1 "System Health (None)           #1"
 do_test systemhealthn2 "System Health (None)           #2"
 do_test systemhealthn3 "System Health (None)           #3"
 do_test systemhealthm1 "System Health (Migrate On Red) #1"
 do_test systemhealthm2 "System Health (Migrate On Red) #2"
 do_test systemhealthm3 "System Health (Migrate On Red) #3"
 do_test systemhealtho1 "System Health (Only Green)     #1"
 do_test systemhealtho2 "System Health (Only Green)     #2"
 do_test systemhealtho3 "System Health (Only Green)     #3"
 do_test systemhealthp1 "System Health (Progressive)    #1"
 do_test systemhealthp2 "System Health (Progressive)    #2"
 do_test systemhealthp3 "System Health (Progressive)    #3"
 
 echo ""
 do_test utilization "Placement Strategy - utilization"
 do_test minimal     "Placement Strategy - minimal"
 do_test balanced    "Placement Strategy - balanced"
 
 echo ""
 do_test placement-stickiness "Optimized Placement Strategy - stickiness"
 do_test placement-priority   "Optimized Placement Strategy - priority"
 do_test placement-location   "Optimized Placement Strategy - location"
 do_test placement-capacity   "Optimized Placement Strategy - capacity"
 
 echo ""
 do_test utilization-order1 "Utilization Order - Simple"
 do_test utilization-order2 "Utilization Order - Complex"
 do_test utilization-order3 "Utilization Order - Migrate"
 do_test utilization-order4 "Utilization Order - Live Migration (bnc#695440)"
 do_test utilization-shuffle "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3"
 do_test load-stopped-loop "Avoid transition loop due to load_stopped (cl#5044)"
 
 echo ""
 do_test reprobe-target_rc "Ensure correct target_rc for reprobe of inactive resources"
 
 echo ""
 do_test stopped-monitor-00	"Stopped Monitor - initial start"
 do_test stopped-monitor-01	"Stopped Monitor - failed started"
 do_test stopped-monitor-02	"Stopped Monitor - started multi-up"
 do_test stopped-monitor-03	"Stopped Monitor - stop started"
 do_test stopped-monitor-04	"Stopped Monitor - failed stop"
 do_test stopped-monitor-05	"Stopped Monitor - start unmanaged"
 do_test stopped-monitor-06	"Stopped Monitor - unmanaged multi-up"
 do_test stopped-monitor-07	"Stopped Monitor - start unmanaged multi-up"
 do_test stopped-monitor-08	"Stopped Monitor - migrate"
 do_test stopped-monitor-09	"Stopped Monitor - unmanage started"
 do_test stopped-monitor-10	"Stopped Monitor - unmanaged started multi-up"
 do_test stopped-monitor-11	"Stopped Monitor - stop unmanaged started"
 do_test stopped-monitor-12	"Stopped Monitor - unmanaged started multi-up (target-role="Stopped")"
 do_test stopped-monitor-20	"Stopped Monitor - initial stop"
 do_test stopped-monitor-21	"Stopped Monitor - stopped single-up"
 do_test stopped-monitor-22	"Stopped Monitor - stopped multi-up"
 do_test stopped-monitor-23	"Stopped Monitor - start stopped"
 do_test stopped-monitor-24	"Stopped Monitor - unmanage stopped"
 do_test stopped-monitor-25	"Stopped Monitor - unmanaged stopped multi-up"
 do_test stopped-monitor-26	"Stopped Monitor - start unmanaged stopped"
 do_test stopped-monitor-27	"Stopped Monitor - unmanaged stopped multi-up (target-role="Started")"
 do_test stopped-monitor-30	"Stopped Monitor - new node started"
 do_test stopped-monitor-31	"Stopped Monitor - new node stopped"
 
 echo""
 do_test ticket-primitive-1 "Ticket - Primitive (loss-policy=stop, initial)"
 do_test ticket-primitive-2 "Ticket - Primitive (loss-policy=stop, granted)"
 do_test ticket-primitive-3 "Ticket - Primitive (loss-policy=stop, revoked)"
 do_test ticket-primitive-4 "Ticket - Primitive (loss-policy=demote, initial)"
 do_test ticket-primitive-5 "Ticket - Primitive (loss-policy=demote, granted)"
 do_test ticket-primitive-6 "Ticket - Primitive (loss-policy=demote, revoked)"
 do_test ticket-primitive-7 "Ticket - Primitive (loss-policy=fence, initial)"
 do_test ticket-primitive-8 "Ticket - Primitive (loss-policy=fence, granted)"
 do_test ticket-primitive-9 "Ticket - Primitive (loss-policy=fence, revoked)"
 do_test ticket-primitive-10 "Ticket - Primitive (loss-policy=freeze, initial)"
 do_test ticket-primitive-11 "Ticket - Primitive (loss-policy=freeze, granted)"
 do_test ticket-primitive-12 "Ticket - Primitive (loss-policy=freeze, revoked)"
 
 do_test ticket-primitive-13 "Ticket - Primitive (loss-policy=stop, standby, granted)"
 do_test ticket-primitive-14 "Ticket - Primitive (loss-policy=stop, granted, standby)"
 do_test ticket-primitive-15 "Ticket - Primitive (loss-policy=stop, standby, revoked)"
 do_test ticket-primitive-16 "Ticket - Primitive (loss-policy=demote, standby, granted)"
 do_test ticket-primitive-17 "Ticket - Primitive (loss-policy=demote, granted, standby)"
 do_test ticket-primitive-18 "Ticket - Primitive (loss-policy=demote, standby, revoked)"
 do_test ticket-primitive-19 "Ticket - Primitive (loss-policy=fence, standby, granted)"
 do_test ticket-primitive-20 "Ticket - Primitive (loss-policy=fence, granted, standby)"
 do_test ticket-primitive-21 "Ticket - Primitive (loss-policy=fence, standby, revoked)"
 do_test ticket-primitive-22 "Ticket - Primitive (loss-policy=freeze, standby, granted)"
 do_test ticket-primitive-23 "Ticket - Primitive (loss-policy=freeze, granted, standby)"
 do_test ticket-primitive-24 "Ticket - Primitive (loss-policy=freeze, standby, revoked)"
 
 echo""
 do_test ticket-group-1 "Ticket - Group (loss-policy=stop, initial)"
 do_test ticket-group-2 "Ticket - Group (loss-policy=stop, granted)"
 do_test ticket-group-3 "Ticket - Group (loss-policy=stop, revoked)"
 do_test ticket-group-4 "Ticket - Group (loss-policy=demote, initial)"
 do_test ticket-group-5 "Ticket - Group (loss-policy=demote, granted)"
 do_test ticket-group-6 "Ticket - Group (loss-policy=demote, revoked)"
 do_test ticket-group-7 "Ticket - Group (loss-policy=fence, initial)"
 do_test ticket-group-8 "Ticket - Group (loss-policy=fence, granted)"
 do_test ticket-group-9 "Ticket - Group (loss-policy=fence, revoked)"
 do_test ticket-group-10 "Ticket - Group (loss-policy=freeze, initial)"
 do_test ticket-group-11 "Ticket - Group (loss-policy=freeze, granted)"
 do_test ticket-group-12 "Ticket - Group (loss-policy=freeze, revoked)"
 
 do_test ticket-group-13 "Ticket - Group (loss-policy=stop, standby, granted)"
 do_test ticket-group-14 "Ticket - Group (loss-policy=stop, granted, standby)"
 do_test ticket-group-15 "Ticket - Group (loss-policy=stop, standby, revoked)"
 do_test ticket-group-16 "Ticket - Group (loss-policy=demote, standby, granted)"
 do_test ticket-group-17 "Ticket - Group (loss-policy=demote, granted, standby)"
 do_test ticket-group-18 "Ticket - Group (loss-policy=demote, standby, revoked)"
 do_test ticket-group-19 "Ticket - Group (loss-policy=fence, standby, granted)"
 do_test ticket-group-20 "Ticket - Group (loss-policy=fence, granted, standby)"
 do_test ticket-group-21 "Ticket - Group (loss-policy=fence, standby, revoked)"
 do_test ticket-group-22 "Ticket - Group (loss-policy=freeze, standby, granted)"
 do_test ticket-group-23 "Ticket - Group (loss-policy=freeze, granted, standby)"
 do_test ticket-group-24 "Ticket - Group (loss-policy=freeze, standby, revoked)"
  
 echo""
 do_test ticket-clone-1 "Ticket - Clone (loss-policy=stop, initial)"
 do_test ticket-clone-2 "Ticket - Clone (loss-policy=stop, granted)"
 do_test ticket-clone-3 "Ticket - Clone (loss-policy=stop, revoked)"
 do_test ticket-clone-4 "Ticket - Clone (loss-policy=demote, initial)"
 do_test ticket-clone-5 "Ticket - Clone (loss-policy=demote, granted)"
 do_test ticket-clone-6 "Ticket - Clone (loss-policy=demote, revoked)"
 do_test ticket-clone-7 "Ticket - Clone (loss-policy=fence, initial)"
 do_test ticket-clone-8 "Ticket - Clone (loss-policy=fence, granted)"
 do_test ticket-clone-9 "Ticket - Clone (loss-policy=fence, revoked)"
 do_test ticket-clone-10 "Ticket - Clone (loss-policy=freeze, initial)"
 do_test ticket-clone-11 "Ticket - Clone (loss-policy=freeze, granted)"
 do_test ticket-clone-12 "Ticket - Clone (loss-policy=freeze, revoked)"
 
 do_test ticket-clone-13 "Ticket - Clone (loss-policy=stop, standby, granted)"
 do_test ticket-clone-14 "Ticket - Clone (loss-policy=stop, granted, standby)"
 do_test ticket-clone-15 "Ticket - Clone (loss-policy=stop, standby, revoked)"
 do_test ticket-clone-16 "Ticket - Clone (loss-policy=demote, standby, granted)"
 do_test ticket-clone-17 "Ticket - Clone (loss-policy=demote, granted, standby)"
 do_test ticket-clone-18 "Ticket - Clone (loss-policy=demote, standby, revoked)"
 do_test ticket-clone-19 "Ticket - Clone (loss-policy=fence, standby, granted)"
 do_test ticket-clone-20 "Ticket - Clone (loss-policy=fence, granted, standby)"
 do_test ticket-clone-21 "Ticket - Clone (loss-policy=fence, standby, revoked)"
 do_test ticket-clone-22 "Ticket - Clone (loss-policy=freeze, standby, granted)"
 do_test ticket-clone-23 "Ticket - Clone (loss-policy=freeze, granted, standby)"
 do_test ticket-clone-24 "Ticket - Clone (loss-policy=freeze, standby, revoked)"
  
 echo""  
 do_test ticket-master-1 "Ticket - Master (loss-policy=stop, initial)"
 do_test ticket-master-2 "Ticket - Master (loss-policy=stop, granted)"
 do_test ticket-master-3 "Ticket - Master (loss-policy=stop, revoked)"
 do_test ticket-master-4 "Ticket - Master (loss-policy=demote, initial)"
 do_test ticket-master-5 "Ticket - Master (loss-policy=demote, granted)"
 do_test ticket-master-6 "Ticket - Master (loss-policy=demote, revoked)"
 do_test ticket-master-7 "Ticket - Master (loss-policy=fence, initial)"
 do_test ticket-master-8 "Ticket - Master (loss-policy=fence, granted)"
 do_test ticket-master-9 "Ticket - Master (loss-policy=fence, revoked)"
 do_test ticket-master-10 "Ticket - Master (loss-policy=freeze, initial)"
 do_test ticket-master-11 "Ticket - Master (loss-policy=freeze, granted)"
 do_test ticket-master-12 "Ticket - Master (loss-policy=freeze, revoked)"
 
 do_test ticket-master-13 "Ticket - Master (loss-policy=stop, standby, granted)"
 do_test ticket-master-14 "Ticket - Master (loss-policy=stop, granted, standby)"
 do_test ticket-master-15 "Ticket - Master (loss-policy=stop, standby, revoked)"
 do_test ticket-master-16 "Ticket - Master (loss-policy=demote, standby, granted)"
 do_test ticket-master-17 "Ticket - Master (loss-policy=demote, granted, standby)"
 do_test ticket-master-18 "Ticket - Master (loss-policy=demote, standby, revoked)"
 do_test ticket-master-19 "Ticket - Master (loss-policy=fence, standby, granted)"
 do_test ticket-master-20 "Ticket - Master (loss-policy=fence, granted, standby)"
 do_test ticket-master-21 "Ticket - Master (loss-policy=fence, standby, revoked)"
 do_test ticket-master-22 "Ticket - Master (loss-policy=freeze, standby, granted)"
 do_test ticket-master-23 "Ticket - Master (loss-policy=freeze, granted, standby)"
 do_test ticket-master-24 "Ticket - Master (loss-policy=freeze, standby, revoked)"
 
 echo ""
 do_test ticket-rsc-sets-1 "Ticket - Resource sets (1 ticket, initial)"
 do_test ticket-rsc-sets-2 "Ticket - Resource sets (1 ticket, granted)"
 do_test ticket-rsc-sets-3 "Ticket - Resource sets (1 ticket, revoked)"
 do_test ticket-rsc-sets-4 "Ticket - Resource sets (2 tickets, initial)"
 do_test ticket-rsc-sets-5 "Ticket - Resource sets (2 tickets, granted)"
 do_test ticket-rsc-sets-6 "Ticket - Resource sets (2 tickets, granted)"
 do_test ticket-rsc-sets-7 "Ticket - Resource sets (2 tickets, revoked)"
 
 do_test ticket-rsc-sets-8 "Ticket - Resource sets (1 ticket, standby, granted)"
 do_test ticket-rsc-sets-9 "Ticket - Resource sets (1 ticket, granted, standby)"
 do_test ticket-rsc-sets-10 "Ticket - Resource sets (1 ticket, standby, revoked)"
 do_test ticket-rsc-sets-11 "Ticket - Resource sets (2 tickets, standby, granted)"
 do_test ticket-rsc-sets-12 "Ticket - Resource sets (2 tickets, standby, granted)"
 do_test ticket-rsc-sets-13 "Ticket - Resource sets (2 tickets, granted, standby)"
 do_test ticket-rsc-sets-14 "Ticket - Resource sets (2 tickets, standby, revoked)"
 
 echo ""
 do_test template-1 "Template - 1"
 do_test template-2 "Template - 2"
 do_test template-3 "Template - 3 (merge operations)"
 
 do_test template-coloc-1 "Template - Colocation 1"
 do_test template-coloc-2 "Template - Colocation 2"
 do_test template-coloc-3 "Template - Colocation 3"
 do_test template-order-1 "Template - Order 1"
 do_test template-order-2 "Template - Order 2"
 do_test template-order-3 "Template - Order 3"
 do_test template-ticket  "Template - Ticket"
 
 do_test template-rsc-sets-1  "Template - Resource Sets 1"
 do_test template-rsc-sets-2  "Template - Resource Sets 2"
 do_test template-rsc-sets-3  "Template - Resource Sets 3"
 do_test template-rsc-sets-4  "Template - Resource Sets 4"
 
 echo ""
 
 test_results
diff --git a/pengine/test10/group-fail.dot b/pengine/test10/group-fail.dot
new file mode 100644
index 0000000000..58aec718f2
--- /dev/null
+++ b/pengine/test10/group-fail.dot
@@ -0,0 +1,38 @@
+digraph "g" {
+"all_stopped" [ style=bold color="green" fontcolor="orange"]
+"group1_running_0" [ style=bold color="green" fontcolor="orange"]
+"group1_start_0" -> "group1_running_0" [ style = bold]
+"group1_start_0" -> "rsc1_start_0 node1" [ style = bold]
+"group1_start_0" -> "rsc2_start_0 node1" [ style = bold]
+"group1_start_0" -> "rsc3_start_0 node1" [ style = bold]
+"group1_start_0" -> "rsc4_start_0 node1" [ style = bold]
+"group1_start_0" [ style=bold color="green" fontcolor="orange"]
+"group1_stop_0" -> "group1_stopped_0" [ style = bold]
+"group1_stop_0" -> "rsc2_stop_0 node1" [ style = bold]
+"group1_stop_0" -> "rsc4_stop_0 node1" [ style = bold]
+"group1_stop_0" [ style=bold color="green" fontcolor="orange"]
+"group1_stopped_0" -> "group1_start_0" [ style = bold]
+"group1_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"probe_complete node1" [ style=bold color="green" fontcolor="black"]
+"probe_complete node2" [ style=bold color="green" fontcolor="black"]
+"rsc1_start_0 node1" -> "group1_running_0" [ style = bold]
+"rsc1_start_0 node1" -> "rsc2_start_0 node1" [ style = bold]
+"rsc1_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc2_start_0 node1" -> "group1_running_0" [ style = bold]
+"rsc2_start_0 node1" -> "rsc3_start_0 node1" [ style = bold]
+"rsc2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc2_stop_0 node1" -> "all_stopped" [ style = bold]
+"rsc2_stop_0 node1" -> "group1_stopped_0" [ style = bold]
+"rsc2_stop_0 node1" -> "rsc2_start_0 node1" [ style = bold]
+"rsc2_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc3_start_0 node1" -> "group1_running_0" [ style = bold]
+"rsc3_start_0 node1" -> "rsc4_start_0 node1" [ style = bold]
+"rsc3_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc4_start_0 node1" -> "group1_running_0" [ style = bold]
+"rsc4_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc4_stop_0 node1" -> "all_stopped" [ style = bold]
+"rsc4_stop_0 node1" -> "group1_stopped_0" [ style = bold]
+"rsc4_stop_0 node1" -> "rsc2_stop_0 node1" [ style = bold]
+"rsc4_stop_0 node1" -> "rsc4_start_0 node1" [ style = bold]
+"rsc4_stop_0 node1" [ style=bold color="green" fontcolor="black"]
+}
diff --git a/pengine/test10/group-fail.exp b/pengine/test10/group-fail.exp
new file mode 100644
index 0000000000..b9ed6c2cc3
--- /dev/null
+++ b/pengine/test10/group-fail.exp
@@ -0,0 +1,192 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" batch-limit="30" transition_id="0">
+  <synapse id="0">
+    <action_set>
+      <pseudo_event id="14" operation="stopped" operation_key="group1_stopped_0">
+        <attributes CRM_meta_timeout="20000" crm_feature_set="3.0.6"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="6" operation="stop" operation_key="rsc2_stop_0" on_node="node1" on_node_uuid="uuid1"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="9" operation="stop" operation_key="rsc4_stop_0" on_node="node1" on_node_uuid="uuid1"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="13" operation="stop" operation_key="group1_stop_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="1">
+    <action_set>
+      <pseudo_event id="13" operation="stop" operation_key="group1_stop_0">
+        <attributes CRM_meta_timeout="20000" crm_feature_set="3.0.6"/>
+      </pseudo_event>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="2">
+    <action_set>
+      <pseudo_event id="12" operation="running" operation_key="group1_running_0">
+        <attributes CRM_meta_timeout="20000" crm_feature_set="3.0.6"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="5" operation="start" operation_key="rsc1_start_0" on_node="node1" on_node_uuid="uuid1"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="7" operation="start" operation_key="rsc2_start_0" on_node="node1" on_node_uuid="uuid1"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="8" operation="start" operation_key="rsc3_start_0" on_node="node1" on_node_uuid="uuid1"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="10" operation="start" operation_key="rsc4_start_0" on_node="node1" on_node_uuid="uuid1"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="11" operation="start" operation_key="group1_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="3">
+    <action_set>
+      <pseudo_event id="11" operation="start" operation_key="group1_start_0">
+        <attributes CRM_meta_timeout="20000" crm_feature_set="3.0.6"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="14" operation="stopped" operation_key="group1_stopped_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="4">
+    <action_set>
+      <rsc_op id="5" operation="start" operation_key="rsc1_start_0" on_node="node1" on_node_uuid="uuid1">
+        <primitive id="rsc1" class="heartbeat" type="apache"/>
+        <attributes CRM_meta_timeout="20000" crm_feature_set="3.0.6"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="11" operation="start" operation_key="group1_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="5">
+    <action_set>
+      <rsc_op id="7" operation="start" operation_key="rsc2_start_0" on_node="node1" on_node_uuid="uuid1">
+        <primitive id="rsc2" class="heartbeat" type="apache"/>
+        <attributes CRM_meta_timeout="20000" crm_feature_set="3.0.6"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="5" operation="start" operation_key="rsc1_start_0" on_node="node1" on_node_uuid="uuid1"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="6" operation="stop" operation_key="rsc2_stop_0" on_node="node1" on_node_uuid="uuid1"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="11" operation="start" operation_key="group1_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="6">
+    <action_set>
+      <rsc_op id="6" operation="stop" operation_key="rsc2_stop_0" on_node="node1" on_node_uuid="uuid1">
+        <primitive id="rsc2" class="heartbeat" type="apache"/>
+        <attributes CRM_meta_timeout="20000" crm_feature_set="3.0.6"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="9" operation="stop" operation_key="rsc4_stop_0" on_node="node1" on_node_uuid="uuid1"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="13" operation="stop" operation_key="group1_stop_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="7">
+    <action_set>
+      <rsc_op id="8" operation="start" operation_key="rsc3_start_0" on_node="node1" on_node_uuid="uuid1">
+        <primitive id="rsc3" class="heartbeat" type="apache"/>
+        <attributes CRM_meta_timeout="20000" crm_feature_set="3.0.6"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="7" operation="start" operation_key="rsc2_start_0" on_node="node1" on_node_uuid="uuid1"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="11" operation="start" operation_key="group1_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="8">
+    <action_set>
+      <rsc_op id="10" operation="start" operation_key="rsc4_start_0" on_node="node1" on_node_uuid="uuid1">
+        <primitive id="rsc4" class="heartbeat" type="apache"/>
+        <attributes CRM_meta_timeout="20000" crm_feature_set="3.0.6"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="8" operation="start" operation_key="rsc3_start_0" on_node="node1" on_node_uuid="uuid1"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="9" operation="stop" operation_key="rsc4_stop_0" on_node="node1" on_node_uuid="uuid1"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="11" operation="start" operation_key="group1_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="9">
+    <action_set>
+      <rsc_op id="9" operation="stop" operation_key="rsc4_stop_0" on_node="node1" on_node_uuid="uuid1">
+        <primitive id="rsc4" class="heartbeat" type="apache"/>
+        <attributes CRM_meta_timeout="20000" crm_feature_set="3.0.6"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="13" operation="stop" operation_key="group1_stop_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="10" priority="1000000">
+    <action_set>
+      <rsc_op id="4" operation="probe_complete" operation_key="probe_complete" on_node="node2" on_node_uuid="uuid2">
+        <attributes CRM_meta_op_no_wait="true" crm_feature_set="3.0.6"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="11" priority="1000000">
+    <action_set>
+      <rsc_op id="3" operation="probe_complete" operation_key="probe_complete" on_node="node1" on_node_uuid="uuid1">
+        <attributes CRM_meta_op_no_wait="true" crm_feature_set="3.0.6"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="12">
+    <action_set>
+      <pseudo_event id="1" operation="all_stopped" operation_key="all_stopped">
+        <attributes crm_feature_set="3.0.6"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="6" operation="stop" operation_key="rsc2_stop_0" on_node="node1" on_node_uuid="uuid1"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="9" operation="stop" operation_key="rsc4_stop_0" on_node="node1" on_node_uuid="uuid1"/>
+      </trigger>
+    </inputs>
+  </synapse>
+</transition_graph>
+
diff --git a/pengine/test10/group-fail.scores b/pengine/test10/group-fail.scores
new file mode 100644
index 0000000000..12cba91685
--- /dev/null
+++ b/pengine/test10/group-fail.scores
@@ -0,0 +1,19 @@
+Allocation scores:
+group_color: group1 allocation score on node1: 0
+group_color: group1 allocation score on node2: 0
+group_color: rsc1 allocation score on node1: 0
+group_color: rsc1 allocation score on node2: 0
+group_color: rsc2 allocation score on node1: 0
+group_color: rsc2 allocation score on node2: 0
+group_color: rsc3 allocation score on node1: 0
+group_color: rsc3 allocation score on node2: 0
+group_color: rsc4 allocation score on node1: 0
+group_color: rsc4 allocation score on node2: 0
+native_color: rsc1 allocation score on node1: 0
+native_color: rsc1 allocation score on node2: 0
+native_color: rsc2 allocation score on node1: 0
+native_color: rsc2 allocation score on node2: -INFINITY
+native_color: rsc3 allocation score on node1: 0
+native_color: rsc3 allocation score on node2: -INFINITY
+native_color: rsc4 allocation score on node1: 0
+native_color: rsc4 allocation score on node2: -INFINITY
diff --git a/pengine/test10/group-fail.summary b/pengine/test10/group-fail.summary
new file mode 100644
index 0000000000..aa03d2121e
--- /dev/null
+++ b/pengine/test10/group-fail.summary
@@ -0,0 +1,38 @@
+
+Current cluster status:
+Online: [ node1 node2 ]
+
+ Resource Group: group1
+     rsc1	(heartbeat:apache):	Stopped 
+     rsc2	(heartbeat:apache):	Started node1
+     rsc3	(heartbeat:apache):	Stopped 
+     rsc4	(heartbeat:apache):	Started node1
+
+Transition Summary:
+ * Start   rsc1	(node1)
+ * Restart rsc2	(Started node1)
+ * Start   rsc3	(node1)
+ * Restart rsc4	(Started node1)
+
+Executing cluster transition:
+ * Pseudo action:   group1_stop_0
+ * Resource action: rsc4            stop on node1
+ * Resource action: rsc2            stop on node1
+ * Pseudo action:   all_stopped
+ * Pseudo action:   group1_stopped_0
+ * Pseudo action:   group1_start_0
+ * Resource action: rsc1            start on node1
+ * Resource action: rsc2            start on node1
+ * Resource action: rsc3            start on node1
+ * Resource action: rsc4            start on node1
+ * Pseudo action:   group1_running_0
+
+Revised cluster status:
+Online: [ node1 node2 ]
+
+ Resource Group: group1
+     rsc1	(heartbeat:apache):	Started node1
+     rsc2	(heartbeat:apache):	Started node1
+     rsc3	(heartbeat:apache):	Started node1
+     rsc4	(heartbeat:apache):	Started node1
+
diff --git a/pengine/test10/group-fail.xml b/pengine/test10/group-fail.xml
new file mode 100644
index 0000000000..15f26a78de
--- /dev/null
+++ b/pengine/test10/group-fail.xml
@@ -0,0 +1,64 @@
+<cib admin_epoch="0" epoch="1" num_updates="21" dc-uuid="0" have-quorum="false" remote-tls-port="0" validate-with="pacemaker-1.0" cib-last-written="Thu Oct 11 16:15:31 2012">
+  <configuration>
+    <crm_config>
+      <cluster_property_set id="no-stonith">
+        <nvpair id="opt-no-stonith" name="stonith-enabled" value="false"/>
+      </cluster_property_set>
+      <cluster_property_set id="cib-bootstrap-options">
+        <nvpair id="nvpair.id21832" name="no-quorum-policy" value="ignore"/>
+        <nvpair id="nvpair.id21841" name="symmetric-cluster" value="true"/>
+      </cluster_property_set>
+    </crm_config>
+    <nodes>
+      <node id="uuid1" uname="node1" type="member"/>
+      <node id="uuid2" uname="node2" type="member"/>
+    </nodes>
+    <resources>
+      <group id="group1">
+        <primitive id="rsc1" class="heartbeat" type="apache"/>
+        <primitive id="rsc2" class="heartbeat" type="apache"/>
+        <primitive id="rsc3" class="heartbeat" type="apache"/>
+        <primitive id="rsc4" class="heartbeat" type="apache"/>
+      </group>
+    </resources>
+    <constraints/>
+  </configuration>
+  <status>
+    <node_state id="uuid1" ha="active" uname="node1" crmd="online" join="member" expected="member" in_ccm="true">
+      <lrm id="uuid1">
+        <lrm_resources>
+          <lrm_resource id="rsc1" class="heartbeat" type="apache">
+            <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="rsc2" class="heartbeat" type="apache">
+            <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="0" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="rsc3" class="heartbeat" type="apache">
+            <lrm_rsc_op id="rsc3_last_0" operation_key="rsc3_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="rsc4" class="heartbeat" type="apache">
+            <lrm_rsc_op id="rsc4_last_0" operation_key="rsc4_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;1:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="0" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state id="uuid2" ha="active" uname="node2" crmd="online" join="member" expected="member" in_ccm="true">
+      <lrm id="uuid2">
+        <lrm_resources>
+          <lrm_resource id="rsc1" class="heartbeat" type="apache">
+            <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="rsc2" class="heartbeat" type="apache">
+            <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="rsc3" class="heartbeat" type="apache">
+            <lrm_rsc_op id="rsc3_last_0" operation_key="rsc3_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="rsc4" class="heartbeat" type="apache">
+            <lrm_rsc_op id="rsc4_last_0" operation_key="rsc4_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" crm_feature_set="3.0.6" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+  </status>
+</cib>