diff --git a/pengine/allocate.c b/pengine/allocate.c
index 01b5b985ff..7956916376 100644
--- a/pengine/allocate.c
+++ b/pengine/allocate.c
@@ -1,2492 +1,2505 @@
 /*
  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  *
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 
 #include <glib.h>
 
 #include <crm/pengine/status.h>
 #include <pengine.h>
 #include <allocate.h>
 #include <utils.h>
 
 CRM_TRACE_INIT_DATA(pe_allocate);
 
 void set_alloc_actions(pe_working_set_t * data_set);
 void migrate_reload_madness(pe_working_set_t * data_set);
 extern void ReloadRsc(resource_t * rsc, node_t *node, pe_working_set_t * data_set);
 extern gboolean DeleteRsc(resource_t * rsc, node_t * node, gboolean optional, pe_working_set_t * data_set);
 static void apply_remote_node_ordering(pe_working_set_t *data_set);
 
 enum remote_connection_state
 {
     remote_state_unknown = 0,
     remote_state_alive = 1,
     remote_state_resting = 2,
     remote_state_failed = 3,
     remote_state_stopped = 4
 };
 
 static enum remote_connection_state get_remote_node_state(pe_node_t *node);
 
 
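 /* Allocation method tables, indexed by resource variant (rsc->variant):
  * native, group, clone, master, and container. complex_set_cmds() points
  * each resource's cmds member at the matching entry.
  */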
 resource_alloc_functions_t resource_class_alloc_functions[] = {
     {
      native_merge_weights,
      native_color,
      native_create_actions,
      native_create_probe,
      native_internal_constraints,
      native_rsc_colocation_lh,
      native_rsc_colocation_rh,
      native_rsc_location,
      native_action_flags,
      native_update_actions,
      native_expand,
      native_append_meta,
      },
     {
      group_merge_weights,
      group_color,
      group_create_actions,
      native_create_probe,
      group_internal_constraints,
      group_rsc_colocation_lh,
      group_rsc_colocation_rh,
      group_rsc_location,
      group_action_flags,
      group_update_actions,
      group_expand,
      group_append_meta,
      },
     {
      clone_merge_weights,
      clone_color,
      clone_create_actions,
      clone_create_probe,
      clone_internal_constraints,
      clone_rsc_colocation_lh,
      clone_rsc_colocation_rh,
      clone_rsc_location,
      clone_action_flags,
      container_update_actions,
      clone_expand,
      clone_append_meta,
      },
     {
      master_merge_weights,
      master_color,
      master_create_actions,
      clone_create_probe,
      master_internal_constraints,
      clone_rsc_colocation_lh,
      master_rsc_colocation_rh,
      clone_rsc_location,
      clone_action_flags,
      container_update_actions,
      clone_expand,
      master_append_meta,
      },
     {
      container_merge_weights,
      container_color,
      container_create_actions,
      container_create_probe,
      container_internal_constraints,
      container_rsc_colocation_lh,
      container_rsc_colocation_rh,
      container_rsc_location,
      container_action_flags,
      container_update_actions,
      container_expand,
      container_append_meta,
      }
 };
 
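 /*!
  * \internal
  * \brief Set or clear flags on an action, tracing any change
  *
  * \param[in,out] action  Action to modify
  * \param[in]     flags   Flags to set (or clear, if pe_action_clear is among them)
  * \param[in]     source  Name of calling function, for tracing
  * \param[in]     line    Line number of caller, for tracing
  *
  * \return TRUE if any flag changed, FALSE otherwise
  */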
 gboolean
 update_action_flags(action_t * action, enum pe_action_flags flags, const char *source, int line)
 {
     static unsigned long calls = 0;
     gboolean changed = FALSE;
     gboolean clear = is_set(flags, pe_action_clear);
     enum pe_action_flags last = action->flags;
 
     if (clear) {
         action->flags = crm_clear_bit(source, line, action->uuid, action->flags, flags);
     } else {
         action->flags = crm_set_bit(source, line, action->uuid, action->flags, flags);
     }
 
     if (last != action->flags) {
         calls++;
         changed = TRUE;
         /* Useful for tracking down _who_ changed a specific flag */
         /* CRM_ASSERT(calls != 534); */
         clear_bit(flags, pe_action_clear);
         crm_trace("%s on %s: %sset flags 0x%.6x (was 0x%.6x, now 0x%.6x, %lu, %s)",
                   action->uuid, action->node ? action->node->details->uname : "[none]",
                   clear ? "un-" : "", flags, last, action->flags, calls, source);
     }
 
     return changed;
 }
 
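 /*!
  * \internal
  * \brief Check whether a resource's agent class, provider, or type changed
  *
  * If the definition changed while the resource is active on the node,
  * schedule a stop so the resource is restarted with the new definition.
  *
  * \return TRUE if the resource's history on the node should be deleted
  */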
 static gboolean
 check_rsc_parameters(resource_t * rsc, node_t * node, xmlNode * rsc_entry,
                      gboolean active_here, pe_working_set_t * data_set)
 {
     int attr_lpc = 0;
     gboolean force_restart = FALSE;
     gboolean delete_resource = FALSE;
     gboolean changed = FALSE;
 
     const char *value = NULL;
     const char *old_value = NULL;
 
     const char *attr_list[] = {
         XML_ATTR_TYPE,
         XML_AGENT_ATTR_CLASS,
         XML_AGENT_ATTR_PROVIDER
     };
 
     for (; attr_lpc < DIMOF(attr_list); attr_lpc++) {
         value = crm_element_value(rsc->xml, attr_list[attr_lpc]);
         old_value = crm_element_value(rsc_entry, attr_list[attr_lpc]);
         if (value == old_value  /* i.e. both NULL */
             || crm_str_eq(value, old_value, TRUE)) {
             continue;
         }
 
         changed = TRUE;
         trigger_unfencing(rsc, node, "Device definition changed", NULL, data_set);
         if (active_here) {
             force_restart = TRUE;
             crm_notice("Forcing restart of %s on %s, %s changed: %s -> %s",
                        rsc->id, node->details->uname, attr_list[attr_lpc],
                        crm_str(old_value), crm_str(value));
         }
     }
     if (force_restart) {
         /* make sure the restart happens */
         stop_action(rsc, node, FALSE);
         set_bit(rsc->flags, pe_rsc_start_pending);
         delete_resource = TRUE;
 
     } else if (changed) {
         delete_resource = TRUE;
     }
     return delete_resource;
 }
 
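 /*!
  * \internal
  * \brief Schedule cancellation of a recurring operation
  *
  * \param[in,out] rsc          Resource the operation belongs to
  * \param[in]     xml_op       History entry of the operation to cancel
  * \param[in,out] active_node  Node where the operation is running
  * \param[in]     reason       Human-readable reason for the cancellation
  * \param[in,out] data_set     Cluster working set
  */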
 static void
 CancelXmlOp(resource_t * rsc, xmlNode * xml_op, node_t * active_node,
             const char *reason, pe_working_set_t * data_set)
 {
     int interval = 0;
     action_t *cancel = NULL;
 
     char *key = NULL;
     const char *task = NULL;
     const char *call_id = NULL;
     const char *interval_s = NULL;
 
     CRM_CHECK(xml_op != NULL, return);
     CRM_CHECK(active_node != NULL, return);
 
     task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
     interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL);
 
     interval = crm_parse_int(interval_s, "0");
 
     /* we need to reconstruct the key because of the way we used to construct resource IDs */
     key = generate_op_key(rsc->id, task, interval);
 
     crm_info("Action %s on %s will be stopped: %s",
              key, active_node->details->uname, reason ? reason : "unknown");
 
     /* TODO: This looks highly dangerous if we ever try to schedule 'key' too */
     cancel = custom_action(rsc, strdup(key), RSC_CANCEL, active_node, FALSE, TRUE, data_set);
 
     free(cancel->task);
     free(cancel->cancel_task);
     cancel->task = strdup(RSC_CANCEL);
     cancel->cancel_task = strdup(task);
 
     add_hash_param(cancel->meta, XML_LRM_ATTR_TASK, task);
     add_hash_param(cancel->meta, XML_LRM_ATTR_CALLID, call_id);
     add_hash_param(cancel->meta, XML_LRM_ATTR_INTERVAL, interval_s);
 
     custom_action_order(rsc, stop_key(rsc), NULL, rsc, NULL, cancel, pe_order_optional, data_set);
     free(key);
     key = NULL;
 }
 
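 /*!
  * \internal
  * \brief Check whether an action's definition changed since it was last run
  *
  * Compare the digests recorded in the operation history against freshly
  * calculated ones, and schedule a restart, a reload, or rescheduling of the
  * recurring operation as appropriate.
  *
  * \return TRUE if the action definition changed, FALSE otherwise
  */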
 static gboolean
 check_action_definition(resource_t * rsc, node_t * active_node, xmlNode * xml_op,
                         pe_working_set_t * data_set)
 {
     char *key = NULL;
     int interval = 0;
     const char *interval_s = NULL;
     const op_digest_cache_t *digest_data = NULL;
     gboolean did_change = FALSE;
 
     const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     const char *op_version;
     const char *digest_secure = NULL;
 
     CRM_CHECK(active_node != NULL, return FALSE);
     if (safe_str_eq(task, RSC_STOP)) {
         return FALSE;
     }
 
     interval_s = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL);
     interval = crm_parse_int(interval_s, "0");
 
     if (interval > 0) {
         xmlNode *op_match = NULL;
 
         /* we need to reconstruct the key because of the way we used to construct resource IDs */
         key = generate_op_key(rsc->id, task, interval);
 
         pe_rsc_trace(rsc, "Checking parameters for %s", key);
         op_match = find_rsc_op_entry(rsc, key);
 
         if (op_match == NULL && is_set(data_set->flags, pe_flag_stop_action_orphans)) {
             CancelXmlOp(rsc, xml_op, active_node, "orphan", data_set);
             free(key);
             return TRUE;
 
         } else if (op_match == NULL) {
             pe_rsc_debug(rsc, "Orphan action detected: %s on %s", key, active_node->details->uname);
             free(key);
             return TRUE;
         }
         free(key);
         key = NULL;
     }
 
     crm_trace("Testing %s_%s_%d on %s",
               rsc->id, task, interval, active_node->details->uname);
     if (interval == 0 && safe_str_eq(task, RSC_STATUS)) {
         /* Reload based on the start action not a probe */
         task = RSC_START;
 
     } else if (interval == 0 && safe_str_eq(task, RSC_MIGRATED)) {
         /* Reload based on the start action not a migrate */
         task = RSC_START;
     } else if (interval == 0 && safe_str_eq(task, RSC_PROMOTE)) {
         /* Reload based on the start action not a promote */
         task = RSC_START;
     }
 
     op_version = crm_element_value(xml_op, XML_ATTR_CRM_VERSION);
     digest_data = rsc_action_digest_cmp(rsc, xml_op, active_node, data_set);
 
     if(is_set(data_set->flags, pe_flag_sanitized)) {
         digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST);
     }
 
     if(digest_data->rc != RSC_DIGEST_MATCH
        && digest_secure
        && digest_data->digest_secure_calc
        && strcmp(digest_data->digest_secure_calc, digest_secure) == 0) {
         fprintf(stdout, "Only 'private' parameters to %s_%s_%d on %s changed: %s\n",
                 rsc->id, task, interval, active_node->details->uname,
                 crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
 
     } else if (digest_data->rc == RSC_DIGEST_RESTART) {
         /* Changes that force a restart */
         const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
 
         did_change = TRUE;
         key = generate_op_key(rsc->id, task, interval);
         crm_log_xml_info(digest_data->params_restart, "params:restart");
         pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (restart:%s) %s",
                  key, active_node->details->uname,
                  crm_str(digest_restart), digest_data->digest_restart_calc,
                  op_version, crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
 
         custom_action(rsc, key, task, NULL, FALSE, TRUE, data_set);
         trigger_unfencing(rsc, active_node, "Device parameters changed", NULL, data_set);
 
     } else if ((digest_data->rc == RSC_DIGEST_ALL) || (digest_data->rc == RSC_DIGEST_UNKNOWN)) {
         /* Changes that can potentially be handled by a reload */
         const char *digest_restart = crm_element_value(xml_op, XML_LRM_ATTR_RESTART_DIGEST);
         const char *digest_all = crm_element_value(xml_op, XML_LRM_ATTR_OP_DIGEST);
 
         did_change = TRUE;
         trigger_unfencing(rsc, active_node, "Device parameters changed (reload)", NULL, data_set);
         crm_log_xml_info(digest_data->params_all, "params:reload");
         key = generate_op_key(rsc->id, task, interval);
         pe_rsc_info(rsc, "Parameters to %s on %s changed: was %s vs. now %s (reload:%s) %s",
                  key, active_node->details->uname,
                  crm_str(digest_all), digest_data->digest_all_calc, op_version,
                  crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
 
         if (interval > 0) {
             action_t *op = NULL;
 
 #if 0
             /* Always reload/restart the entire resource */
             ReloadRsc(rsc, active_node, data_set);
 #else
             /* Re-sending the recurring op is sufficient - the old one will be cancelled automatically */
             op = custom_action(rsc, key, task, active_node, TRUE, TRUE, data_set);
             set_bit(op->flags, pe_action_reschedule);
 #endif
 
         } else if (digest_restart && rsc->isolation_wrapper == NULL && (uber_parent(rsc))->isolation_wrapper == NULL) {
             pe_rsc_trace(rsc, "Reloading '%s' action for resource %s", task, rsc->id);
 
             /* Reload this resource */
             ReloadRsc(rsc, active_node, data_set);
             free(key);
 
         } else {
             pe_rsc_trace(rsc, "Resource %s doesn't know how to reload", rsc->id);
 
             /* Re-send the start/demote/promote op
              * Recurring ops will be detected independently
              */
             custom_action(rsc, key, task, NULL, FALSE, TRUE, data_set);
         }
     }
 
     return did_change;
 }
 
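 /*!
  * \internal
  * \brief Check a resource's operation history on a node
  *
  * Schedule deletion of orphaned resources, cancel recurring operations when
  * the resource or node is in maintenance mode, re-check the definition of
  * each relevant operation, and clear the resource's fail count when a
  * changed definition is detected.
  */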
 static void
 check_actions_for(xmlNode * rsc_entry, resource_t * rsc, node_t * node, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     int offset = -1;
     int interval = 0;
     int stop_index = 0;
     int start_index = 0;
 
     const char *task = NULL;
     const char *interval_s = NULL;
 
     xmlNode *rsc_op = NULL;
     GListPtr op_list = NULL;
     GListPtr sorted_op_list = NULL;
     gboolean is_probe = FALSE;
     gboolean did_change = FALSE;
 
     CRM_CHECK(node != NULL, return);
 
     if (is_set(rsc->flags, pe_rsc_orphan)) {
         resource_t *parent = uber_parent(rsc);
         if(parent == NULL
            || pe_rsc_is_clone(parent) == FALSE
            || is_set(parent->flags, pe_rsc_unique)) {
             pe_rsc_trace(rsc, "Skipping param check for %s and deleting: orphan", rsc->id);
             DeleteRsc(rsc, node, FALSE, data_set);
         } else {
             pe_rsc_trace(rsc, "Skipping param check for %s (orphan clone)", rsc->id);
         }
         return;
 
     } else if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) {
         if (check_rsc_parameters(rsc, node, rsc_entry, FALSE, data_set)) {
             DeleteRsc(rsc, node, FALSE, data_set);
         }
         pe_rsc_trace(rsc, "Skipping param check for %s: no longer active on %s",
                      rsc->id, node->details->uname);
         return;
     }
 
     pe_rsc_trace(rsc, "Processing %s on %s", rsc->id, node->details->uname);
 
     if (check_rsc_parameters(rsc, node, rsc_entry, TRUE, data_set)) {
         DeleteRsc(rsc, node, FALSE, data_set);
     }
 
     for (rsc_op = __xml_first_child(rsc_entry); rsc_op != NULL; rsc_op = __xml_next_element(rsc_op)) {
         if (crm_str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, TRUE)) {
             op_list = g_list_prepend(op_list, rsc_op);
         }
     }
 
     sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
     calculate_active_ops(sorted_op_list, &start_index, &stop_index);
 
     for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
         xmlNode *rsc_op = (xmlNode *) gIter->data;
 
         offset++;
 
         if (start_index < stop_index) {
             /* stopped */
             continue;
         } else if (offset < start_index) {
             /* action occurred prior to a start */
             continue;
         }
 
         is_probe = FALSE;
         did_change = FALSE;
         task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
 
         interval_s = crm_element_value(rsc_op, XML_LRM_ATTR_INTERVAL);
         interval = crm_parse_int(interval_s, "0");
 
         if (interval == 0 && safe_str_eq(task, RSC_STATUS)) {
             is_probe = TRUE;
         }
 
         if (interval > 0 &&
             (is_set(rsc->flags, pe_rsc_maintenance) || node->details->maintenance)) {
             CancelXmlOp(rsc, rsc_op, node, "maintenance mode", data_set);
 
         } else if (is_probe || safe_str_eq(task, RSC_START) || safe_str_eq(task, RSC_PROMOTE) || interval > 0
                    || safe_str_eq(task, RSC_MIGRATED)) {
             did_change = check_action_definition(rsc, node, rsc_op, data_set);
         }
 
         if (did_change && get_failcount(node, rsc, NULL, data_set)) {
             char *key = NULL;
             action_t *action_clear = NULL;
 
             key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0);
             action_clear =
                 custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT, node, FALSE, TRUE, data_set);
             set_bit(action_clear->flags, pe_action_runnable);
 
             crm_notice("Clearing failure of %s on %s "
                        "because action definition changed " CRM_XS " %s",
                        rsc->id, node->details->uname, action_clear->uuid);
         }
     }
 
     g_list_free(sorted_op_list);
 
 }
 
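 /*!
  * \internal
  * \brief Build a list of resources (including children) matching an ID
  *
  * \param[in] renamed_clones  If TRUE, also match against clone names
  * \param[in] partial         If TRUE, treat \p id as a substring to match
  *
  * \return Result list with any matches prepended
  */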
 static GListPtr
 find_rsc_list(GListPtr result, resource_t * rsc, const char *id, gboolean renamed_clones,
               gboolean partial, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     gboolean match = FALSE;
 
     if (id == NULL) {
         return NULL;
 
     } else if (rsc == NULL && data_set) {
 
         for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
             resource_t *child = (resource_t *) gIter->data;
 
             result = find_rsc_list(result, child, id, renamed_clones, partial, NULL);
         }
 
         return result;
 
     } else if (rsc == NULL) {
         return NULL;
     }
 
     if (partial) {
         if (strstr(rsc->id, id)) {
             match = TRUE;
 
         } else if (renamed_clones && rsc->clone_name && strstr(rsc->clone_name, id)) {
             match = TRUE;
         }
 
     } else {
         if (strcmp(rsc->id, id) == 0) {
             match = TRUE;
 
         } else if (renamed_clones && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
             match = TRUE;
         }
     }
 
     if (match) {
         result = g_list_prepend(result, rsc);
     }
 
     if (rsc->children) {
         gIter = rsc->children;
         for (; gIter != NULL; gIter = gIter->next) {
             resource_t *child = (resource_t *) gIter->data;
 
             result = find_rsc_list(result, child, id, renamed_clones, partial, NULL);
         }
     }
 
     return result;
 }
 
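 /*!
  * \internal
  * \brief Check every node's resource history for changed definitions
  *
  * Walk the CIB status section and run check_actions_for() on each recorded
  * resource of every node that is able to run resources or is in maintenance
  * mode (where existing monitor operations may need to be cancelled).
  */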
 static void
 check_actions(pe_working_set_t * data_set)
 {
     const char *id = NULL;
     node_t *node = NULL;
     xmlNode *lrm_rscs = NULL;
     xmlNode *status = get_object_root(XML_CIB_TAG_STATUS, data_set->input);
 
     xmlNode *node_state = NULL;
 
     for (node_state = __xml_first_child(status); node_state != NULL;
          node_state = __xml_next_element(node_state)) {
         if (crm_str_eq((const char *)node_state->name, XML_CIB_TAG_STATE, TRUE)) {
             id = crm_element_value(node_state, XML_ATTR_ID);
             lrm_rscs = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
             lrm_rscs = find_xml_node(lrm_rscs, XML_LRM_TAG_RESOURCES, FALSE);
 
             node = pe_find_node_id(data_set->nodes, id);
 
             if (node == NULL) {
                 continue;
 
             /* Still need to check actions for a maintenance node to cancel existing monitor operations */
             } else if (can_run_resources(node) == FALSE && node->details->maintenance == FALSE) {
                 crm_trace("Skipping param check for %s: can't run resources",
                           node->details->uname);
                 continue;
             }
 
             crm_trace("Processing node %s", node->details->uname);
             if (node->details->online || is_set(data_set->flags, pe_flag_stonith_enabled)) {
                 xmlNode *rsc_entry = NULL;
 
                 for (rsc_entry = __xml_first_child(lrm_rscs); rsc_entry != NULL;
                      rsc_entry = __xml_next_element(rsc_entry)) {
                     if (crm_str_eq((const char *)rsc_entry->name, XML_LRM_TAG_RESOURCE, TRUE)) {
 
                         if (xml_has_children(rsc_entry)) {
                             GListPtr gIter = NULL;
                             GListPtr result = NULL;
                             const char *rsc_id = ID(rsc_entry);
 
                             CRM_CHECK(rsc_id != NULL, return);
 
                             result = find_rsc_list(NULL, NULL, rsc_id, TRUE, FALSE, data_set);
                             for (gIter = result; gIter != NULL; gIter = gIter->next) {
                                 resource_t *rsc = (resource_t *) gIter->data;
 
                                 if (rsc->variant != pe_native) {
                                     continue;
                                 }
                                 check_actions_for(rsc_entry, rsc, node, data_set);
                             }
                             g_list_free(result);
                         }
                     }
                 }
             }
         }
     }
 }
 
 static gboolean
 apply_placement_constraints(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     crm_trace("Applying constraints...");
 
     for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
         rsc_to_node_t *cons = (rsc_to_node_t *) gIter->data;
 
         cons->rsc_lh->cmds->rsc_location(cons->rsc_lh, cons);
     }
 
     return TRUE;
 
 }
 
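 /*!
  * \internal
  * \brief Check whether a fail count clearing action exists for a resource on a node
  *
  * \return TRUE if such an action has already been scheduled, FALSE otherwise
  */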
 static gboolean
 failcount_clear_action_exists(node_t * node, resource_t * rsc)
 {
     gboolean rc = FALSE;
     char *key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0);
     GListPtr list = find_actions_exact(rsc->actions, key, node);
 
     if (list) {
         rc = TRUE;
     }
     g_list_free(list);
     free(key);
 
     return rc;
 }
 
 /*!
  * \internal
  * \brief Force resource away if failures hit migration threshold
  *
  * \param[in,out] rsc       Resource to check for failures
  * \param[in,out] node      Node to check for failures
  * \param[in,out] data_set  Cluster working set to update
  */
 static void
 check_migration_threshold(resource_t *rsc, node_t *node,
                           pe_working_set_t *data_set)
 {
     int fail_count, countdown;
     resource_t *failed;
 
     /* Migration threshold of 0 means never force away */
     if (rsc->migration_threshold == 0) {
         return;
     }
 
     /* If there are no failures, there's no need to force away */
     fail_count = get_failcount_all(node, rsc, NULL, data_set);
     if (fail_count <= 0) {
         return;
     }
 
     /* How many more times recovery will be tried on this node */
     countdown = QB_MAX(rsc->migration_threshold - fail_count, 0);
 
     /* If failed resource has a parent, we'll force the parent away */
     failed = rsc;
     if (is_not_set(rsc->flags, pe_rsc_unique)) {
         failed = uber_parent(rsc);
     }
 
     if (countdown == 0) {
         resource_location(failed, node, -INFINITY, "__fail_limit__", data_set);
         crm_warn("Forcing %s away from %s after %d failures (max=%d)",
                  failed->id, node->details->uname, fail_count,
                  rsc->migration_threshold);
     } else {
         crm_info("%s can fail %d more times on %s before being forced off",
                  failed->id, countdown, node->details->uname);
     }
 }
 
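 /*!
  * \internal
  * \brief Apply stickiness and check migration threshold on a node
  *
  * Recurse into any children; for a managed primitive running on the given
  * node, prefer the current location with the resource's stickiness, then
  * force the resource away if its fail count has reached migration-threshold
  * (unless a fail count clearing action is already scheduled).
  */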
 static void
 common_apply_stickiness(resource_t * rsc, node_t * node, pe_working_set_t * data_set)
 {
     if (rsc->children) {
         GListPtr gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             resource_t *child_rsc = (resource_t *) gIter->data;
 
             common_apply_stickiness(child_rsc, node, data_set);
         }
         return;
     }
 
     if (is_set(rsc->flags, pe_rsc_managed)
         && rsc->stickiness != 0 && g_list_length(rsc->running_on) == 1) {
         node_t *current = pe_find_node_id(rsc->running_on, node->details->id);
         node_t *match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
 
         if (current == NULL) {
 
         } else if (match != NULL || is_set(data_set->flags, pe_flag_symmetric_cluster)) {
             resource_t *sticky_rsc = rsc;
 
             resource_location(sticky_rsc, node, rsc->stickiness, "stickiness", data_set);
             pe_rsc_debug(sticky_rsc, "Resource %s: preferring current location"
                          " (node=%s, weight=%d)", sticky_rsc->id,
                          node->details->uname, rsc->stickiness);
         } else {
             GHashTableIter iter;
             node_t *nIter = NULL;
 
             pe_rsc_debug(rsc, "Ignoring stickiness for %s: the cluster is asymmetric"
                          " and node %s is not explicitly allowed", rsc->id, node->details->uname);
             g_hash_table_iter_init(&iter, rsc->allowed_nodes);
             while (g_hash_table_iter_next(&iter, NULL, (void **)&nIter)) {
                 crm_err("%s[%s] = %d", rsc->id, nIter->details->uname, nIter->weight);
             }
         }
     }
 
     /* Check the migration threshold only if a failcount clear action
      * has not already been placed for this resource on the node.
      * There is no sense in potentially forcing the resource from this
      * node if the failcount is being reset anyway. */
     if (failcount_clear_action_exists(node, rsc) == FALSE) {
         check_migration_threshold(rsc, node, data_set);
     }
 }
 
 void
 complex_set_cmds(resource_t * rsc)
 {
     GListPtr gIter = rsc->children;
 
     rsc->cmds = &resource_class_alloc_functions[rsc->variant];
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         complex_set_cmds(child_rsc);
     }
 }
 
 void
 set_alloc_actions(pe_working_set_t * data_set)
 {
 
     GListPtr gIter = data_set->resources;
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         complex_set_cmds(rsc);
     }
 }
 
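 /* g_hash_table_foreach() callback: fold any node attribute whose name starts
  * with "#health" into the running total pointed to by user_data.
  */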
 static void
 calculate_system_health(gpointer gKey, gpointer gValue, gpointer user_data)
 {
     const char *key = (const char *)gKey;
     const char *value = (const char *)gValue;
     int *system_health = (int *)user_data;
 
     if (!gKey || !gValue || !user_data) {
         return;
     }
 
     /* Does it start with #health? */
     if (0 == strncmp(key, "#health", 7)) {
         int score;
 
         /* Convert the value into an integer */
         score = char2score(value);
 
         /* Add it to the running total */
         *system_health = merge_weights(score, *system_health);
     }
 }
 
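 /*!
  * \internal
  * \brief Translate node health attributes into location scores
  *
  * Set the red/yellow/green score values according to the configured
  * node-health-strategy, then create a location constraint for every resource
  * on each node whose combined health score is nonzero.
  */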
 static gboolean
 apply_system_health(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     const char *health_strategy = pe_pref(data_set->config_hash, "node-health-strategy");
     int base_health = 0;
 
     if (health_strategy == NULL || safe_str_eq(health_strategy, "none")) {
         /* Prevent any accidental health -> score translation */
         node_score_red = 0;
         node_score_yellow = 0;
         node_score_green = 0;
         return TRUE;
 
     } else if (safe_str_eq(health_strategy, "migrate-on-red")) {
 
         /* Resources on nodes which have health values of red are
          * weighted away from that node.
          */
         node_score_red = -INFINITY;
         node_score_yellow = 0;
         node_score_green = 0;
 
     } else if (safe_str_eq(health_strategy, "only-green")) {
 
         /* Resources on nodes which have health values of red or yellow
          * are forced away from that node.
          */
         node_score_red = -INFINITY;
         node_score_yellow = -INFINITY;
         node_score_green = 0;
 
     } else if (safe_str_eq(health_strategy, "progressive")) {
         /* Same as the above, but use the r/y/g scores provided by the user
          * Defaults are provided by the pe_prefs table
          * Also, custom health "base score" can be used
          */
         base_health = crm_parse_int(pe_pref(data_set->config_hash, "node-health-base"), "0");
 
     } else if (safe_str_eq(health_strategy, "custom")) {
 
         /* Requires the admin to configure the rsc_location constraints for
          * processing the stored health scores
          */
         /* TODO: Check for the existence of appropriate node health constraints */
         return TRUE;
 
     } else {
         crm_err("Unknown node health strategy: %s", health_strategy);
         return FALSE;
     }
 
     crm_info("Applying automated node health strategy: %s", health_strategy);
 
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         int system_health = base_health;
         node_t *node = (node_t *) gIter->data;
 
         /* Search through the node hash table for system health entries. */
         g_hash_table_foreach(node->details->attrs, calculate_system_health, &system_health);
 
         crm_info(" Node %s has an combined system health of %d",
                  node->details->uname, system_health);
 
         /* If the health is non-zero, then create a new rsc2node so that the
          * weight will be added later on.
          */
         if (system_health != 0) {
 
             GListPtr gIter2 = data_set->resources;
 
             for (; gIter2 != NULL; gIter2 = gIter2->next) {
                 resource_t *rsc = (resource_t *) gIter2->data;
 
                 rsc2node_new(health_strategy, rsc, system_health, NULL, node, data_set);
             }
         }
 
     }
 
     return TRUE;
 }
 
 gboolean
 stage0(pe_working_set_t * data_set)
 {
     xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
 
     if (data_set->input == NULL) {
         return FALSE;
     }
 
     if (is_set(data_set->flags, pe_flag_have_status) == FALSE) {
         crm_trace("Calculating status");
         cluster_status(data_set);
     }
 
     set_alloc_actions(data_set);
     apply_system_health(data_set);
     unpack_constraints(cib_constraints, data_set);
 
     return TRUE;
 }
 
 /*
  * Check nodes for resources started outside of the LRM
  */
 gboolean
 probe_resources(pe_working_set_t * data_set)
 {
     action_t *probe_node_complete = NULL;
 
     for (GListPtr gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
         const char *probed = g_hash_table_lookup(node->details->attrs, CRM_OP_PROBED);
 
         if (is_container_remote_node(node)) {
             /* TODO enable guest node probes once ordered probing is implemented */
             continue;
 
         } else if (node->details->online == FALSE && node->details->remote_rsc) {
-            // TODO figure out why this results in fence loop
-            /*
             enum remote_connection_state state = get_remote_node_state(node);
             if(state == remote_state_failed) {
                 pe_fence_node(data_set, node, "the connection is unrecoverable");
             }
-            */
             continue;
 
         } else if(node->details->online == FALSE) {
             continue;
 
         } else if (node->details->unclean) {
             continue;
 
         } else if (node->details->rsc_discovery_enabled == FALSE) {
             /* resource discovery is disabled for this node */
             continue;
         }
 
         if (probed != NULL && crm_is_true(probed) == FALSE) {
             action_t *probe_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_REPROBE, node->details->uname),
                                                CRM_OP_REPROBE, node, FALSE, TRUE, data_set);
 
             add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
             continue;
         }
 
         for (GListPtr gIter2 = data_set->resources; gIter2 != NULL; gIter2 = gIter2->next) {
             resource_t *rsc = (resource_t *) gIter2->data;
 
             rsc->cmds->create_probe(rsc, node, probe_node_complete, FALSE, data_set);
         }
     }
     return TRUE;
 }
 
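 /* If a resource (or its top-level parent) uses exclusive discovery, ban it
  * from every allowed node whose discovery mode is not exclusive.
  */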
 static void
 rsc_discover_filter(resource_t *rsc, node_t *node)
 {
     GListPtr gIter = rsc->children;
     resource_t *top = uber_parent(rsc);
     node_t *match;
 
     if (rsc->exclusive_discover == FALSE && top->exclusive_discover == FALSE) {
         return;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
         rsc_discover_filter(child_rsc, node);
     }
 
     match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
     if (match && match->rsc_discover_mode != discover_exclusive) {
         match->weight = -INFINITY;
     }
 }
 
 /*
  * Count how many valid nodes we have (so we know the maximum number of
  *  colors we can resolve).
  *
  * Apply node constraints (i.e. filter the "allowed_nodes" part of resources).
  */
 gboolean
 stage2(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     crm_trace("Applying placement constraints");
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         if (node == NULL) {
             /* error */
 
         } else if (node->weight >= 0.0  /* global weight */
                    && node->details->online && node->details->type != node_ping) {
             data_set->max_valid_nodes++;
         }
     }
 
     apply_placement_constraints(data_set);
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         GListPtr gIter2 = NULL;
         node_t *node = (node_t *) gIter->data;
 
         gIter2 = data_set->resources;
         for (; gIter2 != NULL; gIter2 = gIter2->next) {
             resource_t *rsc = (resource_t *) gIter2->data;
 
             common_apply_stickiness(rsc, node, data_set);
             rsc_discover_filter(rsc, node);
         }
     }
 
     return TRUE;
 }
 
 /*
  * Create internal resource constraints before allocation
  */
 gboolean
 stage3(pe_working_set_t * data_set)
 {
 
     GListPtr gIter = data_set->resources;
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         rsc->cmds->internal_constraints(rsc, data_set);
     }
 
     return TRUE;
 }
 
 /*
  * Check for orphaned or redefined actions
  */
 gboolean
 stage4(pe_working_set_t * data_set)
 {
     check_actions(data_set);
     return TRUE;
 }
 
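 /*!
  * \internal
  * \brief Compare two resources to determine their allocation order
  *
  * Sort by priority, then by the merged score on the node each resource is
  * currently running on, then by the merged score on each node in the list
  * passed as \p data.
  *
  * \return -1 if \p a should be allocated first, 1 if \p b should, else 0
  */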
 static gint
 sort_rsc_process_order(gconstpointer a, gconstpointer b, gpointer data)
 {
     int rc = 0;
     int r1_weight = -INFINITY;
     int r2_weight = -INFINITY;
 
     const char *reason = "existence";
 
     const GListPtr nodes = (GListPtr) data;
     resource_t *resource1 = (resource_t *) convert_const_pointer(a);
     resource_t *resource2 = (resource_t *) convert_const_pointer(b);
 
     node_t *r1_node = NULL;
     node_t *r2_node = NULL;
     GListPtr gIter = NULL;
     GHashTable *r1_nodes = NULL;
     GHashTable *r2_nodes = NULL;
 
     if (a == NULL && b == NULL) {
         goto done;
     }
     if (a == NULL) {
         return 1;
     }
     if (b == NULL) {
         return -1;
     }
 
     reason = "priority";
     r1_weight = resource1->priority;
     r2_weight = resource2->priority;
 
     if (r1_weight > r2_weight) {
         rc = -1;
         goto done;
     }
 
     if (r1_weight < r2_weight) {
         rc = 1;
         goto done;
     }
 
     reason = "no node list";
     if (nodes == NULL) {
         goto done;
     }
 
     r1_nodes =
         rsc_merge_weights(resource1, resource1->id, NULL, NULL, 1,
                           pe_weights_forward | pe_weights_init);
     dump_node_scores(LOG_TRACE, NULL, resource1->id, r1_nodes);
     r2_nodes =
         rsc_merge_weights(resource2, resource2->id, NULL, NULL, 1,
                           pe_weights_forward | pe_weights_init);
     dump_node_scores(LOG_TRACE, NULL, resource2->id, r2_nodes);
 
     /* Current location score */
     reason = "current location";
     r1_weight = -INFINITY;
     r2_weight = -INFINITY;
 
     if (resource1->running_on) {
         r1_node = g_list_nth_data(resource1->running_on, 0);
         r1_node = g_hash_table_lookup(r1_nodes, r1_node->details->id);
         if (r1_node != NULL) {
             r1_weight = r1_node->weight;
         }
     }
     if (resource2->running_on) {
         r2_node = g_list_nth_data(resource2->running_on, 0);
         r2_node = g_hash_table_lookup(r2_nodes, r2_node->details->id);
         if (r2_node != NULL) {
             r2_weight = r2_node->weight;
         }
     }
 
     if (r1_weight > r2_weight) {
         rc = -1;
         goto done;
     }
 
     if (r1_weight < r2_weight) {
         rc = 1;
         goto done;
     }
 
     reason = "score";
     for (gIter = nodes; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         r1_node = NULL;
         r2_node = NULL;
 
         r1_weight = -INFINITY;
         if (r1_nodes) {
             r1_node = g_hash_table_lookup(r1_nodes, node->details->id);
         }
         if (r1_node) {
             r1_weight = r1_node->weight;
         }
 
         r2_weight = -INFINITY;
         if (r2_nodes) {
             r2_node = g_hash_table_lookup(r2_nodes, node->details->id);
         }
         if (r2_node) {
             r2_weight = r2_node->weight;
         }
 
         if (r1_weight > r2_weight) {
             rc = -1;
             goto done;
         }
 
         if (r1_weight < r2_weight) {
             rc = 1;
             goto done;
         }
     }
 
   done:
     crm_trace("%s (%d) on %s %c %s (%d) on %s: %s",
               resource1->id, r1_weight, r1_node ? r1_node->details->id : "n/a",
               rc < 0 ? '>' : rc > 0 ? '<' : '=',
               resource2->id, r2_weight, r2_node ? r2_node->details->id : "n/a", reason);
 
     if (r1_nodes) {
         g_hash_table_destroy(r1_nodes);
     }
     if (r2_nodes) {
         g_hash_table_destroy(r2_nodes);
     }
 
     return rc;
 }
 
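 /* Allocate every resource to a node, handling remote connection resources
  * first so that their colocation dependencies are allocated as well.
  */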
 static void
 allocate_resources(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     if (is_set(data_set->flags, pe_flag_have_remote_nodes)) {
         /* Force remote connection resources to be allocated first. This
          * also forces any colocation dependencies to be allocated as well */
         for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
             resource_t *rsc = (resource_t *) gIter->data;
             if (rsc->is_remote_node == FALSE) {
                 continue;
             }
             pe_rsc_trace(rsc, "Allocating: %s", rsc->id);
             /* For remote node connection resources, always prefer the partial
              * migration target during resource allocation, if the rsc is in the
              * middle of a migration.
              */
             rsc->cmds->allocate(rsc, rsc->partial_migration_target, data_set);
         }
     }
 
     /* now do the rest of the resources */
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
         if (rsc->is_remote_node == TRUE) {
             continue;
         }
         pe_rsc_trace(rsc, "Allocating: %s", rsc->id);
         rsc->cmds->allocate(rsc, NULL, data_set);
     }
 }
 
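 /* If orphaned resources are being stopped, schedule actions to clear any
  * fail counts they have accumulated, ordered before the stop itself.
  */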
 static void
 cleanup_orphans(resource_t * rsc, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     if (is_set(data_set->flags, pe_flag_stop_rsc_orphans) == FALSE) {
         return;
     }
 
     /* Don't recurse into ->children, those are just unallocated clone instances */
     if(is_not_set(rsc->flags, pe_rsc_orphan)) {
         return;
     }
 
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         if (node->details->online && get_failcount(node, rsc, NULL, data_set)) {
             char *key = generate_op_key(rsc->id, CRM_OP_CLEAR_FAILCOUNT, 0);
             action_t *clear_op = custom_action(rsc, key, CRM_OP_CLEAR_FAILCOUNT,
                                                node, FALSE, TRUE, data_set);
 
             add_hash_param(clear_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
 
             pe_rsc_info(rsc,
                         "Clearing failure of %s on %s because it is orphaned "
                         CRM_XS " %s",
                         rsc->id, node->details->uname, clear_op->uuid);
 
             custom_action_order(rsc, NULL, clear_op,
                             rsc, generate_op_key(rsc->id, RSC_STOP, 0), NULL,
                             pe_order_optional, data_set);
         }
     }
 }
 
 gboolean
 stage5(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     if (safe_str_neq(data_set->placement_strategy, "default")) {
         GListPtr nodes = g_list_copy(data_set->nodes);
 
         nodes = g_list_sort_with_data(nodes, sort_node_weight, NULL);
 
         data_set->resources =
             g_list_sort_with_data(data_set->resources, sort_rsc_process_order, nodes);
 
         g_list_free(nodes);
     }
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         dump_node_capacity(show_utilization ? 0 : utilization_log_level, "Original", node);
     }
 
     crm_trace("Allocating services");
     /* Take (next) highest resource, assign it and create its actions */
 
     allocate_resources(data_set);
 
     gIter = data_set->nodes;
     for (; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         dump_node_capacity(show_utilization ? 0 : utilization_log_level, "Remaining", node);
     }
 
     if (is_set(data_set->flags, pe_flag_startup_probes)) {
         crm_trace("Calculating needed probes");
         /* This code probably needs optimization
          * ptest -x with 100 nodes, 100 clones and clone-max=100:
 
          With probes:
 
          ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
          ptest[14781]: 2010/09/27_17:56:46 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
          ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
          ptest[14781]: 2010/09/27_17:56:47 notice: TRACE: do_calculations: pengine.c:292 Check actions
          ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
          ptest[14781]: 2010/09/27_17:56:48 notice: TRACE: stage5: allocate.c:881 Allocating services
          ptest[14781]: 2010/09/27_17:56:49 notice: TRACE: stage5: allocate.c:894 Calculating needed probes
          ptest[14781]: 2010/09/27_17:56:51 notice: TRACE: stage5: allocate.c:899 Creating actions
          ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: stage5: allocate.c:905 Creating done
          ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
          ptest[14781]: 2010/09/27_17:56:52 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
          36s
          ptest[14781]: 2010/09/27_17:57:28 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
 
          Without probes:
 
          ptest[14637]: 2010/09/27_17:56:21 notice: TRACE: do_calculations: pengine.c:258 Calculate cluster status
          ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:278 Applying placement constraints
          ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:285 Create internal constraints
          ptest[14637]: 2010/09/27_17:56:22 notice: TRACE: do_calculations: pengine.c:292 Check actions
          ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: do_calculations: pengine.c:299 Allocate resources
          ptest[14637]: 2010/09/27_17:56:23 notice: TRACE: stage5: allocate.c:881 Allocating services
          ptest[14637]: 2010/09/27_17:56:24 notice: TRACE: stage5: allocate.c:899 Creating actions
          ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: stage5: allocate.c:905 Creating done
          ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:306 Processing fencing and shutdown cases
          ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:313 Applying ordering constraints
          ptest[14637]: 2010/09/27_17:56:25 notice: TRACE: do_calculations: pengine.c:320 Create transition graph
         */
 
         probe_resources(data_set);
     }
 
     crm_trace("Handle orphans");
 
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
         cleanup_orphans(rsc, data_set);
     }
 
     crm_trace("Creating actions");
 
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         rsc->cmds->create_actions(rsc, data_set);
     }
 
     crm_trace("Creating done");
     return TRUE;
 }
 
 static gboolean
 is_managed(const resource_t * rsc)
 {
     GListPtr gIter = rsc->children;
 
     if (is_set(rsc->flags, pe_rsc_managed)) {
         return TRUE;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         if (is_managed(child_rsc)) {
             return TRUE;
         }
     }
 
     return FALSE;
 }
 
 static gboolean
 any_managed_resources(pe_working_set_t * data_set)
 {
 
     GListPtr gIter = data_set->resources;
 
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         if (is_managed(rsc)) {
             return TRUE;
         }
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Create pseudo-op for guest node fence, and order relative to it
  *
  * \param[in] node      Guest node to fence
  * \param[in] done      STONITH_DONE operation
  * \param[in] data_set  Working set of CIB state
  */
 static void
 fence_guest(pe_node_t *node, pe_action_t *done, pe_working_set_t *data_set)
 {
     resource_t *container = node->details->remote_rsc->container;
     pe_action_t *stop = NULL;
     pe_action_t *stonith_op = NULL;
 
     /* The fence action is just a label; we don't do anything differently for
      * off vs. reboot. We specify it explicitly, rather than let it default to
      * the cluster's default action, because we are not _initiating_ fencing -- we
      * are creating a pseudo-event to describe fencing that is already occurring
      * by other means (container recovery).
      */
     const char *fence_action = "off";
 
     /* Check whether the guest's container resource has any explicit stop or
      * start (the stop may be implied by fencing of the guest's host).
      */
     if (container) {
         stop = find_first_action(container->actions, NULL, CRMD_ACTION_STOP, NULL);
 
         if (find_first_action(container->actions, NULL, CRMD_ACTION_START, NULL)) {
             fence_action = "reboot";
         }
     }
 
     /* Create a fence pseudo-event, so we have an event to order actions
      * against, and crmd can always detect it.
      */
     stonith_op = pe_fence_op(node, fence_action, FALSE, "guest is unclean", data_set);
     update_action_flags(stonith_op, pe_action_pseudo | pe_action_runnable,
                         __FUNCTION__, __LINE__);
 
     /* We want to imply stops/demotes after the guest is stopped, not wait until
      * it is restarted, so we always order pseudo-fencing after stop, not start
      * (even though start might be closer to what is done for a real reboot).
      */
     if (stop) {
         order_actions(stop, stonith_op,
                       pe_order_runnable_left|pe_order_implies_then);
         crm_info("Implying guest node %s is down (action %d) "
                  "after container %s is stopped (action %d)",
                  node->details->uname, stonith_op->id,
                  container->id, stop->id);
     } else {
         crm_info("Implying guest node %s is down (action %d) ",
                  node->details->uname, stonith_op->id);
     }
 
     /* @TODO: Order pseudo-fence after any (optional) fence of guest's host */
 
     /* Order/imply other actions relative to pseudo-fence as with real fence */
     stonith_constraints(node, stonith_op, data_set);
     order_actions(stonith_op, done, pe_order_implies_then);
 }
 
 /*
  * Create dependencies for stonith and shutdown operations
  */
 gboolean
 stage6(pe_working_set_t * data_set)
 {
     action_t *dc_down = NULL;
     action_t *dc_fence = NULL;
     action_t *stonith_op = NULL;
     action_t *last_stonith = NULL;
     gboolean integrity_lost = FALSE;
     action_t *all_stopped = get_pseudo_op(ALL_STOPPED, data_set);
     action_t *done = get_pseudo_op(STONITH_DONE, data_set);
     gboolean need_stonith = TRUE;
     GListPtr gIter;
     GListPtr stonith_ops = NULL;
 
     /* Remote ordering constraints need to happen prior to calculating
      * fencing, because it is one more place we will mark the node as
      * dirty.
      *
      * A nice side-effect of doing it first is that we can remove a
      * bunch of special logic from apply_*_ordering() because it's
      * already part of pe_fence_node()
      */
     crm_trace("Creating remote ordering constraints");
     apply_remote_node_ordering(data_set);
 
     crm_trace("Processing fencing and shutdown cases");
     if (any_managed_resources(data_set) == FALSE) {
         crm_notice("Delaying fencing operations until there are resources to manage");
         need_stonith = FALSE;
     }
 
     /* Check each node for stonith/shutdown */
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
 
         /* Guest nodes are "fenced" by recovering their container resource,
          * so handle them separately.
          */
         if (is_container_remote_node(node)) {
             if (node->details->remote_requires_reset && need_stonith) {
                 fence_guest(node, done, data_set);
             }
             continue;
         }
 
         stonith_op = NULL;
 
         if (node->details->unclean
             && need_stonith && pe_can_fence(data_set, node)) {
 
             stonith_op = pe_fence_op(node, NULL, FALSE, "node is unclean", data_set);
             pe_warn("Scheduling Node %s for STONITH", node->details->uname);
 
             stonith_constraints(node, stonith_op, data_set);
 
             if (node->details->is_dc) {
                 dc_down = stonith_op;
                 dc_fence = stonith_op;
 
             } else if (is_set(data_set->flags, pe_flag_concurrent_fencing) == FALSE) {
                 if (last_stonith) {
                     order_actions(last_stonith, stonith_op, pe_order_optional);
                 }
                 last_stonith = stonith_op;
 
             } else {
                 order_actions(stonith_op, done, pe_order_implies_then);
                 stonith_ops = g_list_append(stonith_ops, stonith_op);
             }
 
         } else if (node->details->online && node->details->shutdown &&
                 /* TODO define what a shutdown op means for a remote node.
                  * For now we do not send shutdown operations for remote nodes, but
                  * if we can come up with a good use for this in the future, we will. */
                     is_remote_node(node) == FALSE) {
 
             action_t *down_op = NULL;
 
             crm_notice("Scheduling Node %s for shutdown", node->details->uname);
 
             down_op = custom_action(NULL, crm_strdup_printf("%s-%s", CRM_OP_SHUTDOWN, node->details->uname),
                                     CRM_OP_SHUTDOWN, node, FALSE, TRUE, data_set);
 
             shutdown_constraints(node, down_op, data_set);
             add_hash_param(down_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
 
             if (node->details->is_dc) {
                 dc_down = down_op;
             }
         }
 
         if (node->details->unclean && stonith_op == NULL) {
             integrity_lost = TRUE;
             pe_warn("Node %s is unclean!", node->details->uname);
         }
     }
 
     if (integrity_lost) {
         if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
             pe_warn("YOUR RESOURCES ARE NOW LIKELY COMPROMISED");
             pe_err("ENABLE STONITH TO KEEP YOUR RESOURCES SAFE");
 
         } else if (is_set(data_set->flags, pe_flag_have_quorum) == FALSE) {
             crm_notice("Cannot fence unclean nodes until quorum is"
                        " attained (or no-quorum-policy is set to ignore)");
         }
     }
 
     if (dc_down != NULL) {
         GListPtr gIter = NULL;
 
         crm_trace("Ordering shutdowns before %s on %s (DC)",
                   dc_down->task, dc_down->node->details->uname);
 
         add_hash_param(dc_down->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
 
         for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
             action_t *node_stop = (action_t *) gIter->data;
 
             if (safe_str_neq(CRM_OP_SHUTDOWN, node_stop->task)) {
                 continue;
             } else if (node_stop->node->details->is_dc) {
                 continue;
             }
 
             crm_debug("Ordering shutdown on %s before %s on %s",
                       node_stop->node->details->uname,
                       dc_down->task, dc_down->node->details->uname);
 
             order_actions(node_stop, dc_down, pe_order_optional);
         }
 
         if (last_stonith) {
             if (dc_down != last_stonith) {
                 order_actions(last_stonith, dc_down, pe_order_optional);
             }
 
         } else {
             GListPtr gIter2 = NULL;
 
             for (gIter2 = stonith_ops; gIter2 != NULL; gIter2 = gIter2->next) {
                 stonith_op = (action_t *) gIter2->data;
 
                 if (dc_down != stonith_op) {
                     order_actions(stonith_op, dc_down, pe_order_optional);
                 }
             }
         }
     }
 
 
     if (dc_fence) {
         order_actions(dc_down, done, pe_order_implies_then);
 
     } else if (last_stonith) {
         order_actions(last_stonith, done, pe_order_implies_then);
     }
 
     order_actions(done, all_stopped, pe_order_implies_then);
 
     g_list_free(stonith_ops);
     return TRUE;
 }
 
 /*
  * Determine the sets of independent actions and the correct order for the
  *  actions in each set.
  *
  * Mark dependencies of un-runnable actions un-runnable
  *
  */
 static GListPtr
 find_actions_by_task(GListPtr actions, resource_t * rsc, const char *original_key)
 {
     GListPtr list = NULL;
 
     list = find_actions(actions, original_key, NULL);
     if (list == NULL) {
         /* we're potentially searching a child of the original resource */
         char *key = NULL;
         char *tmp = NULL;
         char *task = NULL;
         int interval = 0;
 
         if (parse_op_key(original_key, &tmp, &task, &interval)) {
             key = generate_op_key(rsc->id, task, interval);
             /* crm_err("looking up %s instead of %s", key, original_key); */
             /* slist_iter(action, action_t, actions, lpc, */
             /*         crm_err("  - %s", action->uuid)); */
             list = find_actions(actions, key, NULL);
 
         } else {
             crm_err("search key: %s", original_key);
         }
 
         free(key);
         free(tmp);
         free(task);
     }
 
     return list;
 }
 
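 /*!
  * \internal
  * \brief Apply an ordering constraint to its 'then' (right-hand) actions
  *
  * Find the right-hand actions (or use the explicit rh_action) and order each
  * one after \p lh_action; with no left-hand action, an implied-then ordering
  * marks the right-hand action unrunnable instead.
  */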
 static void
 rsc_order_then(action_t * lh_action, resource_t * rsc, order_constraint_t * order)
 {
     GListPtr gIter = NULL;
     GListPtr rh_actions = NULL;
     action_t *rh_action = NULL;
     enum pe_ordering type = order->type;
 
     CRM_CHECK(rsc != NULL, return);
     CRM_CHECK(order != NULL, return);
 
     rh_action = order->rh_action;
     crm_trace("Processing RH of ordering constraint %d", order->id);
 
     if (rh_action != NULL) {
         rh_actions = g_list_prepend(NULL, rh_action);
 
     } else if (rsc != NULL) {
         rh_actions = find_actions_by_task(rsc->actions, rsc, order->rh_action_task);
     }
 
     if (rh_actions == NULL) {
         pe_rsc_trace(rsc, "No RH-Side (%s/%s) found for constraint..."
                      " ignoring", rsc->id, order->rh_action_task);
         if (lh_action) {
             pe_rsc_trace(rsc, "LH-Side was: %s", lh_action->uuid);
         }
         return;
     }
 
     if (lh_action && lh_action->rsc == rsc && is_set(lh_action->flags, pe_action_dangle)) {
         pe_rsc_trace(rsc, "Detected dangling operation %s -> %s", lh_action->uuid,
                      order->rh_action_task);
         clear_bit(type, pe_order_implies_then);
     }
 
     gIter = rh_actions;
     for (; gIter != NULL; gIter = gIter->next) {
         action_t *rh_action_iter = (action_t *) gIter->data;
 
         if (lh_action) {
             order_actions(lh_action, rh_action_iter, type);
 
         } else if (type & pe_order_implies_then) {
             update_action_flags(rh_action_iter, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__);
             crm_warn("Unrunnable %s 0x%.6x", rh_action_iter->uuid, type);
         } else {
             crm_warn("neither %s 0x%.6x", rh_action_iter->uuid, type);
         }
     }
 
     g_list_free(rh_actions);
 }
 
 static void
 rsc_order_first(resource_t * lh_rsc, order_constraint_t * order, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     GListPtr lh_actions = NULL;
     action_t *lh_action = order->lh_action;
     resource_t *rh_rsc = order->rh_rsc;
 
     crm_trace("Processing LH of ordering constraint %d", order->id);
     CRM_ASSERT(lh_rsc != NULL);
 
     if (lh_action != NULL) {
         lh_actions = g_list_prepend(NULL, lh_action);
 
    } else {
         lh_actions = find_actions_by_task(lh_rsc->actions, lh_rsc, order->lh_action_task);
     }
 
     if (lh_actions == NULL && lh_rsc != rh_rsc) {
         char *key = NULL;
         char *rsc_id = NULL;
         char *op_type = NULL;
         int interval = 0;
 
         parse_op_key(order->lh_action_task, &rsc_id, &op_type, &interval);
         key = generate_op_key(lh_rsc->id, op_type, interval);
 
         if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_STOPPED && safe_str_eq(op_type, RSC_STOP)) {
             free(key);
             pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
                          lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
 
         } else if (lh_rsc->fns->state(lh_rsc, TRUE) == RSC_ROLE_SLAVE && safe_str_eq(op_type, RSC_DEMOTE)) {
             free(key);
             pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - ignoring",
                          lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
 
         } else {
             pe_rsc_trace(lh_rsc, "No LH-Side (%s/%s) found for constraint %d with %s - creating",
                          lh_rsc->id, order->lh_action_task, order->id, order->rh_action_task);
             lh_action = custom_action(lh_rsc, key, op_type, NULL, TRUE, TRUE, data_set);
             lh_actions = g_list_prepend(NULL, lh_action);
         }
 
         free(op_type);
         free(rsc_id);
     }
 
     gIter = lh_actions;
     for (; gIter != NULL; gIter = gIter->next) {
         action_t *lh_action_iter = (action_t *) gIter->data;
 
         if (rh_rsc == NULL && order->rh_action) {
             rh_rsc = order->rh_action->rsc;
         }
         if (rh_rsc) {
             rsc_order_then(lh_action_iter, rh_rsc, order);
 
         } else if (order->rh_action) {
             order_actions(lh_action_iter, order->rh_action, order->type);
         }
     }
 
     g_list_free(lh_actions);
 }
 
 extern gboolean update_action(action_t * action);
 extern void update_colo_start_chain(action_t * action);
 
 static int
 is_recurring_action(action_t *action) 
 {
     const char *interval_s = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL);
     int interval = crm_parse_int(interval_s, "0");
     if(interval > 0) {
         return TRUE;
     }
     return FALSE;
 }
 
 static void
 apply_container_ordering(action_t *action, pe_working_set_t *data_set)
 {
    /* VMs are classified as containers for these purposes: both involve a
     * 'thing' running on a real or remote cluster node.
     *
     * This allows us to be smarter about the type and extent of
     * recovery actions required in various scenarios.
     */
     resource_t *remote_rsc = NULL;
     resource_t *container = NULL;
     enum action_tasks task = text2task(action->task);
 
     if (action->rsc == NULL) {
         return;
     }
 
     CRM_ASSERT(action->node);
     CRM_ASSERT(is_remote_node(action->node));
 
     remote_rsc = action->node->details->remote_rsc;
     CRM_ASSERT(remote_rsc);
 
     container = remote_rsc->container;
     CRM_ASSERT(container);
 
     if(is_set(container->flags, pe_rsc_failed)) {
         pe_fence_node(data_set, action->node, "container failed");
     }
 
     crm_trace("Order %s action %s relative to %s%s for %s%s",
               action->task, action->uuid,
               is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
               remote_rsc->id,
               is_set(container->flags, pe_rsc_failed)? "failed " : "",
               container->id);
 
     switch (task) {
         case start_rsc:
         case action_promote:
             /* Force resource recovery if the container is recovered */
             custom_action_order(container, generate_op_key(container->id, RSC_START, 0), NULL,
                                 action->rsc, NULL, action,
                                 pe_order_preserve | pe_order_implies_then | pe_order_runnable_left, data_set);
 
             /* Wait for the connection resource to be up too */
             custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL,
                                 action->rsc, NULL, action,
                                 pe_order_preserve | pe_order_runnable_left, data_set);
             break;
         case stop_rsc:
             if(is_set(container->flags, pe_rsc_failed)) {
                 /* When the container representing a guest node fails,
                  * the stop action for all the resources living in
                  * that container is implied by the container
                  * stopping. This is similar to how fencing operations
                  * work for cluster nodes.
                  */
             } else {
                 /* Otherwise, ensure the operation happens before the connection is brought down */
                 custom_action_order(action->rsc, NULL, action,
                                     remote_rsc, generate_op_key(remote_rsc->id, RSC_STOP, 0), NULL,
                                     pe_order_preserve, data_set);
             }
             break;
         case action_demote:
             if(is_set(container->flags, pe_rsc_failed)) {
                /* Just like a stop, the demote is implied by the
                 * container having failed/stopped.
                 *
                 * If we really wanted to, we could order the demote
                 * after the stop, but only if the container's current
                 * role were stopped (otherwise we would re-introduce
                 * an ordering loop).
                 */
 
             } else {
                 /* Otherwise, ensure the operation happens before the connection is brought down */
                 custom_action_order(action->rsc, NULL, action,
                                     remote_rsc, generate_op_key(remote_rsc->id, RSC_STOP, 0), NULL,
                                     pe_order_preserve, data_set);
             }
             break;
         default:
             /* Wait for the connection resource to be up */
             if (is_recurring_action(action)) {
                 /* In case we ever get the recovery logic wrong, force
                  * recurring monitors to be restarted, even if just
                  * the connection was re-established
                  */
                 if(task != no_action) {
                     custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL,
                                         action->rsc, NULL, action,
                                         pe_order_preserve | pe_order_runnable_left | pe_order_implies_then, data_set);
                 }
             } else {
                 custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL,
                                     action->rsc, NULL, action,
                                     pe_order_preserve | pe_order_runnable_left, data_set);
             }
             break;
     }
 }
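
/* Informal summary of the orderings created above (derived from the switch
 * cases; ordering flag names abbreviated):
 *
 *   start/promote: container.start  -> action  (implies_then, runnable_left)
 *                  connection.start -> action  (runnable_left)
 *   stop/demote:   action -> connection.stop, but only while the container
 *                  is healthy; a failed container implies the stop/demote
 *   anything else: connection.start -> action  (runnable_left, plus
 *                  implies_then for recurring monitors)
 */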
 
 static enum remote_connection_state
 get_remote_node_state(pe_node_t *node) 
 {
     resource_t *remote_rsc = NULL;
     node_t *cluster_node = NULL;
 
     if(node == NULL) {
         return remote_state_unknown;
     }
 
     remote_rsc = node->details->remote_rsc;
     CRM_ASSERT(remote_rsc);
 
     if(remote_rsc->running_on) {
         cluster_node = remote_rsc->running_on->data;
     }
 
 
     /* If the cluster node the remote connection resource resides on
      * is unclean or went offline, we can't process any operations
      * on that remote node until after it starts elsewhere.
      */
     if(remote_rsc->next_role == RSC_ROLE_STOPPED || remote_rsc->allocated_to == NULL) {
-        /* There is nowhere left to run the connection resource,
-         * and the resource is in a failed state (either directly
-         * or because it is located on a failed node).
-         *
-         * If there are any resources known to be active on it (stop),
-         * or if there are resources in an unknown state (probe), we
-         * must assume the worst and fence it.
-         */
-        if (is_set(remote_rsc->flags, pe_rsc_failed)) {
-            return remote_state_failed;
-        } else if(cluster_node && cluster_node->details->unclean) {
+        /* The connection resource is not going to run anywhere */
+
+        if (cluster_node && cluster_node->details->unclean) {
+            /* The remote connection is failed because its resource is on a
+             * failed node and can't be recovered elsewhere, so we must fence.
+             */
             return remote_state_failed;
-        } else {
+        }
+
+        if (is_not_set(remote_rsc->flags, pe_rsc_failed)) {
+            /* Connection resource is cleanly stopped */
             return remote_state_stopped;
         }
 
+        /* Connection resource is failed */
+
+        if ((remote_rsc->next_role == RSC_ROLE_STOPPED)
+            && remote_rsc->remote_reconnect_interval
+            && node->details->remote_was_fenced) {
+
+            /* We won't know whether the connection is recoverable until the
+             * reconnect interval expires and we reattempt connection.
+             */
+            return remote_state_unknown;
+        }
+
+        /* The remote connection is in a failed state. If there are any
+         * resources known to be active on it (stop) or in an unknown state
+         * (probe), we must assume the worst and fence it.
+         */
+        return remote_state_failed;
+
     } else if (cluster_node == NULL) {
         /* Connection is recoverable but not currently running anywhere, see if we can recover it first */
         return remote_state_unknown;
 
     } else if(cluster_node->details->unclean == TRUE
               || cluster_node->details->online == FALSE) {
         /* Connection is running on a dead node, see if we can recover it first */
         return remote_state_resting;
 
     } else if (g_list_length(remote_rsc->running_on) > 1
                && remote_rsc->partial_migration_source
                && remote_rsc->partial_migration_target) {
         /* We're in the middle of migrating a connection resource,
          * wait until after the resource migrates before performing
          * any actions.
          */
         return remote_state_resting;
 
     }
     return remote_state_alive;
 }
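
/* Informal decision table for the logic above (a sketch, not normative):
 *
 *   connection has nowhere left to run:
 *     host node unclean                          -> failed  (must fence)
 *     connection not failed                      -> stopped (clean stop)
 *     failed, reconnect interval set,
 *       and node already fenced                  -> unknown (retry later)
 *     otherwise                                  -> failed  (must fence)
 *   recoverable but not running anywhere         -> unknown
 *   host node unclean or offline                 -> resting
 *   connection mid-migration                     -> resting
 *   otherwise                                    -> alive
 */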
 
 static void
 apply_remote_ordering(action_t *action, pe_working_set_t *data_set)
 {
     resource_t *remote_rsc = NULL;
     node_t *cluster_node = NULL;
     enum action_tasks task = text2task(action->task);
     enum remote_connection_state state = get_remote_node_state(action->node);
 
     enum pe_ordering order_opts = pe_order_none;
 
     if (action->rsc == NULL) {
         return;
     }
 
     CRM_ASSERT(action->node);
     CRM_ASSERT(is_remote_node(action->node));
 
     remote_rsc = action->node->details->remote_rsc;
     CRM_ASSERT(remote_rsc);
 
     if(remote_rsc->running_on) {
         cluster_node = remote_rsc->running_on->data;
     }
 
     crm_trace("Order %s action %s relative to %s%s (state %d)",
               action->task, action->uuid,
               is_set(remote_rsc->flags, pe_rsc_failed)? "failed " : "",
               remote_rsc->id, state);
     switch (task) {
         case start_rsc:
         case action_promote:
            /* This is an internally generated constraint, exempt from
             * user constraint prohibitions, and this action isn't runnable
             * if the connection start isn't runnable.
             */
             order_opts = pe_order_preserve | pe_order_runnable_left;
 
             if (state == remote_state_failed) {
                 /* Force recovery, by making this action required */
                 order_opts |= pe_order_implies_then;
             }
 
             /* Ensure connection is up before running this action */
             custom_action_order(remote_rsc,
                                 generate_op_key(remote_rsc->id, RSC_START, 0),
                                 NULL, action->rsc, NULL, action, order_opts,
                                 data_set);
             break;
 
         case stop_rsc:
             /* Handle special case with remote node where stop actions need to be
              * ordered after the connection resource starts somewhere else.
              */
             if(state == remote_state_resting) {
                 /* Wait for the connection resource to be up and assume everything is as we left it */
                 custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL,
                                     action->rsc, NULL, action,
                                     pe_order_preserve | pe_order_runnable_left, data_set);
 
             } else {
                 if(state == remote_state_failed) {
                     /* We would only be here if the resource is
                      * running on the remote node.  Since we have no
                      * way to stop it, it is necessary to fence the
                      * node.
                      */
                     pe_fence_node(data_set, action->node, "resources are active and the connection is unrecoverable");
                 }
 
                 custom_action_order(action->rsc, NULL, action,
                                     remote_rsc, generate_op_key(remote_rsc->id, RSC_STOP, 0), NULL,
                                     pe_order_preserve | pe_order_implies_first, data_set);
             }
             break;
 
         case action_demote:
             /* Only order this demote relative to the connection start if the
              * connection isn't being torn down. Otherwise, the demote would be
              * blocked because the connection start would not be allowed.
              */
             if(state == remote_state_resting || state == remote_state_unknown) {
                 custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL,
                                     action->rsc, NULL, action,
                                     pe_order_preserve, data_set);
             } /* Otherwise we can rely on the stop ordering */
             break;
 
         default:
             /* Wait for the connection resource to be up */
             if (is_recurring_action(action)) {
                 /* In case we ever get the recovery logic wrong, force
                  * recurring monitors to be restarted, even if just
                  * the connection was re-established
                  */
                 custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL,
                                     action->rsc, NULL, action,
                                     pe_order_preserve | pe_order_runnable_left | pe_order_implies_then, data_set);
 
             } else {
                 if(task == monitor_rsc && state == remote_state_failed) {
                     /* We would only be here if we do not know the
                      * state of the resource on the remote node.
                      * Since we have no way to find out, it is
                      * necessary to fence the node.
                      */
                     pe_fence_node(data_set, action->node, "resources are in an unknown state and the connection is unrecoverable");
                 }
 
                 if(cluster_node && state == remote_state_stopped) {
                    /* The connection is currently up, but is going
                     * down permanently. Make sure we check that
                     * services are actually stopped _before_ we let
                     * the connection get closed.
                     */
                     custom_action_order(action->rsc, NULL, action,
                                         remote_rsc, generate_op_key(remote_rsc->id, RSC_STOP, 0), NULL,
                                         pe_order_preserve | pe_order_runnable_left, data_set);
 
                 } else {
                     custom_action_order(remote_rsc, generate_op_key(remote_rsc->id, RSC_START, 0), NULL,
                                         action->rsc, NULL, action,
                                         pe_order_preserve | pe_order_runnable_left, data_set);
                 }
             }
             break;
     }
 }
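
/* Sketch of the orderings produced above, per task and connection state
 * (informal, derived from the switch cases):
 *
 *   start/promote: connection.start -> action; recovery is forced
 *                  (implies_then) when state == failed
 *   stop:          resting -> wait for connection.start;
 *                  failed  -> fence the node, then action -> connection.stop
 *   demote:        ordered after connection.start only while the connection
 *                  is not being torn down (resting/unknown)
 *   anything else: recurring monitors restart after connection.start;
 *                  a failed connection with unknown resource state
 *                  triggers fencing
 */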
 
 static void
 apply_remote_node_ordering(pe_working_set_t *data_set)
 {
     if (is_set(data_set->flags, pe_flag_have_remote_nodes) == FALSE) {
         return;
     }
 
     for (GListPtr gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         action_t *action = (action_t *) gIter->data;
 
         if (action->rsc == NULL) {
             continue;
         }
 
        /* Special case: clearing the failcount of a remote node connection */
        if (action->rsc->is_remote_node &&
            safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) {
 
             /* If we are clearing the failcount of an actual remote node
              * connection resource, then make sure this happens before allowing
              * the connection to start if we are planning on starting the
              * connection during this transition.
              */
             custom_action_order(action->rsc,
                 NULL,
                 action,
                 action->rsc,
                 generate_op_key(action->rsc->id, RSC_START, 0),
                 NULL,
                 pe_order_optional,
                 data_set);
 
             continue;
         }
 
        /* If the action occurs on a Pacemaker Remote node, create ordering
         * constraints that guarantee the action occurs while the node is
         * active (for example, after the connection starts and before it
         * stops).
         */
         if (action->node == NULL ||
             is_remote_node(action->node) == FALSE ||
             action->node->details->remote_rsc == NULL ||
             is_set(action->flags, pe_action_pseudo)) {
             crm_trace("Nothing required for %s on %s", action->uuid, action->node?action->node->details->uname:"NA");
 
         } else if(action->node->details->remote_rsc->container) {
             crm_trace("Container ordering for %s", action->uuid);
             apply_container_ordering(action, data_set);
 
         } else {
             crm_trace("Remote ordering for %s", action->uuid);
             apply_remote_ordering(action, data_set);
         }
     }
 }
 
 static void
 order_probes(pe_working_set_t * data_set) 
 {
 #if 0
     GListPtr gIter = NULL;
 
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         /* Given "A then B", we would prefer to wait for A to be
          * started before probing B.
          *
          * If A was a filesystem on which the binaries and data for B
          * lived, it would have been useful if the author of B's agent
          * could assume that A is running before B.monitor will be
          * called.
          *
          * However we can't _only_ probe once A is running, otherwise
          * we'd not detect the state of B if A could not be started
          * for some reason.
          *
          * In practice however, we cannot even do an opportunistic
          * version of this because B may be moving:
          *
          *   B.probe -> B.start
          *   B.probe -> B.stop
          *   B.stop -> B.start
          *   A.stop -> A.start
          *   A.start -> B.probe
          *
          * So far so good, but if we add the result of this code:
          *
          *   B.stop -> A.stop
          *
          * Then we get a loop:
          *
          *   B.probe -> B.stop -> A.stop -> A.start -> B.probe
          *
          * We could kill the 'B.probe -> B.stop' dependency, but that
         * could mean stopping B too soon, because B.start must wait
          * for the probes to complete.
          *
          * Another option is to allow it only if A is a non-unique
          * clone with clone-max == node-max (since we'll never be
          * moving it).  However, we could still be stopping one
          * instance at the same time as starting another.
 
         * The complexity of checking for allowed conditions, combined
         * with the ever-narrowing use case, suggests that this code
         * should remain disabled until someone gets smarter.
          */
         action_t *start = NULL;
         GListPtr actions = NULL;
         GListPtr probes = NULL;
         char *key = NULL;
 
         key = start_key(rsc);
         actions = find_actions(rsc->actions, key, NULL);
         free(key);
 
         if (actions) {
             start = actions->data;
             g_list_free(actions);
         }
 
         if(start == NULL) {
             crm_err("No start action for %s", rsc->id);
             continue;
         }
 
         key = generate_op_key(rsc->id, CRMD_ACTION_STATUS, 0);
         probes = find_actions(rsc->actions, key, NULL);
         free(key);
 
         for (actions = start->actions_before; actions != NULL; actions = actions->next) {
             action_wrapper_t *before = (action_wrapper_t *) actions->data;
 
             GListPtr pIter = NULL;
             action_t *first = before->action;
             resource_t *first_rsc = first->rsc;
 
             if(first->required_runnable_before) {
                 GListPtr clone_actions = NULL;
                 for (clone_actions = first->actions_before; clone_actions != NULL; clone_actions = clone_actions->next) {
                     before = (action_wrapper_t *) clone_actions->data;
 
                     crm_trace("Testing %s -> %s (%p) for %s", first->uuid, before->action->uuid, before->action->rsc, start->uuid);
 
                     CRM_ASSERT(before->action->rsc);
                     first_rsc = before->action->rsc;
                     break;
                 }
 
             } else if(safe_str_neq(first->task, RSC_START)) {
                 crm_trace("Not a start op %s for %s", first->uuid, start->uuid);
             }
 
             if(first_rsc == NULL) {
                 continue;
 
             } else if(uber_parent(first_rsc) == uber_parent(start->rsc)) {
                 crm_trace("Same parent %s for %s", first_rsc->id, start->uuid);
                 continue;
 
             } else if(FALSE && pe_rsc_is_clone(uber_parent(first_rsc)) == FALSE) {
                 crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid);
                 continue;
             }
 
             crm_err("Appplying %s before %s %d", first->uuid, start->uuid, uber_parent(first_rsc)->variant);
 
             for (pIter = probes; pIter != NULL; pIter = pIter->next) {
                 action_t *probe = (action_t *) pIter->data;
 
                 crm_err("Ordering %s before %s", first->uuid, probe->uuid);
                 order_actions(first, probe, pe_order_optional);
             }
         }
     }
 #endif
 }
 
 gboolean
 stage7(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
 
     crm_trace("Applying ordering constraints");
 
    /* Don't ask me why, but apparently the constraints need to be
     * processed in the order they were created in... go figure.
     *
     * Also, g_list_append() has horrendous performance characteristics,
     * so we build the list with g_list_prepend() and reverse it here.
     */
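    /* A minimal sketch of the prepend-then-reverse idiom (illustrative):
     *
     *     GList *l = NULL;
     *
     *     for (i = 0; i < n; i++) {
     *         l = g_list_prepend(l, item[i]);  // O(1) per insert
     *     }
     *     l = g_list_reverse(l);               // one O(n) pass
     *
     * whereas g_list_append() walks the whole list on every call,
     * making a build loop O(n^2) overall.
     */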
     data_set->ordering_constraints = g_list_reverse(data_set->ordering_constraints);
 
     for (gIter = data_set->ordering_constraints; gIter != NULL; gIter = gIter->next) {
         order_constraint_t *order = (order_constraint_t *) gIter->data;
         resource_t *rsc = order->lh_rsc;
 
         crm_trace("Applying ordering constraint: %d", order->id);
 
         if (rsc != NULL) {
             crm_trace("rsc_action-to-*");
             rsc_order_first(rsc, order, data_set);
             continue;
         }
 
         rsc = order->rh_rsc;
         if (rsc != NULL) {
             crm_trace("action-to-rsc_action");
             rsc_order_then(order->lh_action, rsc, order);
 
         } else {
             crm_trace("action-to-action");
             order_actions(order->lh_action, order->rh_action, order->type);
         }
     }
 
     for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         action_t *action = (action_t *) gIter->data;
 
         update_colo_start_chain(action);
     }
 
     crm_trace("Ordering probes");
     order_probes(data_set);
 
     crm_trace("Updating %d actions", g_list_length(data_set->actions));
     for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         action_t *action = (action_t *) gIter->data;
 
         update_action(action);
     }
 
     LogNodeActions(data_set, FALSE);
     for (gIter = data_set->resources; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         LogActions(rsc, data_set, FALSE);
     }
     return TRUE;
 }
 
 int transition_id = -1;
 
 /*
  * Create a dependency graph to send to the transitioner (via the CRMd)
  */
 gboolean
 stage8(pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     const char *value = NULL;
 
     transition_id++;
     crm_trace("Creating transition graph %d.", transition_id);
 
     data_set->graph = create_xml_node(NULL, XML_TAG_GRAPH);
 
     value = pe_pref(data_set->config_hash, "cluster-delay");
     crm_xml_add(data_set->graph, "cluster-delay", value);
 
     value = pe_pref(data_set->config_hash, "stonith-timeout");
     crm_xml_add(data_set->graph, "stonith-timeout", value);
 
     crm_xml_add(data_set->graph, "failed-stop-offset", "INFINITY");
 
     if (is_set(data_set->flags, pe_flag_start_failure_fatal)) {
         crm_xml_add(data_set->graph, "failed-start-offset", "INFINITY");
     } else {
         crm_xml_add(data_set->graph, "failed-start-offset", "1");
     }
 
     value = pe_pref(data_set->config_hash, "batch-limit");
     crm_xml_add(data_set->graph, "batch-limit", value);
 
     crm_xml_add_int(data_set->graph, "transition_id", transition_id);
 
     value = pe_pref(data_set->config_hash, "migration-limit");
     if (crm_int_helper(value, NULL) > 0) {
         crm_xml_add(data_set->graph, "migration-limit", value);
     }
 
 /* errors...
    slist_iter(action, action_t, action_list, lpc,
    if(action->optional == FALSE && action->runnable == FALSE) {
    print_action("Ignoring", action, TRUE);
    }
    );
 */
 
     gIter = data_set->resources;
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *rsc = (resource_t *) gIter->data;
 
         pe_rsc_trace(rsc, "processing actions for rsc=%s", rsc->id);
         rsc->cmds->expand(rsc, data_set);
     }
 
     crm_log_xml_trace(data_set->graph, "created resource-driven action list");
 
     /* pseudo action to distribute list of nodes with maintenance state update */
     add_maintenance_update(data_set);
 
     /* catch any non-resource specific actions */
     crm_trace("processing non-resource actions");
 
     gIter = data_set->actions;
     for (; gIter != NULL; gIter = gIter->next) {
         action_t *action = (action_t *) gIter->data;
 
         if (action->rsc
             && action->node
             && action->node->details->shutdown
             && is_not_set(action->rsc->flags, pe_rsc_maintenance)
             && is_not_set(action->flags, pe_action_optional)
             && is_not_set(action->flags, pe_action_runnable)
             && crm_str_eq(action->task, RSC_STOP, TRUE)
             ) {
            /* Eventually we should just ignore the 'fence' case, but for
             * now it's the best way to detect (in CTS) when CIB resource
             * updates are being lost.
             */
             if (is_set(data_set->flags, pe_flag_have_quorum)
                 || data_set->no_quorum_policy == no_quorum_ignore) {
                 crm_crit("Cannot %s node '%s' because of %s:%s%s (%s)",
                          action->node->details->unclean ? "fence" : "shut down",
                          action->node->details->uname, action->rsc->id,
                          is_not_set(action->rsc->flags, pe_rsc_managed) ? " unmanaged" : " blocked",
                          is_set(action->rsc->flags, pe_rsc_failed) ? " failed" : "",
                          action->uuid);
             }
         }
 
         graph_element_from_action(action, data_set);
     }
 
     crm_log_xml_trace(data_set->graph, "created generic action list");
     crm_trace("Created transition graph %d.", transition_id);
 
     return TRUE;
 }
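
/* For illustration only (the attribute values here are hypothetical and
 * configuration-dependent), the graph built above might serialize as:
 *
 *   <transition_graph cluster-delay="60s" stonith-timeout="60s"
 *                     failed-stop-offset="INFINITY"
 *                     failed-start-offset="INFINITY"
 *                     batch-limit="0" transition_id="4">
 *     ...one element per action, added by graph_element_from_action()...
 *   </transition_graph>
 */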
 
 void
 LogNodeActions(pe_working_set_t * data_set, gboolean terminal)
 {
     GListPtr gIter = NULL;
 
     for (gIter = data_set->actions; gIter != NULL; gIter = gIter->next) {
         char *node_name = NULL;
         char *task = NULL;
         action_t *action = (action_t *) gIter->data;
 
         if (action->rsc != NULL) {
             continue;
         } else if (is_set(action->flags, pe_action_optional)) {
             continue;
         }
 
        if (is_container_remote_node(action->node)) {
            node_name = crm_strdup_printf("%s (resource: %s)", action->node->details->uname,
                                          action->node->details->remote_rsc->container->id);
        } else if (action->node) {
            node_name = strdup(action->node->details->uname);
        }

         if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) {
             task = strdup("Shutdown");
         } else if (safe_str_eq(action->task, CRM_OP_FENCE)) {
             const char *op = g_hash_table_lookup(action->meta, "stonith_action");
             task = crm_strdup_printf("Fence (%s)", op);
         }
 
         if(task == NULL) {
             /* Nothing to report */
         } else if(terminal && action->reason) {
             printf(" * %s %s '%s'\n", task, node_name, action->reason);
         } else if(terminal) {
             printf(" * %s %s\n", task, node_name);
         } else if(action->reason) {
             crm_notice(" * %s %s '%s'\n", task, node_name, action->reason);
         } else {
             crm_notice(" * %s %s\n", task, node_name);
         }
 
         free(node_name);
         free(task);
     }
 }
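
/* Example of the output produced above, with hypothetical node and reason
 * strings (the quoted reason appears only when action->reason is set):
 *
 *   * Shutdown node2
 *   * Fence (reboot) remote1 (resource: remote1-container) 'guest is unrecoverable'
 */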
 
 void
 cleanup_alloc_calculations(pe_working_set_t * data_set)
 {
     if (data_set == NULL) {
         return;
     }
 
     crm_trace("deleting %d order cons: %p",
               g_list_length(data_set->ordering_constraints), data_set->ordering_constraints);
     pe_free_ordering(data_set->ordering_constraints);
     data_set->ordering_constraints = NULL;
 
     crm_trace("deleting %d node cons: %p",
               g_list_length(data_set->placement_constraints), data_set->placement_constraints);
     pe_free_rsc_to_node(data_set->placement_constraints);
     data_set->placement_constraints = NULL;
 
     crm_trace("deleting %d inter-resource cons: %p",
               g_list_length(data_set->colocation_constraints), data_set->colocation_constraints);
     g_list_free_full(data_set->colocation_constraints, free);
     data_set->colocation_constraints = NULL;
 
     crm_trace("deleting %d ticket deps: %p",
               g_list_length(data_set->ticket_constraints), data_set->ticket_constraints);
     g_list_free_full(data_set->ticket_constraints, free);
     data_set->ticket_constraints = NULL;
 
     cleanup_calculations(data_set);
 }
diff --git a/pengine/regression.sh b/pengine/regression.sh
index d1a8a3fcf4..7dc3a04c55 100755
--- a/pengine/regression.sh
+++ b/pengine/regression.sh
@@ -1,878 +1,879 @@
 #!/bin/bash
 
  # Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  #
  # This program is free software; you can redistribute it and/or
  # modify it under the terms of the GNU General Public
  # License as published by the Free Software Foundation; either
  # version 2 of the License, or (at your option) any later version.
  #
  # This software is distributed in the hope that it will be useful,
  # but WITHOUT ANY WARRANTY; without even the implied warranty of
  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  # General Public License for more details.
  #
  # You should have received a copy of the GNU General Public
  # License along with this library; if not, write to the Free Software
  # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  #
 
 core=`dirname $0`
 . $core/regression.core.sh || exit 1
 
 create_mode="true"
 info Generating test outputs for these tests...
 # do_test file description
 info Done.
 echo ""
 
 info Performing the following tests from $io_dir
 create_mode="false"
 echo ""
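
# Usage is "do_test <file> <description> [extra options]"; for example, a
# date-sensitive test further down pins the evaluation time:
#   do_test date-1 "Dates" -t "2005-020"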
 
 do_test simple1 "Offline     "
 do_test simple2 "Start       "
 do_test simple3 "Start 2     "
 do_test simple4 "Start Failed"
 do_test simple6 "Stop Start  "
 do_test simple7 "Shutdown    "
 #do_test simple8 "Stonith	"
 #do_test simple9 "Lower version"
 #do_test simple10 "Higher version"
 do_test simple11 "Priority (ne)"
 do_test simple12 "Priority (eq)"
 do_test simple8 "Stickiness"
 
 echo ""
 do_test group1 "Group		"
 do_test group2 "Group + Native	"
 do_test group3 "Group + Group	"
 do_test group4 "Group + Native (nothing)"
 do_test group5 "Group + Native (move)   "
 do_test group6 "Group + Group (move)    "
 do_test group7 "Group colocation"
do_test group13 "Group colocation (can't run)"
 do_test group8 "Group anti-colocation"
 do_test group9 "Group recovery"
 do_test group10 "Group partial recovery"
 do_test group11 "Group target_role"
 do_test group14 "Group stop (graph terminated)"
do_test group15 "Negative group colocation"
 do_test bug-1573 "Partial stop of a group with two children"
 do_test bug-1718 "Mandatory group ordering - Stop group_FUN"
 do_test bug-lf-2613 "Move group on failure"
 do_test bug-lf-2619 "Move group on clone failure"
 do_test group-fail "Ensure stop order is preserved for partially active groups"
 do_test group-unmanaged "No need to restart r115 because r114 is unmanaged"
 do_test group-unmanaged-stopped "Make sure r115 is stopped when r114 fails"
 do_test group-dependents "Account for the location preferences of things colocated with a group"
 
 echo ""
 do_test rsc_dep1 "Must not     "
 do_test rsc_dep3 "Must         "
 do_test rsc_dep5 "Must not 3   "
 do_test rsc_dep7 "Must 3       "
 do_test rsc_dep10 "Must (but cant)"
 do_test rsc_dep2  "Must (running) "
 do_test rsc_dep8  "Must (running : alt) "
 do_test rsc_dep4  "Must (running + move)"
 do_test asymmetric "Asymmetric - require explicit location constraints"
 
 echo ""
 do_test orphan-0 "Orphan ignore"
 do_test orphan-1 "Orphan stop"
 do_test orphan-2 "Orphan stop, remove failcount"
 
 echo ""
 do_test params-0 "Params: No change"
 do_test params-1 "Params: Changed"
 do_test params-2 "Params: Resource definition"
 do_test params-4 "Params: Reload"
 do_test params-5 "Params: Restart based on probe digest"
 do_test novell-251689 "Resource definition change + target_role=stopped"
 do_test bug-lf-2106 "Restart all anonymous clone instances after config change"
 do_test params-6 "Params: Detect reload in previously migrated resource"
 do_test nvpair-id-ref "Support id-ref in nvpair with optional name"
 do_test not-reschedule-unneeded-monitor "Do not reschedule unneeded monitors while resource definitions have changed"
 
 echo ""
 do_test target-0 "Target Role : baseline"
 do_test target-1 "Target Role : master"
 do_test target-2 "Target Role : invalid"
 
 echo ""
 do_test base-score "Set a node's default score for all nodes"
 
 echo ""
 do_test date-1 "Dates" -t "2005-020"
 do_test date-2 "Date Spec - Pass" -t "2005-020T12:30"
 do_test date-3 "Date Spec - Fail" -t "2005-020T11:30"
 do_test origin "Timing of recurring operations" -t "2014-05-07 00:28:00" 
 do_test probe-0 "Probe (anon clone)"
 do_test probe-1 "Pending Probe"
 do_test probe-2 "Correctly re-probe cloned groups"
 do_test probe-3 "Probe (pending node)"
 do_test probe-4 "Probe (pending node + stopped resource)"
 do_test standby "Standby"
 do_test comments "Comments"
 
 echo ""
 do_test one-or-more-0 "Everything starts"
 do_test one-or-more-1 "Nothing starts because of A"
 do_test one-or-more-2 "D can start because of C"
 do_test one-or-more-3 "D cannot start because of B and C"
 do_test one-or-more-4 "D cannot start because of target-role"
 do_test one-or-more-5 "Start A and F even though C and D are stopped"
 do_test one-or-more-6 "Leave A running even though B is stopped"
 do_test one-or-more-7 "Leave A running even though C is stopped"
 do_test bug-5140-require-all-false "Allow basegrp:0 to stop"
 do_test clone-require-all-1 "clone B starts node 3 and 4"
 do_test clone-require-all-2 "clone B remains stopped everywhere"
 do_test clone-require-all-3 "clone B stops everywhere because A stops everywhere"
 do_test clone-require-all-4 "clone B remains on node 3 and 4 with only one instance of A remaining."
 do_test clone-require-all-5 "clone B starts on node 1 3 and 4"
 do_test clone-require-all-6 "clone B remains active after shutting down instances of A"
 do_test clone-require-all-7 "clone A and B both start at the same time. all instances of A start before B."
 do_test clone-require-all-no-interleave-1 "C starts everywhere after A and B"
 do_test clone-require-all-no-interleave-2 "C starts on nodes 1, 2, and 4 with only one active instance of B"
 do_test clone-require-all-no-interleave-3 "C remains active when instance of B is stopped on one node and started on another."
 do_test one-or-more-unrunnnable-instances "Avoid dependencies on instances that won't ever be started"
 
 echo ""
 do_test order1 "Order start 1     "
 do_test order2 "Order start 2     "
 do_test order3 "Order stop	  "
 do_test order4 "Order (multiple)  "
 do_test order5 "Order (move)  "
 do_test order6 "Order (move w/ restart)  "
 do_test order7 "Order (mandatory)  "
 do_test order-optional "Order (score=0)  "
 do_test order-required "Order (score=INFINITY)  "
 do_test bug-lf-2171 "Prevent group start when clone is stopped"
 do_test order-clone "Clone ordering should be able to prevent startup of dependent clones"
 do_test order-sets "Ordering for resource sets"
 do_test order-serialize "Serialize resources without inhibiting migration"
 do_test order-serialize-set "Serialize a set of resources without inhibiting migration"
 do_test clone-order-primitive "Order clone start after a primitive"
 do_test clone-order-16instances "Verify ordering of 16 cloned resources"
 do_test order-optional-keyword "Order (optional keyword)"
 do_test order-mandatory "Order (mandatory keyword)"
 do_test bug-lf-2493 "Don't imply colocation requirements when applying ordering constraints with clones"
 do_test ordered-set-basic-startup "Constraint set with default order settings."
 do_test ordered-set-natural "Allow natural set ordering"
 do_test order-wrong-kind "Order (error)"
 
 echo ""
 do_test coloc-loop "Colocation - loop"
 do_test coloc-many-one "Colocation - many-to-one"
 do_test coloc-list "Colocation - many-to-one with list"
 do_test coloc-group "Colocation - groups"
 do_test coloc-slave-anti "Anti-colocation with slave shouldn't prevent master colocation"
 do_test coloc-attr "Colocation based on node attributes"
 do_test coloc-negative-group "Negative colocation with a group"
 do_test coloc-intra-set "Intra-set colocation"
 do_test bug-lf-2435 "Colocation sets with a negative score"
 do_test coloc-clone-stays-active "Ensure clones don't get stopped/demoted because a dependent must stop"
 do_test coloc_fp_logic "Verify floating point calculations in colocation are working"
 do_test colo_master_w_native "cl#5070 - Verify promotion order is affected when colocating master to native rsc."
 do_test colo_slave_w_native  "cl#5070 - Verify promotion order is affected when colocating slave to native rsc."
 do_test anti-colocation-order "cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node"
 do_test anti-colocation-master "Organize order of actions for master resources in anti-colocations"
 do_test anti-colocation-slave "Organize order of actions for slave resources in anti-colocations"
 do_test enforce-colo1 "Always enforce B with A INFINITY."
 do_test complex_enforce_colo "Always enforce B with A INFINITY. (make sure heat-engine stops)"
 
 echo ""
 do_test rsc-sets-seq-true "Resource Sets - sequential=false"
 do_test rsc-sets-seq-false "Resource Sets - sequential=true"
 do_test rsc-sets-clone "Resource Sets - Clone"
 do_test rsc-sets-master "Resource Sets - Master"
 do_test rsc-sets-clone-1 "Resource Sets - Clone (lf#2404)"
 
 #echo ""
 #do_test agent1 "version: lt (empty)"
 #do_test agent2 "version: eq	"
 #do_test agent3 "version: gt	"
 
 echo ""
 do_test attrs1 "string: eq (and)     "
 do_test attrs2 "string: lt / gt (and)"
 do_test attrs3 "string: ne (or)      "
 do_test attrs4 "string: exists       "
 do_test attrs5 "string: not_exists   "
 do_test attrs6 "is_dc: true          "
 do_test attrs7 "is_dc: false         "
 do_test attrs8 "score_attribute      "
 do_test per-node-attrs "Per node resource parameters"
 
 echo ""
 do_test mon-rsc-1 "Schedule Monitor - start"
 do_test mon-rsc-2 "Schedule Monitor - move "
 do_test mon-rsc-3 "Schedule Monitor - pending start     "
 do_test mon-rsc-4 "Schedule Monitor - move/pending start"
 
 echo ""
 do_test rec-rsc-0 "Resource Recover - no start     "
 do_test rec-rsc-1 "Resource Recover - start        "
 do_test rec-rsc-2 "Resource Recover - monitor      "
 do_test rec-rsc-3 "Resource Recover - stop - ignore"
 do_test rec-rsc-4 "Resource Recover - stop - block "
 do_test rec-rsc-5 "Resource Recover - stop - fence "
 do_test rec-rsc-6 "Resource Recover - multiple - restart"
 do_test rec-rsc-7 "Resource Recover - multiple - stop   "
 do_test rec-rsc-8 "Resource Recover - multiple - block  "
 do_test rec-rsc-9 "Resource Recover - group/group"
 do_test monitor-recovery "on-fail=block + resource recovery detected by recurring monitor"
 do_test stop-failure-no-quorum "Stop failure without quorum"
 do_test stop-failure-no-fencing "Stop failure without fencing available"
 do_test stop-failure-with-fencing "Stop failure with fencing available"
 do_test multiple-active-block-group "Support of multiple-active=block for resource groups"
 do_test multiple-monitor-one-failed "Consider resource failed if any of the configured monitor operations failed"
 
 echo ""
 do_test quorum-1 "No quorum - ignore"
 do_test quorum-2 "No quorum - freeze"
 do_test quorum-3 "No quorum - stop  "
 do_test quorum-4 "No quorum - start anyway"
 do_test quorum-5 "No quorum - start anyway (group)"
 do_test quorum-6 "No quorum - start anyway (clone)"
 do_test bug-cl-5212 "No promotion with no-quorum-policy=freeze"
 
 echo ""
 do_test rec-node-1 "Node Recover - Startup   - no fence"
 do_test rec-node-2 "Node Recover - Startup   - fence   "
 do_test rec-node-3 "Node Recover - HA down   - no fence"
 do_test rec-node-4 "Node Recover - HA down   - fence   "
 do_test rec-node-5 "Node Recover - CRM down  - no fence"
 do_test rec-node-6 "Node Recover - CRM down  - fence   "
 do_test rec-node-7 "Node Recover - no quorum - ignore  "
 do_test rec-node-8 "Node Recover - no quorum - freeze  "
 do_test rec-node-9 "Node Recover - no quorum - stop    "
 do_test rec-node-10 "Node Recover - no quorum - stop w/fence"
 do_test rec-node-11 "Node Recover - CRM down w/ group - fence   "
 do_test rec-node-12 "Node Recover - nothing active - fence   "
 do_test rec-node-13 "Node Recover - failed resource + shutdown - fence   "
 do_test rec-node-15 "Node Recover - unknown lrm section"
do_test rec-node-14 "Serialize all stonith operations"
 
 echo ""
 do_test multi1 "Multiple Active (stop/start)"
 
 echo ""
 do_test migrate-begin     "Normal migration"
 do_test migrate-success   "Completed migration"
 do_test migrate-partial-1 "Completed migration, missing stop on source"
 do_test migrate-partial-2 "Successful migrate_to only"
 do_test migrate-partial-3 "Successful migrate_to only, target down"
 do_test migrate-partial-4 "Migrate from the correct host after migrate_to+migrate_from"
 do_test bug-5186-partial-migrate "Handle partial migration when src node loses membership"
 
 do_test migrate-fail-2 "Failed migrate_from"
 do_test migrate-fail-3 "Failed migrate_from + stop on source"
 do_test migrate-fail-4 "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target"
 do_test migrate-fail-5 "Failed migrate_from + stop on source and target"
 
 do_test migrate-fail-6 "Failed migrate_to"
 do_test migrate-fail-7 "Failed migrate_to + stop on source"
 do_test migrate-fail-8 "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target"
 do_test migrate-fail-9 "Failed migrate_to + stop on source and target"
 
 do_test migrate-stop "Migration in a stopping stack"
 do_test migrate-start "Migration in a starting stack"
 do_test migrate-stop_start "Migration in a restarting stack"
 do_test migrate-stop-complex "Migration in a complex stopping stack"
 do_test migrate-start-complex "Migration in a complex starting stack"
 do_test migrate-stop-start-complex "Migration in a complex moving stack"
 do_test migrate-shutdown "Order the post-migration 'stop' before node shutdown"
 
 do_test migrate-1 "Migrate (migrate)"
 do_test migrate-2 "Migrate (stable)"
 do_test migrate-3 "Migrate (failed migrate_to)"
 do_test migrate-4 "Migrate (failed migrate_from)"
 do_test novell-252693 "Migration in a stopping stack"
 do_test novell-252693-2 "Migration in a starting stack"
 do_test novell-252693-3 "Non-Migration in a starting and stopping stack"
 do_test bug-1820 "Migration in a group"
 do_test bug-1820-1 "Non-migration in a group"
 do_test migrate-5 "Primitive migration with a clone"
 do_test migrate-fencing "Migration after Fencing"
 do_test migrate-both-vms "Migrate two VMs that have no colocation"
 
do_test 1-a-then-bm-move-b "Advanced migrate logic: A then B, migrate B"
do_test 2-am-then-b-move-a "Advanced migrate logic: A then B, migrate A without stopping B"
do_test 3-am-then-bm-both-migrate "Advanced migrate logic: A then B, migrate both"
do_test 4-am-then-bm-b-not-migratable "Advanced migrate logic: A then B, B not migratable"
do_test 5-am-then-bm-a-not-migratable "Advanced migrate logic: A then B, move both, A not migratable"
 do_test 6-migrate-group "Advanced migrate logic, migrate a group"
 do_test 7-migrate-group-one-unmigratable "Advanced migrate logic, migrate group mixed with allow-migrate true/false"
 do_test 8-am-then-bm-a-migrating-b-stopping "Advanced migrate logic, A then B, A migrating, B stopping"
 do_test 9-am-then-bm-b-migrating-a-stopping "Advanced migrate logic, A then B, B migrate, A stopping"
 do_test 10-a-then-bm-b-move-a-clone "Advanced migrate logic, A clone then B, migrate B while stopping A"
 do_test 11-a-then-bm-b-move-a-clone-starting "Advanced migrate logic, A clone then B, B moving while A is start/stopping"
 
do_test a-promote-then-b-migrate "A promote then B start, migrate B"
do_test a-demote-then-b-migrate "A demote then B stop, migrate B"
 
 do_test migrate-versioned "Disable migration for versioned resources"
 
 #echo ""
 #do_test complex1 "Complex	"
 
 do_test bug-lf-2422 "Dependency on partially active group - stop ocfs:*"
 
 echo ""
 do_test clone-anon-probe-1 "Probe the correct (anonymous) clone instance for each node"
 do_test clone-anon-probe-2 "Avoid needless re-probing of anonymous clones"
 do_test clone-anon-failcount "Merge failcounts for anonymous clones"
 do_test inc0 "Incarnation start"
 do_test inc1 "Incarnation start order"
 do_test inc2 "Incarnation silent restart, stop, move"
 do_test inc3 "Inter-incarnation ordering, silent restart, stop, move"
 do_test inc4 "Inter-incarnation ordering, silent restart, stop, move (ordered)"
 do_test inc5 "Inter-incarnation ordering, silent restart, stop, move (restart 1)"
 do_test inc6 "Inter-incarnation ordering, silent restart, stop, move (restart 2)"
 do_test inc7 "Clone colocation"
 do_test inc8 "Clone anti-colocation"
 do_test inc9 "Non-unique clone"
 do_test inc10 "Non-unique clone (stop)"
 do_test inc11 "Primitive colocation with clones"
 do_test inc12 "Clone shutdown"
 do_test cloned-group "Make sure only the correct number of cloned groups are started"
 do_test cloned-group-stop "Ensure stopping qpidd also stops glance and cinder"
 do_test clone-no-shuffle "Don't prioritize allocation of instances that must be moved"
 do_test clone-max-zero "Orphan processing with clone-max=0"
 do_test clone-anon-dup "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node"
 do_test bug-lf-2160 "Don't shuffle clones due to colocation"
 do_test bug-lf-2213 "clone-node-max enforcement for cloned groups"
 do_test bug-lf-2153 "Clone ordering constraints"
 do_test bug-lf-2361 "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable"
 do_test bug-lf-2317 "Avoid needless restart of primitive depending on a clone"
 do_test clone-colocate-instance-1 "Colocation with a specific clone instance (negative example)"
 do_test clone-colocate-instance-2 "Colocation with a specific clone instance"
 do_test clone-order-instance "Ordering with specific clone instances"
 do_test bug-lf-2453 "Enforce mandatory clone ordering without colocation"
 do_test bug-lf-2508 "Correctly reconstruct the status of anonymous cloned groups"
 do_test bug-lf-2544 "Balanced clone placement"
 do_test bug-lf-2445 "Redistribute clones with node-max > 1 and stickiness = 0"
 do_test bug-lf-2574 "Avoid clone shuffle"
 do_test bug-lf-2581 "Avoid group restart due to unrelated clone (re)start"
 do_test bug-cl-5168 "Don't shuffle clones"
 do_test bug-cl-5170 "Prevent clone from starting with on-fail=block"
 do_test clone-fail-block-colocation "Move colocated group when failed clone has on-fail=block"
 do_test clone-interleave-1 "Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)"
 do_test clone-interleave-2 "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)"
 do_test clone-interleave-3 "Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)"
 
 echo ""
 do_test cloned_start_one  "order first clone then clone... first clone_min=2"
 do_test cloned_start_two  "order first clone then clone... first clone_min=2"
 do_test cloned_stop_one   "order first clone then clone... first clone_min=2"
 do_test cloned_stop_two   "order first clone then clone... first clone_min=2"
 do_test clone_min_interleave_start_one "order first clone then clone... first clone_min=2 and then has interleave=true"
 do_test clone_min_interleave_start_two "order first clone then clone... first clone_min=2 and then has interleave=true"
 do_test clone_min_interleave_stop_one  "order first clone then clone... first clone_min=2 and then has interleave=true"
 do_test clone_min_interleave_stop_two  "order first clone then clone... first clone_min=2 and then has interleave=true"
 do_test clone_min_start_one "order first clone then primitive... first clone_min=2"
 do_test clone_min_start_two "order first clone then primitive... first clone_min=2"
 do_test clone_min_stop_all  "order first clone then primitive... first clone_min=2"
 do_test clone_min_stop_one  "order first clone then primitive... first clone_min=2"
 do_test clone_min_stop_two  "order first clone then primitive... first clone_min=2"
 
 echo ""
 do_test unfence-startup "Clean unfencing"
 do_test unfence-definition "Unfencing when the agent changes"
do_test unfence-parameters "Unfencing when the agent parameters change"
 
 echo ""
 do_test master-0 "Stopped -> Slave"
 do_test master-1 "Stopped -> Promote"
 do_test master-2 "Stopped -> Promote : notify"
 do_test master-3 "Stopped -> Promote : master location"
 do_test master-4 "Started -> Promote : master location"
 do_test master-5 "Promoted -> Promoted"
 do_test master-6 "Promoted -> Promoted (2)"
 do_test master-7 "Promoted -> Fenced"
 do_test master-8 "Promoted -> Fenced -> Moved"
 do_test master-9 "Stopped + Promotable + No quorum"
 do_test master-10 "Stopped -> Promotable : notify with monitor"
 do_test master-11 "Stopped -> Promote : colocation"
 do_test novell-239082 "Demote/Promote ordering"
 do_test novell-239087 "Stable master placement"
 do_test master-12 "Promotion based solely on rsc_location constraints"
 do_test master-13 "Include preferences of colocated resources when placing master"
 do_test master-demote "Ordering when actions depends on demoting a slave resource"
 do_test master-ordering "Prevent resources from starting that need a master"
do_test bug-1765 "Master-Master Colocation (don't stop the slaves)"
 do_test master-group "Promotion of cloned groups"
 do_test bug-lf-1852 "Don't shuffle master/slave instances unnecessarily"
 do_test master-failed-demote "Don't retry failed demote actions"
 do_test master-failed-demote-2 "Don't retry failed demote actions (notify=false)"
 do_test master-depend "Ensure resources that depend on the master don't get allocated until the master does"
 do_test master-reattach "Re-attach to a running master"
 do_test master-allow-start "Don't include master score if it would prevent allocation"
do_test master-colocation "Allow master instance placement to be influenced by colocation constraints"
 do_test master-pseudo "Make sure promote/demote pseudo actions are created correctly"
 do_test master-role "Prevent target-role from promoting more than master-max instances"
 do_test bug-lf-2358 "Master-Master anti-colocation"
 do_test master-promotion-constraint "Mandatory master colocation constraints"
 do_test unmanaged-master "Ensure role is preserved for unmanaged resources"
 do_test master-unmanaged-monitor "Start the correct monitor operation for unmanaged masters"
 do_test master-demote-2 "Demote does not clear past failure"
 do_test master-move "Move master based on failure of colocated group"
 do_test master-probed-score "Observe the promotion score of probed resources"
 do_test colocation_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by colocation constraint"
 do_test colocation_constraint_stops_slave  "cl#5054 - Ensure slave is not demoted when stopped by colocation constraint"
 do_test order_constraint_stops_master      "cl#5054 - Ensure master is demoted when stopped by order constraint"
 do_test order_constraint_stops_slave       "cl#5054 - Ensure slave is not demoted when stopped by order constraint"
 do_test master_monitor_restart "cl#5072 - Ensure master monitor operation will start after promotion."
 do_test bug-rh-880249 "Handle replacement of an m/s resource with a primitive"
 do_test bug-5143-ms-shuffle "Prevent master shuffling due to promotion score"
 do_test master-demote-block "Block promotion if demote fails with on-fail=block"
 do_test master-dependent-ban "Don't stop instances from being active because a dependent is banned from that host"
 do_test master-stop "Stop instances due to location constraint with role=Started"
 do_test master-partially-demoted-group "Allow partially demoted group to finish demoting"
 do_test bug-cl-5213 "Ensure role colocation with -INFINITY is enforced"
 do_test bug-cl-5219 "Allow unrelated resources with a common colocation target to remain promoted"
 do_test master-asymmetrical-order "Fix the behaviors of multi-state resources with asymmetrical ordering"
 do_test master-notify "Master promotion with notifies"
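 
 # Each do_test entry below names a scenario and a one-line description.  By
 # the harness convention (an assumption inferred from the artifact names in
 # this diff), the scheduler is replayed against the scenario's input CIB
 # under pengine/test10/ and its output is compared with the checked-in
 # .dot, .exp, .scores and .summary files for that name:
 #   do_test <scenario-name> <description>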
 
 echo ""
 do_test history-1 "Correctly parse stateful-1 resource state"
 
 echo ""
 do_test managed-0 "Managed (reference)"
 do_test managed-1 "Not managed - down "
 do_test managed-2 "Not managed - up   "
 do_test bug-5028 "Shutdown should block if anything depends on an unmanaged resource"
 do_test bug-5028-detach "Ensure detach still works"
 do_test bug-5028-bottom "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack"
 do_test unmanaged-stop-1 "cl#5155 - Block the stop of resources if any depending resource is unmanaged"
 do_test unmanaged-stop-2 "cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged"
 do_test unmanaged-stop-3 "cl#5155 - Block the stop of resources if any depending resource in a group is unmanaged"
 do_test unmanaged-stop-4 "cl#5155 - Block the stop of resources if any depending resource in the middle of a group is unmanaged"
 do_test unmanaged-block-restart "Block restart of resources if any dependent resource in a group is unmanaged"
 
 echo ""
 do_test interleave-0 "Interleave (reference)"
 do_test interleave-1 "coloc - not interleaved"
 do_test interleave-2 "coloc - interleaved   "
 do_test interleave-3 "coloc - interleaved (2)"
 do_test interleave-pseudo-stop "Interleaved clone during stonith"
 do_test interleave-stop "Interleaved clone during stop"
 do_test interleave-restart "Interleaved clone during dependency restart"
 
 echo ""
 do_test notify-0 "Notify reference"
 do_test notify-1 "Notify simple"
 do_test notify-2 "Notify simple, confirm"
 do_test notify-3 "Notify move, confirm"
 do_test novell-239079 "Notification priority"
 #do_test notify-2 "Notify - 764"
 
 echo ""
 do_test 594 "OSDL #594 - Unrunnable actions scheduled in transition"
 do_test 662 "OSDL #662 - Two resources start on one node when incarnation_node_max = 1"
 do_test 696 "OSDL #696 - CRM starts stonith RA without monitor"
 do_test 726 "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop"
 do_test 735 "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3"
 do_test 764 "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1"
 do_test 797 "OSDL #797 - Assert triggered: task_id_i > max_call_id"
 do_test 829 "OSDL #829"
 do_test 994 "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted"
 do_test 994-2 "OSDL #994 - with a dependent resource"
 do_test 1360 "OSDL #1360 - Clone stickiness"
 do_test 1484 "OSDL #1484 - on_fail=stop"
 do_test 1494 "OSDL #1494 - Clone stability"
 do_test unrunnable-1 "Unrunnable"
 do_test unrunnable-2 "Unrunnable 2"
 do_test stonith-0 "Stonith loop - 1"
 do_test stonith-1 "Stonith loop - 2"
 do_test stonith-2 "Stonith loop - 3"
 do_test stonith-3 "Stonith startup"
 do_test stonith-4 "Stonith node state"
 do_test bug-1572-1 "Recovery of groups depending on master/slave"
 do_test bug-1572-2 "Recovery of groups depending on master/slave when the master is never re-promoted"
 do_test bug-1685 "Depends-on-master ordering"
 do_test bug-1822 "Don't promote partially active groups"
 do_test bug-pm-11 "New resource added to a m/s group"
 do_test bug-pm-12 "Recover only the failed portion of a cloned group"
 do_test bug-n-387749 "Don't shuffle clone instances"
 do_test bug-n-385265 "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped"
 do_test bug-n-385265-2 "Ensure groups are migrated instead of remaining partially active on the current node"
 do_test bug-lf-1920 "Correctly handle probes that find active resources"
 do_test bnc-515172 "Location constraint with multiple expressions"
 do_test colocate-primitive-with-clone "Optional colocation with a clone"
 do_test use-after-free-merge "Use-after-free in native_merge_weights"
 do_test bug-lf-2551 "STONITH ordering for stop"
 do_test bug-lf-2606 "Stonith implies demote"
 do_test bug-lf-2474 "Ensure resource op timeout takes precedence over op_defaults"
 do_test bug-suse-707150 "Prevent vm-01 from starting due to colocation/ordering"
 do_test bug-5014-A-start-B-start "Verify when A starts B starts using symmetrical=false"
 do_test bug-5014-A-stop-B-started "Verify when A stops B does not stop if it has already started using symmetrical=false"
 do_test bug-5014-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using symmetrical=false"
 do_test bug-5014-CthenAthenB-C-stopped "Verify when C then A is symmetrical=true, A then B is symmetrical=false, and C is stopped, nothing starts"
 do_test bug-5014-CLONE-A-start-B-start "Verify when A starts B starts using clone resources with symmetrical=false"
 do_test bug-5014-CLONE-A-stop-B-started "Verify when A stops B does not stop if it has already started using clone resources with symmetrical=false"
 do_test bug-5014-GROUP-A-start-B-start "Verify when A starts B starts when using group resources with symmetrical=false"
 do_test bug-5014-GROUP-A-stopped-B-started "Verify when A stops B does not stop if it has already started using group resources with symmetrical=false"
 do_test bug-5014-GROUP-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetrical=false"
 do_test bug-5014-ordered-set-symmetrical-false "Verify ordered sets work with symmetrical=false"
 do_test bug-5014-ordered-set-symmetrical-true "Verify ordered sets work with symmetrical=true"
 do_test bug-5007-masterslave_colocation "Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources."
 do_test bug-5038 "Prevent restart of anonymous clones when clone-max decreases"
 do_test bug-5025-1 "Automatically clean up failcount after resource config change with reload"
 do_test bug-5025-2 "Make sure clear failcount action isn't set when config does not change."
 do_test bug-5025-3 "Automatically clean up failcount after resource config change with restart"
 do_test bug-5025-4 "Clear failcount when last failure is a start op and rsc attributes changed."
 do_test failcount "Ensure failcounts are correctly expired"
 do_test failcount-block "Ensure failcounts are not expired when on-fail=block is present"
 do_test per-op-failcount "Ensure per-operation failcount is handled and not passed to fence agent"
 do_test monitor-onfail-restart "bug-5058 - Monitor failure with on-fail set to restart"
 do_test monitor-onfail-stop    "bug-5058 - Monitor failure with on-fail set to stop"
 do_test bug-5059 "No need to restart p_stateful1:*"
 do_test bug-5069-op-enabled  "Test on-fail=ignore with failure when monitor is enabled."
 do_test bug-5069-op-disabled "Test on-fail=ignore with failure when monitor is disabled."
 do_test obsolete-lrm-resource "cl#5115 - Do not use obsolete lrm_resource sections"
 do_test expire-non-blocked-failure "Ignore failure-timeout only if the failed operation has on-fail=block"
 do_test asymmetrical-order-move "Respect asymmetrical ordering when trying to move resources"
 do_test start-then-stop-with-unfence "Avoid graph loop with start-then-stop constraint plus unfencing"
 
 do_test ignore_stonith_rsc_order1 "cl#5056 - Ignore order constraint between stonith and non-stonith rsc."
 do_test ignore_stonith_rsc_order2 "cl#5056 - Ignore order constraint with group rsc containing mixed stonith and non-stonith."
 do_test ignore_stonith_rsc_order3 "cl#5056 - Ignore order constraint, stonith clone and mixed group"
 do_test ignore_stonith_rsc_order4 "cl#5056 - Ignore order constraint, stonith clone and clone with nested mixed group"
 do_test honor_stonith_rsc_order1 "cl#5056 - Honor order constraint, stonith clone and pure stonith group (single rsc)."
 do_test honor_stonith_rsc_order2 "cl#5056 - Honor order constraint, stonith clone and pure stonith group (multiple rsc)"
 do_test honor_stonith_rsc_order3 "cl#5056 - Honor order constraint, stonith clones with nested pure stonith group."
 do_test honor_stonith_rsc_order4 "cl#5056 - Honor order constraint between two native stonith rscs."
 do_test probe-timeout "cl#5099 - Default probe timeout"
 
 do_test concurrent-fencing "Allow performing fencing operations in parallel"
 
 echo ""
 do_test systemhealth1  "System Health ()               #1"
 do_test systemhealth2  "System Health ()               #2"
 do_test systemhealth3  "System Health ()               #3"
 do_test systemhealthn1 "System Health (None)           #1"
 do_test systemhealthn2 "System Health (None)           #2"
 do_test systemhealthn3 "System Health (None)           #3"
 do_test systemhealthm1 "System Health (Migrate On Red) #1"
 do_test systemhealthm2 "System Health (Migrate On Red) #2"
 do_test systemhealthm3 "System Health (Migrate On Red) #3"
 do_test systemhealtho1 "System Health (Only Green)     #1"
 do_test systemhealtho2 "System Health (Only Green)     #2"
 do_test systemhealtho3 "System Health (Only Green)     #3"
 do_test systemhealthp1 "System Health (Progressive)    #1"
 do_test systemhealthp2 "System Health (Progressive)    #2"
 do_test systemhealthp3 "System Health (Progressive)    #3"
 
 echo ""
 do_test utilization "Placement Strategy - utilization"
 do_test minimal     "Placement Strategy - minimal"
 do_test balanced    "Placement Strategy - balanced"
 
 echo ""
 do_test placement-stickiness "Optimized Placement Strategy - stickiness"
 do_test placement-priority   "Optimized Placement Strategy - priority"
 do_test placement-location   "Optimized Placement Strategy - location"
 do_test placement-capacity   "Optimized Placement Strategy - capacity"
 
 echo ""
 do_test utilization-order1 "Utilization Order - Simple"
 do_test utilization-order2 "Utilization Order - Complex"
 do_test utilization-order3 "Utilization Order - Migrate"
 do_test utilization-order4 "Utilization Order - Live Migration (bnc#695440)"
 do_test utilization-shuffle "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3"
 do_test load-stopped-loop "Avoid transition loop due to load_stopped (cl#5044)"
 do_test load-stopped-loop-2 "cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering"
 
 echo ""
 do_test colocated-utilization-primitive-1 "Colocated Utilization - Primitive"
 do_test colocated-utilization-primitive-2 "Colocated Utilization - Choose the most capable node"
 do_test colocated-utilization-group "Colocated Utilization - Group"
 do_test colocated-utilization-clone "Colocated Utilization - Clone"
 
 do_test utilization-check-allowed-nodes "Only check the capacities of the nodes that can run the resource"
 
 echo ""
 do_test reprobe-target_rc "Ensure correct target_rc for reprobe of inactive resources"
 do_test node-maintenance-1 "cl#5128 - Node maintenance"
 do_test node-maintenance-2 "cl#5128 - Node maintenance (coming out of maintenance mode)"
 do_test shutdown-maintenance-node "Do not fence a maintenance node if it shuts down cleanly"
 
 do_test rsc-maintenance "Per-resource maintenance"
 
 echo ""
 do_test not-installed-agent "The resource agent is missing"
 do_test not-installed-tools "Something the resource agent needs is missing"
 
 echo ""
 do_test stopped-monitor-00	"Stopped Monitor - initial start"
 do_test stopped-monitor-01	"Stopped Monitor - failed started"
 do_test stopped-monitor-02	"Stopped Monitor - started multi-up"
 do_test stopped-monitor-03	"Stopped Monitor - stop started"
 do_test stopped-monitor-04	"Stopped Monitor - failed stop"
 do_test stopped-monitor-05	"Stopped Monitor - start unmanaged"
 do_test stopped-monitor-06	"Stopped Monitor - unmanaged multi-up"
 do_test stopped-monitor-07	"Stopped Monitor - start unmanaged multi-up"
 do_test stopped-monitor-08	"Stopped Monitor - migrate"
 do_test stopped-monitor-09	"Stopped Monitor - unmanage started"
 do_test stopped-monitor-10	"Stopped Monitor - unmanaged started multi-up"
 do_test stopped-monitor-11	"Stopped Monitor - stop unmanaged started"
 do_test stopped-monitor-12	"Stopped Monitor - unmanaged started multi-up (target-role="Stopped")"
 do_test stopped-monitor-20	"Stopped Monitor - initial stop"
 do_test stopped-monitor-21	"Stopped Monitor - stopped single-up"
 do_test stopped-monitor-22	"Stopped Monitor - stopped multi-up"
 do_test stopped-monitor-23	"Stopped Monitor - start stopped"
 do_test stopped-monitor-24	"Stopped Monitor - unmanage stopped"
 do_test stopped-monitor-25	"Stopped Monitor - unmanaged stopped multi-up"
 do_test stopped-monitor-26	"Stopped Monitor - start unmanaged stopped"
 do_test stopped-monitor-27	"Stopped Monitor - unmanaged stopped multi-up (target-role="Started")"
 do_test stopped-monitor-30	"Stopped Monitor - new node started"
 do_test stopped-monitor-31	"Stopped Monitor - new node stopped"
 
 echo""
 do_test ticket-primitive-1 "Ticket - Primitive (loss-policy=stop, initial)"
 do_test ticket-primitive-2 "Ticket - Primitive (loss-policy=stop, granted)"
 do_test ticket-primitive-3 "Ticket - Primitive (loss-policy=stop, revoked)"
 do_test ticket-primitive-4 "Ticket - Primitive (loss-policy=demote, initial)"
 do_test ticket-primitive-5 "Ticket - Primitive (loss-policy=demote, granted)"
 do_test ticket-primitive-6 "Ticket - Primitive (loss-policy=demote, revoked)"
 do_test ticket-primitive-7 "Ticket - Primitive (loss-policy=fence, initial)"
 do_test ticket-primitive-8 "Ticket - Primitive (loss-policy=fence, granted)"
 do_test ticket-primitive-9 "Ticket - Primitive (loss-policy=fence, revoked)"
 do_test ticket-primitive-10 "Ticket - Primitive (loss-policy=freeze, initial)"
 do_test ticket-primitive-11 "Ticket - Primitive (loss-policy=freeze, granted)"
 do_test ticket-primitive-12 "Ticket - Primitive (loss-policy=freeze, revoked)"
 
 do_test ticket-primitive-13 "Ticket - Primitive (loss-policy=stop, standby, granted)"
 do_test ticket-primitive-14 "Ticket - Primitive (loss-policy=stop, granted, standby)"
 do_test ticket-primitive-15 "Ticket - Primitive (loss-policy=stop, standby, revoked)"
 do_test ticket-primitive-16 "Ticket - Primitive (loss-policy=demote, standby, granted)"
 do_test ticket-primitive-17 "Ticket - Primitive (loss-policy=demote, granted, standby)"
 do_test ticket-primitive-18 "Ticket - Primitive (loss-policy=demote, standby, revoked)"
 do_test ticket-primitive-19 "Ticket - Primitive (loss-policy=fence, standby, granted)"
 do_test ticket-primitive-20 "Ticket - Primitive (loss-policy=fence, granted, standby)"
 do_test ticket-primitive-21 "Ticket - Primitive (loss-policy=fence, standby, revoked)"
 do_test ticket-primitive-22 "Ticket - Primitive (loss-policy=freeze, standby, granted)"
 do_test ticket-primitive-23 "Ticket - Primitive (loss-policy=freeze, granted, standby)"
 do_test ticket-primitive-24 "Ticket - Primitive (loss-policy=freeze, standby, revoked)"
 
 echo""
 do_test ticket-group-1 "Ticket - Group (loss-policy=stop, initial)"
 do_test ticket-group-2 "Ticket - Group (loss-policy=stop, granted)"
 do_test ticket-group-3 "Ticket - Group (loss-policy=stop, revoked)"
 do_test ticket-group-4 "Ticket - Group (loss-policy=demote, initial)"
 do_test ticket-group-5 "Ticket - Group (loss-policy=demote, granted)"
 do_test ticket-group-6 "Ticket - Group (loss-policy=demote, revoked)"
 do_test ticket-group-7 "Ticket - Group (loss-policy=fence, initial)"
 do_test ticket-group-8 "Ticket - Group (loss-policy=fence, granted)"
 do_test ticket-group-9 "Ticket - Group (loss-policy=fence, revoked)"
 do_test ticket-group-10 "Ticket - Group (loss-policy=freeze, initial)"
 do_test ticket-group-11 "Ticket - Group (loss-policy=freeze, granted)"
 do_test ticket-group-12 "Ticket - Group (loss-policy=freeze, revoked)"
 
 do_test ticket-group-13 "Ticket - Group (loss-policy=stop, standby, granted)"
 do_test ticket-group-14 "Ticket - Group (loss-policy=stop, granted, standby)"
 do_test ticket-group-15 "Ticket - Group (loss-policy=stop, standby, revoked)"
 do_test ticket-group-16 "Ticket - Group (loss-policy=demote, standby, granted)"
 do_test ticket-group-17 "Ticket - Group (loss-policy=demote, granted, standby)"
 do_test ticket-group-18 "Ticket - Group (loss-policy=demote, standby, revoked)"
 do_test ticket-group-19 "Ticket - Group (loss-policy=fence, standby, granted)"
 do_test ticket-group-20 "Ticket - Group (loss-policy=fence, granted, standby)"
 do_test ticket-group-21 "Ticket - Group (loss-policy=fence, standby, revoked)"
 do_test ticket-group-22 "Ticket - Group (loss-policy=freeze, standby, granted)"
 do_test ticket-group-23 "Ticket - Group (loss-policy=freeze, granted, standby)"
 do_test ticket-group-24 "Ticket - Group (loss-policy=freeze, standby, revoked)"
 
 echo""
 do_test ticket-clone-1 "Ticket - Clone (loss-policy=stop, initial)"
 do_test ticket-clone-2 "Ticket - Clone (loss-policy=stop, granted)"
 do_test ticket-clone-3 "Ticket - Clone (loss-policy=stop, revoked)"
 do_test ticket-clone-4 "Ticket - Clone (loss-policy=demote, initial)"
 do_test ticket-clone-5 "Ticket - Clone (loss-policy=demote, granted)"
 do_test ticket-clone-6 "Ticket - Clone (loss-policy=demote, revoked)"
 do_test ticket-clone-7 "Ticket - Clone (loss-policy=fence, initial)"
 do_test ticket-clone-8 "Ticket - Clone (loss-policy=fence, granted)"
 do_test ticket-clone-9 "Ticket - Clone (loss-policy=fence, revoked)"
 do_test ticket-clone-10 "Ticket - Clone (loss-policy=freeze, initial)"
 do_test ticket-clone-11 "Ticket - Clone (loss-policy=freeze, granted)"
 do_test ticket-clone-12 "Ticket - Clone (loss-policy=freeze, revoked)"
 
 do_test ticket-clone-13 "Ticket - Clone (loss-policy=stop, standby, granted)"
 do_test ticket-clone-14 "Ticket - Clone (loss-policy=stop, granted, standby)"
 do_test ticket-clone-15 "Ticket - Clone (loss-policy=stop, standby, revoked)"
 do_test ticket-clone-16 "Ticket - Clone (loss-policy=demote, standby, granted)"
 do_test ticket-clone-17 "Ticket - Clone (loss-policy=demote, granted, standby)"
 do_test ticket-clone-18 "Ticket - Clone (loss-policy=demote, standby, revoked)"
 do_test ticket-clone-19 "Ticket - Clone (loss-policy=fence, standby, granted)"
 do_test ticket-clone-20 "Ticket - Clone (loss-policy=fence, granted, standby)"
 do_test ticket-clone-21 "Ticket - Clone (loss-policy=fence, standby, revoked)"
 do_test ticket-clone-22 "Ticket - Clone (loss-policy=freeze, standby, granted)"
 do_test ticket-clone-23 "Ticket - Clone (loss-policy=freeze, granted, standby)"
 do_test ticket-clone-24 "Ticket - Clone (loss-policy=freeze, standby, revoked)"
 
 echo""
 do_test ticket-master-1 "Ticket - Master (loss-policy=stop, initial)"
 do_test ticket-master-2 "Ticket - Master (loss-policy=stop, granted)"
 do_test ticket-master-3 "Ticket - Master (loss-policy=stop, revoked)"
 do_test ticket-master-4 "Ticket - Master (loss-policy=demote, initial)"
 do_test ticket-master-5 "Ticket - Master (loss-policy=demote, granted)"
 do_test ticket-master-6 "Ticket - Master (loss-policy=demote, revoked)"
 do_test ticket-master-7 "Ticket - Master (loss-policy=fence, initial)"
 do_test ticket-master-8 "Ticket - Master (loss-policy=fence, granted)"
 do_test ticket-master-9 "Ticket - Master (loss-policy=fence, revoked)"
 do_test ticket-master-10 "Ticket - Master (loss-policy=freeze, initial)"
 do_test ticket-master-11 "Ticket - Master (loss-policy=freeze, granted)"
 do_test ticket-master-12 "Ticket - Master (loss-policy=freeze, revoked)"
 
 do_test ticket-master-13 "Ticket - Master (loss-policy=stop, standby, granted)"
 do_test ticket-master-14 "Ticket - Master (loss-policy=stop, granted, standby)"
 do_test ticket-master-15 "Ticket - Master (loss-policy=stop, standby, revoked)"
 do_test ticket-master-16 "Ticket - Master (loss-policy=demote, standby, granted)"
 do_test ticket-master-17 "Ticket - Master (loss-policy=demote, granted, standby)"
 do_test ticket-master-18 "Ticket - Master (loss-policy=demote, standby, revoked)"
 do_test ticket-master-19 "Ticket - Master (loss-policy=fence, standby, granted)"
 do_test ticket-master-20 "Ticket - Master (loss-policy=fence, granted, standby)"
 do_test ticket-master-21 "Ticket - Master (loss-policy=fence, standby, revoked)"
 do_test ticket-master-22 "Ticket - Master (loss-policy=freeze, standby, granted)"
 do_test ticket-master-23 "Ticket - Master (loss-policy=freeze, granted, standby)"
 do_test ticket-master-24 "Ticket - Master (loss-policy=freeze, standby, revoked)"
 
 echo ""
 do_test ticket-rsc-sets-1 "Ticket - Resource sets (1 ticket, initial)"
 do_test ticket-rsc-sets-2 "Ticket - Resource sets (1 ticket, granted)"
 do_test ticket-rsc-sets-3 "Ticket - Resource sets (1 ticket, revoked)"
 do_test ticket-rsc-sets-4 "Ticket - Resource sets (2 tickets, initial)"
 do_test ticket-rsc-sets-5 "Ticket - Resource sets (2 tickets, granted)"
 do_test ticket-rsc-sets-6 "Ticket - Resource sets (2 tickets, granted)"
 do_test ticket-rsc-sets-7 "Ticket - Resource sets (2 tickets, revoked)"
 
 do_test ticket-rsc-sets-8 "Ticket - Resource sets (1 ticket, standby, granted)"
 do_test ticket-rsc-sets-9 "Ticket - Resource sets (1 ticket, granted, standby)"
 do_test ticket-rsc-sets-10 "Ticket - Resource sets (1 ticket, standby, revoked)"
 do_test ticket-rsc-sets-11 "Ticket - Resource sets (2 tickets, standby, granted)"
 do_test ticket-rsc-sets-12 "Ticket - Resource sets (2 tickets, standby, granted)"
 do_test ticket-rsc-sets-13 "Ticket - Resource sets (2 tickets, granted, standby)"
 do_test ticket-rsc-sets-14 "Ticket - Resource sets (2 tickets, standby, revoked)"
 
 do_test cluster-specific-params "Cluster-specific instance attributes based on rules"
 do_test site-specific-params "Site-specific instance attributes based on rules"
 
 echo ""
 do_test template-1 "Template - 1"
 do_test template-2 "Template - 2"
 do_test template-3 "Template - 3 (merge operations)"
 
 do_test template-coloc-1 "Template - Colocation 1"
 do_test template-coloc-2 "Template - Colocation 2"
 do_test template-coloc-3 "Template - Colocation 3"
 do_test template-order-1 "Template - Order 1"
 do_test template-order-2 "Template - Order 2"
 do_test template-order-3 "Template - Order 3"
 do_test template-ticket  "Template - Ticket"
 
 do_test template-rsc-sets-1  "Template - Resource Sets 1"
 do_test template-rsc-sets-2  "Template - Resource Sets 2"
 do_test template-rsc-sets-3  "Template - Resource Sets 3"
 do_test template-rsc-sets-4  "Template - Resource Sets 4"
 
 do_test template-clone-primitive "Cloned primitive from template"
 do_test template-clone-group     "Cloned group from template"
 
 do_test location-sets-templates "Resource sets and templates - Location"
 
 do_test tags-coloc-order-1 "Tags - Colocation and Order (Simple)"
 do_test tags-coloc-order-2 "Tags - Colocation and Order (Resource Sets with Templates)"
 do_test tags-location      "Tags - Location"
 do_test tags-ticket        "Tags - Ticket"
 
 echo ""
 do_test container-1 "Container - initial"
 do_test container-2 "Container - monitor failed"
 do_test container-3 "Container - stop failed"
 do_test container-4 "Container - reached migration-threshold"
 do_test container-group-1 "Container in group - initial"
 do_test container-group-2 "Container in group - monitor failed"
 do_test container-group-3 "Container in group - stop failed"
 do_test container-group-4 "Container in group - reached migration-threshold"
 do_test container-is-remote-node "Place resource within container when container is remote-node"
 do_test bug-rh-1097457 "Kill user defined container/contents ordering"
 do_test bug-cl-5247 "Graph loop when recovering m/s resource in a container"
 
 do_test bundle-order-startup "Bundle startup ordering"
 do_test bundle-order-partial-start "Bundle startup ordering when some dependancies are already running"
 do_test bundle-order-partial-start-2 "Bundle startup ordering when some dependancies and the container are already running"
 do_test bundle-order-stop    "Bundle stop ordering"
 do_test bundle-order-partial-stop "Bundle stop ordering when some dependencies are already stopped"
 
 do_test bundle-order-startup-clone "Prevent startup because bundle isn't promoted"
 do_test bundle-order-startup-clone-2 "Bundle startup with clones"
 do_test bundle-order-stop-clone "Stop bundle because clone is stopping"
 do_test bundle-nested-colocation "Colocation of nested connection resources"
 
 echo ""
 do_test whitebox-fail1 "Fail whitebox container rsc."
 do_test whitebox-fail2 "Fail whitebox container rsc lrmd connection."
 do_test whitebox-fail3 "Failed containers should not run nested on remote nodes."
 do_test whitebox-start "Start whitebox container with resources assigned to it"
 do_test whitebox-stop "Stop whitebox container with resources assigned to it"
 do_test whitebox-move "Move whitebox container with resources assigned to it"
 do_test whitebox-asymmetric "Verify connection rsc opts-in based on container resource"
 do_test whitebox-ms-ordering "Verify promote/demote can not occur before connection is established"
 do_test whitebox-ms-ordering-move "Stop/Start cycle within a moving container"
 do_test whitebox-orphaned    "Properly shutdown orphaned whitebox container"
 do_test whitebox-orphan-ms   "Properly tear down orphan ms resources on remote-nodes"
 do_test whitebox-unexpectedly-running "Recover container nodes the cluster did not start."
 do_test whitebox-migrate1 "Migrate both container and connection resource"
 do_test whitebox-imply-stop-on-fence "Imply stop action on container node rsc when host node is fenced"
 do_test whitebox-nested-group "Verify guest remote-node works nested in a group"
 do_test guest-node-host-dies "Verify guest node is recovered if host goes away"
 
 echo ""
 do_test remote-startup-probes  "Baremetal remote-node startup probes"
 do_test remote-startup         "Start a newly discovered remote-node with no status."
 do_test remote-fence-unclean   "Fence unclean baremetal remote-node"
 do_test remote-fence-unclean2  "Fence baremetal remote-node after cluster node fails and connection cannot be recovered"
 do_test remote-fence-unclean-3 "Probe failed remote nodes (triggers fencing)"
 do_test remote-move            "Move remote-node connection resource"
 do_test remote-disable         "Disable a baremetal remote-node"
 do_test remote-probe-disable   "Probe then stop a baremetal remote-node"
 do_test remote-orphaned        "Properly shutdown orphaned connection resource"
 do_test remote-orphaned2       "Verify we can handle orphaned remote connections with active resources on the remote"
 do_test remote-recover         "Recover connection resource after cluster-node fails."
 do_test remote-stale-node-entry "Make sure we properly handle leftover remote-node entries in the node section"
 do_test remote-partial-migrate  "Make sure partial migrations are handled before ops on the remote node."
 do_test remote-partial-migrate2 "Make sure partial migration target is preferred for remote connection."
 do_test remote-recover-fail     "Make sure start failure causes fencing if resources are active on the remote."
 do_test remote-start-fail       "Make sure a start failure does not result in fencing if no active resources are on the remote."
 do_test remote-unclean2         "Make sure monitor failure always results in fencing, even if no resources are active on the remote."
 do_test remote-fence-before-reconnect "Fence before clearing recurring monitor failure"
 do_test remote-recovery		"Recover remote connections before attempting demotion"
 do_test remote-recover-connection "Optimistically recover only the connection"
 do_test remote-recover-all        "Fencing when the connection has no home"
 do_test remote-recover-no-resources   "Fencing when the connection has no home and no active resources"
 do_test remote-recover-unknown        "Fencing when the connection has no home and the remote has no operation history"
+do_test remote-reconnect-delay        "Waiting for remote reconnect interval to expire"
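+# remote-reconnect-delay exercises the reconnect_interval parameter of an
+# ocf:pacemaker:remote connection resource: after the remote node is fenced,
+# the cluster must wait for the interval to expire before reconnecting.
+# A minimal CIB sketch of how the parameter is set (the ids and the 60s
+# value are illustrative, not taken from the test input):
+#   <primitive id="remote-rhel7-3" class="ocf" provider="pacemaker" type="remote">
+#     <instance_attributes id="remote-rhel7-3-params">
+#       <nvpair id="remote-rhel7-3-reconnect" name="reconnect_interval" value="60s"/>
+#     </instance_attributes>
+#   </primitive>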
 
 echo ""
 do_test resource-discovery      "Exercise the resource-discovery location constraint option."
 do_test rsc-discovery-per-node  "Disable resource discovery per node"
 
 echo ""
 do_test isolation-start-all   "Start docker isolated resources."
 do_test isolation-restart-all "Restart docker isolated resources."
 do_test isolation-clone       "Cloned isolated primitive."
 
 echo ""
 do_test versioned-resources     "Start resources with #ra-version rules"
 do_test restart-versioned       "Restart resources on #ra-version change"
 do_test reload-versioned        "Reload resources on #ra-version change"
 
 echo ""
 do_test versioned-operations-1  "Use #ra-version to configure operations of native resources"
 do_test versioned-operations-2  "Use #ra-version to configure operations of stonith resources"
 do_test versioned-operations-3  "Use #ra-version to configure operations of master/slave resources"
 do_test versioned-operations-4  "Use #ra-version to configure operations of groups of the resources"
 
 echo ""
 test_results
diff --git a/pengine/test10/remote-fence-unclean-3.dot b/pengine/test10/remote-fence-unclean-3.dot
index 14adaefdd5..b32b77e3a7 100644
--- a/pengine/test10/remote-fence-unclean-3.dot
+++ b/pengine/test10/remote-fence-unclean-3.dot
@@ -1,14 +1,19 @@
 digraph "g" {
+"all_stopped" -> "fence1_start_0 overcloud-controller-0" [ style = bold]
 "all_stopped" [ style=bold color="green" fontcolor="orange"]
 "fence1_monitor_0 overcloud-controller-0" -> "fence1_start_0 overcloud-controller-0" [ style = bold]
 "fence1_monitor_0 overcloud-controller-0" [ style=bold color="green" fontcolor="black"]
 "fence1_monitor_0 overcloud-controller-1" -> "fence1_start_0 overcloud-controller-0" [ style = bold]
 "fence1_monitor_0 overcloud-controller-1" [ style=bold color="green" fontcolor="black"]
 "fence1_monitor_0 overcloud-controller-2" -> "fence1_start_0 overcloud-controller-0" [ style = bold]
 "fence1_monitor_0 overcloud-controller-2" [ style=bold color="green" fontcolor="black"]
 "fence1_monitor_60000 overcloud-controller-0" [ style=bold color="green" fontcolor="black"]
 "fence1_start_0 overcloud-controller-0" -> "fence1_monitor_60000 overcloud-controller-0" [ style = bold]
 "fence1_start_0 overcloud-controller-0" [ style=bold color="green" fontcolor="black"]
 "overcloud-novacompute-0_stop_0 overcloud-controller-0" -> "all_stopped" [ style = bold]
 "overcloud-novacompute-0_stop_0 overcloud-controller-0" [ style=bold color="green" fontcolor="black"]
+"stonith 'reboot' overcloud-novacompute-0" -> "stonith_complete" [ style = bold]
+"stonith 'reboot' overcloud-novacompute-0" [ style=bold color="green" fontcolor="black"]
+"stonith_complete" -> "all_stopped" [ style = bold]
+"stonith_complete" [ style=bold color="green" fontcolor="orange"]
 }
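 // How to read these scheduler graphs (general Pacemaker dot conventions,
 // not specific to this fix): bold green nodes are actions the transition
 // will execute; black text marks real operations run on a node, while
 // orange text (all_stopped, stonith_complete) marks internal pseudo-actions;
 // arrows are ordering dependencies.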
diff --git a/pengine/test10/remote-fence-unclean-3.exp b/pengine/test10/remote-fence-unclean-3.exp
index 64e5a622f7..b7bb7358e2 100644
--- a/pengine/test10/remote-fence-unclean-3.exp
+++ b/pengine/test10/remote-fence-unclean-3.exp
@@ -1,85 +1,114 @@
 <transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY"  transition_id="0">
   <synapse id="0">
     <action_set>
-      <rsc_op id="47" operation="monitor" operation_key="fence1_monitor_60000" on_node="overcloud-controller-0" on_node_uuid="1">
+      <rsc_op id="48" operation="monitor" operation_key="fence1_monitor_60000" on_node="overcloud-controller-0" on_node_uuid="1">
         <primitive id="fence1" class="stonith" type="fence_xvm"/>
         <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="overcloud-controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000"  multicast_address="225.0.0.2"/>
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
-        <rsc_op id="46" operation="start" operation_key="fence1_start_0" on_node="overcloud-controller-0" on_node_uuid="1"/>
+        <rsc_op id="47" operation="start" operation_key="fence1_start_0" on_node="overcloud-controller-0" on_node_uuid="1"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="1">
     <action_set>
-      <rsc_op id="46" operation="start" operation_key="fence1_start_0" on_node="overcloud-controller-0" on_node_uuid="1">
+      <rsc_op id="47" operation="start" operation_key="fence1_start_0" on_node="overcloud-controller-0" on_node_uuid="1">
         <primitive id="fence1" class="stonith" type="fence_xvm"/>
         <attributes CRM_meta_on_node="overcloud-controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000"  multicast_address="225.0.0.2"/>
       </rsc_op>
     </action_set>
     <inputs>
+      <trigger>
+        <pseudo_event id="42" operation="all_stopped" operation_key="all_stopped"/>
+      </trigger>
       <trigger>
         <rsc_op id="43" operation="monitor" operation_key="fence1_monitor_0" on_node="overcloud-controller-0" on_node_uuid="1"/>
       </trigger>
       <trigger>
         <rsc_op id="44" operation="monitor" operation_key="fence1_monitor_0" on_node="overcloud-controller-1" on_node_uuid="2"/>
       </trigger>
       <trigger>
         <rsc_op id="45" operation="monitor" operation_key="fence1_monitor_0" on_node="overcloud-controller-2" on_node_uuid="3"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="2">
     <action_set>
       <rsc_op id="45" operation="monitor" operation_key="fence1_monitor_0" on_node="overcloud-controller-2" on_node_uuid="3">
         <primitive id="fence1" class="stonith" type="fence_xvm"/>
         <attributes CRM_meta_on_node="overcloud-controller-2" CRM_meta_on_node_uuid="3" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000"  multicast_address="225.0.0.2"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
   <synapse id="3">
     <action_set>
       <rsc_op id="44" operation="monitor" operation_key="fence1_monitor_0" on_node="overcloud-controller-1" on_node_uuid="2">
         <primitive id="fence1" class="stonith" type="fence_xvm"/>
         <attributes CRM_meta_on_node="overcloud-controller-1" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000"  multicast_address="225.0.0.2"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
   <synapse id="4">
     <action_set>
       <rsc_op id="43" operation="monitor" operation_key="fence1_monitor_0" on_node="overcloud-controller-0" on_node_uuid="1">
         <primitive id="fence1" class="stonith" type="fence_xvm"/>
         <attributes CRM_meta_on_node="overcloud-controller-0" CRM_meta_on_node_uuid="1" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000"  multicast_address="225.0.0.2"/>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
   <synapse id="5">
     <action_set>
       <rsc_op id="30" operation="stop" operation_key="overcloud-novacompute-0_stop_0" on_node="overcloud-controller-0" on_node_uuid="1">
         <primitive id="overcloud-novacompute-0" class="ocf" provider="pacemaker" type="remote"/>
         <attributes CRM_meta_name="stop" CRM_meta_on_node="overcloud-controller-0" CRM_meta_on_node_uuid="1" CRM_meta_timeout="60000"  reconnect_interval="240"/>
         <downed>
           <node id="overcloud-novacompute-0"/>
         </downed>
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
   <synapse id="6">
+    <action_set>
+      <pseudo_event id="209" operation="stonith_complete" operation_key="stonith_complete">
+        <attributes />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <crm_event id="46" operation="stonith" operation_key="stonith-overcloud-novacompute-0-reboot" on_node="overcloud-novacompute-0" on_node_uuid="overcloud-novacompute-0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="7">
+    <action_set>
+      <crm_event id="46" operation="stonith" operation_key="stonith-overcloud-novacompute-0-reboot" on_node="overcloud-novacompute-0" on_node_uuid="overcloud-novacompute-0">
+        <attributes CRM_meta_compute_role="true" CRM_meta_on_node="overcloud-novacompute-0" CRM_meta_on_node_uuid="overcloud-novacompute-0" CRM_meta_stonith_action="reboot" />
+        <downed>
+          <node id="overcloud-novacompute-0"/>
+        </downed>
+      </crm_event>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="8">
     <action_set>
       <pseudo_event id="42" operation="all_stopped" operation_key="all_stopped">
         <attributes />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="30" operation="stop" operation_key="overcloud-novacompute-0_stop_0" on_node="overcloud-controller-0" on_node_uuid="1"/>
       </trigger>
+      <trigger>
+        <pseudo_event id="209" operation="stonith_complete" operation_key="stonith_complete"/>
+      </trigger>
     </inputs>
   </synapse>
 </transition_graph>
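 <!-- Each synapse pairs one action_set with the input triggers that must
      complete first.  The change above threads fencing into the graph:
      fence1_start_0 now waits on all_stopped, which in turn waits on
      stonith_complete, so the fence device only starts once the failed
      remote node has been rebooted. -->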
diff --git a/pengine/test10/remote-fence-unclean-3.summary b/pengine/test10/remote-fence-unclean-3.summary
index ec54d8e481..6d1559815e 100644
--- a/pengine/test10/remote-fence-unclean-3.summary
+++ b/pengine/test10/remote-fence-unclean-3.summary
@@ -1,82 +1,85 @@
 
 Current cluster status:
 Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
 RemoteOFFLINE: [ overcloud-novacompute-0 ]
 Containers: [ galera-bundle-0:galera-bundle-docker-0 galera-bundle-1:galera-bundle-docker-1 galera-bundle-2:galera-bundle-docker-2 rabbitmq-bundle-0:rabbitmq-bundle-docker-0 rabbitmq-bundle-1:rabbitmq-bundle-docker-1 rabbitmq-bundle-2:rabbitmq-bundle-docker-2 redis-bundle-0:redis-bundle-docker-0 redis-bundle-1:redis-bundle-docker-1 redis-bundle-2:redis-bundle-docker-2 ]
 
  fence1	(stonith:fence_xvm):	Stopped
  overcloud-novacompute-0	(ocf::pacemaker:remote):	FAILED overcloud-controller-0
  Docker container set: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]
    rabbitmq-bundle-0	(ocf::heartbeat:rabbitmq-cluster):	Started overcloud-controller-0
    rabbitmq-bundle-1	(ocf::heartbeat:rabbitmq-cluster):	Started overcloud-controller-1
    rabbitmq-bundle-2	(ocf::heartbeat:rabbitmq-cluster):	Started overcloud-controller-2
  Docker container set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]
    galera-bundle-0	(ocf::heartbeat:galera):	Master overcloud-controller-0
    galera-bundle-1	(ocf::heartbeat:galera):	Master overcloud-controller-1
    galera-bundle-2	(ocf::heartbeat:galera):	Master overcloud-controller-2
  Docker container set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]
    redis-bundle-0	(ocf::heartbeat:redis):	Master overcloud-controller-0
    redis-bundle-1	(ocf::heartbeat:redis):	Slave overcloud-controller-1
    redis-bundle-2	(ocf::heartbeat:redis):	Slave overcloud-controller-2
  ip-192.168.24.9	(ocf::heartbeat:IPaddr2):	Started overcloud-controller-0
  ip-10.0.0.7	(ocf::heartbeat:IPaddr2):	Started overcloud-controller-1
  ip-172.16.2.4	(ocf::heartbeat:IPaddr2):	Started overcloud-controller-2
  ip-172.16.2.8	(ocf::heartbeat:IPaddr2):	Started overcloud-controller-0
  ip-172.16.1.9	(ocf::heartbeat:IPaddr2):	Started overcloud-controller-1
  ip-172.16.3.9	(ocf::heartbeat:IPaddr2):	Started overcloud-controller-2
  Docker container set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]
    haproxy-bundle-docker-0	(ocf::heartbeat:docker):	Started overcloud-controller-0
    haproxy-bundle-docker-1	(ocf::heartbeat:docker):	Started overcloud-controller-1
    haproxy-bundle-docker-2	(ocf::heartbeat:docker):	Started overcloud-controller-2
  Docker container: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]
    openstack-cinder-volume-docker-0	(ocf::heartbeat:docker):	Started overcloud-controller-0
  Docker container: openstack-cinder-backup [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-backup:latest]
    openstack-cinder-backup-docker-0	(ocf::heartbeat:docker):	Started overcloud-controller-1
 
 Transition Summary:
+ * Fence (reboot) overcloud-novacompute-0 'the connection is unrecoverable'
  * Start   fence1	(overcloud-controller-0)
  * Stop    overcloud-novacompute-0	(overcloud-controller-0)
 
 Executing cluster transition:
  * Resource action: fence1          monitor on overcloud-controller-2
  * Resource action: fence1          monitor on overcloud-controller-1
  * Resource action: fence1          monitor on overcloud-controller-0
  * Resource action: overcloud-novacompute-0 stop on overcloud-controller-0
+ * Fencing overcloud-novacompute-0 (reboot)
+ * Pseudo action:   stonith_complete
  * Pseudo action:   all_stopped
  * Resource action: fence1          start on overcloud-controller-0
  * Resource action: fence1          monitor=60000 on overcloud-controller-0
 
 Revised cluster status:
 Online: [ overcloud-controller-0 overcloud-controller-1 overcloud-controller-2 ]
 RemoteOFFLINE: [ overcloud-novacompute-0 ]
 Containers: [ galera-bundle-0:galera-bundle-docker-0 galera-bundle-1:galera-bundle-docker-1 galera-bundle-2:galera-bundle-docker-2 rabbitmq-bundle-0:rabbitmq-bundle-docker-0 rabbitmq-bundle-1:rabbitmq-bundle-docker-1 rabbitmq-bundle-2:rabbitmq-bundle-docker-2 redis-bundle-0:redis-bundle-docker-0 redis-bundle-1:redis-bundle-docker-1 redis-bundle-2:redis-bundle-docker-2 ]
 
  fence1	(stonith:fence_xvm):	Started overcloud-controller-0
  overcloud-novacompute-0	(ocf::pacemaker:remote):	Stopped
  Docker container set: rabbitmq-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-rabbitmq:latest]
    rabbitmq-bundle-0	(ocf::heartbeat:rabbitmq-cluster):	Started overcloud-controller-0
    rabbitmq-bundle-1	(ocf::heartbeat:rabbitmq-cluster):	Started overcloud-controller-1
    rabbitmq-bundle-2	(ocf::heartbeat:rabbitmq-cluster):	Started overcloud-controller-2
  Docker container set: galera-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-mariadb:latest]
    galera-bundle-0	(ocf::heartbeat:galera):	Master overcloud-controller-0
    galera-bundle-1	(ocf::heartbeat:galera):	Master overcloud-controller-1
    galera-bundle-2	(ocf::heartbeat:galera):	Master overcloud-controller-2
  Docker container set: redis-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-redis:latest]
    redis-bundle-0	(ocf::heartbeat:redis):	Master overcloud-controller-0
    redis-bundle-1	(ocf::heartbeat:redis):	Slave overcloud-controller-1
    redis-bundle-2	(ocf::heartbeat:redis):	Slave overcloud-controller-2
  ip-192.168.24.9	(ocf::heartbeat:IPaddr2):	Started overcloud-controller-0
  ip-10.0.0.7	(ocf::heartbeat:IPaddr2):	Started overcloud-controller-1
  ip-172.16.2.4	(ocf::heartbeat:IPaddr2):	Started overcloud-controller-2
  ip-172.16.2.8	(ocf::heartbeat:IPaddr2):	Started overcloud-controller-0
  ip-172.16.1.9	(ocf::heartbeat:IPaddr2):	Started overcloud-controller-1
  ip-172.16.3.9	(ocf::heartbeat:IPaddr2):	Started overcloud-controller-2
  Docker container set: haproxy-bundle [192.168.24.1:8787/tripleoupstream/centos-binary-haproxy:latest]
    haproxy-bundle-docker-0	(ocf::heartbeat:docker):	Started overcloud-controller-0
    haproxy-bundle-docker-1	(ocf::heartbeat:docker):	Started overcloud-controller-1
    haproxy-bundle-docker-2	(ocf::heartbeat:docker):	Started overcloud-controller-2
  Docker container: openstack-cinder-volume [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-volume:latest]
    openstack-cinder-volume-docker-0	(ocf::heartbeat:docker):	Started overcloud-controller-0
  Docker container: openstack-cinder-backup [192.168.24.1:8787/tripleoupstream/centos-binary-cinder-backup:latest]
    openstack-cinder-backup-docker-0	(ocf::heartbeat:docker):	Started overcloud-controller-1
 
diff --git a/pengine/test10/remote-reconnect-delay.dot b/pengine/test10/remote-reconnect-delay.dot
new file mode 100644
index 0000000000..42c2cef36e
--- /dev/null
+++ b/pengine/test10/remote-reconnect-delay.dot
@@ -0,0 +1,9 @@
+digraph "g" {
+"Fencing_monitor_120000 rhel7-2" [ style=bold color="green" fontcolor="black"]
+"Fencing_start_0 rhel7-2" -> "Fencing_monitor_120000 rhel7-2" [ style = bold]
+"Fencing_start_0 rhel7-2" [ style=bold color="green" fontcolor="black"]
+"Fencing_stop_0 rhel7-2" -> "Fencing_start_0 rhel7-2" [ style = bold]
+"Fencing_stop_0 rhel7-2" -> "all_stopped" [ style = bold]
+"Fencing_stop_0 rhel7-2" [ style=bold color="green" fontcolor="black"]
+"all_stopped" [ style=bold color="green" fontcolor="orange"]
+}
diff --git a/pengine/test10/remote-reconnect-delay.exp b/pengine/test10/remote-reconnect-delay.exp
new file mode 100644
index 0000000000..fc616c2c0e
--- /dev/null
+++ b/pengine/test10/remote-reconnect-delay.exp
@@ -0,0 +1,49 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="1"  transition_id="0">
+  <synapse id="0">
+    <action_set>
+      <rsc_op id="23" operation="stop" operation_key="Fencing_stop_0" on_node="rhel7-2" on_node_uuid="2">
+        <primitive id="Fencing" class="stonith" type="fence_xvm"/>
+        <attributes CRM_meta_name="stop" CRM_meta_on_node="rhel7-2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="60000"  multicast_address="239.255.100.100" pcmk_arg_map="domain:uname" pcmk_host_list="rhel7-1 remote-rhel7-1 rhel7-2 remote-rhel7-2 remote-rhel7-3 rhel7-4 remote-rhel7-4 rhel7-5 remote-rhel7-5" pcmk_host_map="remote-rhel7-1:rhel7-1;remote-rhel7-2:rhel7-2;remote-rhel7-3:rhel7-3;remote-rhel7-4:rhel7-4;remote-rhel7-5:rhel7-5;"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="1">
+    <action_set>
+      <rsc_op id="22" operation="start" operation_key="Fencing_start_0" on_node="rhel7-2" on_node_uuid="2">
+        <primitive id="Fencing" class="stonith" type="fence_xvm"/>
+        <attributes CRM_meta_name="start" CRM_meta_on_node="rhel7-2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="60000"  multicast_address="239.255.100.100" pcmk_arg_map="domain:uname" pcmk_host_list="rhel7-1 remote-rhel7-1 rhel7-2 remote-rhel7-2 remote-rhel7-3 rhel7-4 remote-rhel7-4 rhel7-5 remote-rhel7-5" pcmk_host_map="remote-rhel7-1:rhel7-1;remote-rhel7-2:rhel7-2;remote-rhel7-3:rhel7-3;remote-rhel7-4:rhel7-4;remote-rhel7-5:rhel7-5;"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="23" operation="stop" operation_key="Fencing_stop_0" on_node="rhel7-2" on_node_uuid="2"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="2">
+    <action_set>
+      <rsc_op id="8" operation="monitor" operation_key="Fencing_monitor_120000" on_node="rhel7-2" on_node_uuid="2">
+        <primitive id="Fencing" class="stonith" type="fence_xvm"/>
+        <attributes CRM_meta_interval="120000" CRM_meta_name="monitor" CRM_meta_on_node="rhel7-2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="120000"  multicast_address="239.255.100.100" pcmk_arg_map="domain:uname" pcmk_host_list="rhel7-1 remote-rhel7-1 rhel7-2 remote-rhel7-2 remote-rhel7-3 rhel7-4 remote-rhel7-4 rhel7-5 remote-rhel7-5" pcmk_host_map="remote-rhel7-1:rhel7-1;remote-rhel7-2:rhel7-2;remote-rhel7-3:rhel7-3;remote-rhel7-4:rhel7-4;remote-rhel7-5:rhel7-5;"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="22" operation="start" operation_key="Fencing_start_0" on_node="rhel7-2" on_node_uuid="2"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="3">
+    <action_set>
+      <pseudo_event id="21" operation="all_stopped" operation_key="all_stopped">
+        <attributes />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="23" operation="stop" operation_key="Fencing_stop_0" on_node="rhel7-2" on_node_uuid="2"/>
+      </trigger>
+    </inputs>
+  </synapse>
+</transition_graph>
diff --git a/pengine/test10/remote-reconnect-delay.scores b/pengine/test10/remote-reconnect-delay.scores
new file mode 100644
index 0000000000..411af2828d
--- /dev/null
+++ b/pengine/test10/remote-reconnect-delay.scores
@@ -0,0 +1,207 @@
+Allocation scores:
+Using the original execution date of: 2017-08-21 17:12:54Z
+clone_color: Connectivity allocation score on remote-rhel7-3: 0
+clone_color: Connectivity allocation score on rhel7-1: 0
+clone_color: Connectivity allocation score on rhel7-2: 0
+clone_color: Connectivity allocation score on rhel7-4: 0
+clone_color: Connectivity allocation score on rhel7-5: 0
+clone_color: master-1 allocation score on remote-rhel7-3: -INFINITY
+clone_color: master-1 allocation score on rhel7-1: 0
+clone_color: master-1 allocation score on rhel7-2: 0
+clone_color: master-1 allocation score on rhel7-4: 0
+clone_color: master-1 allocation score on rhel7-5: 0
+clone_color: ping-1:0 allocation score on remote-rhel7-3: 0
+clone_color: ping-1:0 allocation score on rhel7-1: 1
+clone_color: ping-1:0 allocation score on rhel7-2: 0
+clone_color: ping-1:0 allocation score on rhel7-4: 0
+clone_color: ping-1:0 allocation score on rhel7-5: 0
+clone_color: ping-1:1 allocation score on remote-rhel7-3: 0
+clone_color: ping-1:1 allocation score on rhel7-1: 0
+clone_color: ping-1:1 allocation score on rhel7-2: 1
+clone_color: ping-1:1 allocation score on rhel7-4: 0
+clone_color: ping-1:1 allocation score on rhel7-5: 0
+clone_color: ping-1:2 allocation score on remote-rhel7-3: 0
+clone_color: ping-1:2 allocation score on rhel7-1: 0
+clone_color: ping-1:2 allocation score on rhel7-2: 0
+clone_color: ping-1:2 allocation score on rhel7-4: 1
+clone_color: ping-1:2 allocation score on rhel7-5: 0
+clone_color: ping-1:3 allocation score on remote-rhel7-3: 0
+clone_color: ping-1:3 allocation score on rhel7-1: 0
+clone_color: ping-1:3 allocation score on rhel7-2: 0
+clone_color: ping-1:3 allocation score on rhel7-4: 0
+clone_color: ping-1:3 allocation score on rhel7-5: 1
+clone_color: ping-1:4 allocation score on remote-rhel7-3: 0
+clone_color: ping-1:4 allocation score on rhel7-1: 0
+clone_color: ping-1:4 allocation score on rhel7-2: 0
+clone_color: ping-1:4 allocation score on rhel7-4: 0
+clone_color: ping-1:4 allocation score on rhel7-5: 0
+clone_color: stateful-1:0 allocation score on remote-rhel7-3: -INFINITY
+clone_color: stateful-1:0 allocation score on rhel7-1: 6
+clone_color: stateful-1:0 allocation score on rhel7-2: 0
+clone_color: stateful-1:0 allocation score on rhel7-4: 0
+clone_color: stateful-1:0 allocation score on rhel7-5: 0
+clone_color: stateful-1:1 allocation score on remote-rhel7-3: -INFINITY
+clone_color: stateful-1:1 allocation score on rhel7-1: 0
+clone_color: stateful-1:1 allocation score on rhel7-2: 11
+clone_color: stateful-1:1 allocation score on rhel7-4: 0
+clone_color: stateful-1:1 allocation score on rhel7-5: 0
+clone_color: stateful-1:2 allocation score on remote-rhel7-3: -INFINITY
+clone_color: stateful-1:2 allocation score on rhel7-1: 0
+clone_color: stateful-1:2 allocation score on rhel7-2: 0
+clone_color: stateful-1:2 allocation score on rhel7-4: 6
+clone_color: stateful-1:2 allocation score on rhel7-5: 0
+clone_color: stateful-1:3 allocation score on remote-rhel7-3: -INFINITY
+clone_color: stateful-1:3 allocation score on rhel7-1: 0
+clone_color: stateful-1:3 allocation score on rhel7-2: 0
+clone_color: stateful-1:3 allocation score on rhel7-4: 0
+clone_color: stateful-1:3 allocation score on rhel7-5: 6
+clone_color: stateful-1:4 allocation score on remote-rhel7-3: -INFINITY
+clone_color: stateful-1:4 allocation score on rhel7-1: 0
+clone_color: stateful-1:4 allocation score on rhel7-2: 0
+clone_color: stateful-1:4 allocation score on rhel7-4: 0
+clone_color: stateful-1:4 allocation score on rhel7-5: 0
+group_color: group-1 allocation score on remote-rhel7-3: 0
+group_color: group-1 allocation score on rhel7-1: 0
+group_color: group-1 allocation score on rhel7-2: 0
+group_color: group-1 allocation score on rhel7-4: 0
+group_color: group-1 allocation score on rhel7-5: 0
+group_color: petulant allocation score on remote-rhel7-3: 0
+group_color: petulant allocation score on rhel7-1: 0
+group_color: petulant allocation score on rhel7-2: 0
+group_color: petulant allocation score on rhel7-4: 0
+group_color: petulant allocation score on rhel7-5: 0
+group_color: r192.168.122.207 allocation score on remote-rhel7-3: 0
+group_color: r192.168.122.207 allocation score on rhel7-1: 0
+group_color: r192.168.122.207 allocation score on rhel7-2: 0
+group_color: r192.168.122.207 allocation score on rhel7-4: 0
+group_color: r192.168.122.207 allocation score on rhel7-5: 0
+group_color: r192.168.122.208 allocation score on remote-rhel7-3: 0
+group_color: r192.168.122.208 allocation score on rhel7-1: 0
+group_color: r192.168.122.208 allocation score on rhel7-2: 0
+group_color: r192.168.122.208 allocation score on rhel7-4: 0
+group_color: r192.168.122.208 allocation score on rhel7-5: 0
+native_color: Fencing allocation score on remote-rhel7-3: -INFINITY
+native_color: Fencing allocation score on rhel7-1: 0
+native_color: Fencing allocation score on rhel7-2: 0
+native_color: Fencing allocation score on rhel7-4: 0
+native_color: Fencing allocation score on rhel7-5: 0
+native_color: FencingFail allocation score on remote-rhel7-3: -INFINITY
+native_color: FencingFail allocation score on rhel7-1: 0
+native_color: FencingFail allocation score on rhel7-2: 0
+native_color: FencingFail allocation score on rhel7-4: 0
+native_color: FencingFail allocation score on rhel7-5: 0
+native_color: lsb-dummy allocation score on remote-rhel7-3: -INFINITY
+native_color: lsb-dummy allocation score on rhel7-1: -INFINITY
+native_color: lsb-dummy allocation score on rhel7-2: 0
+native_color: lsb-dummy allocation score on rhel7-4: -INFINITY
+native_color: lsb-dummy allocation score on rhel7-5: -INFINITY
+native_color: migrator allocation score on remote-rhel7-3: 0
+native_color: migrator allocation score on rhel7-1: 0
+native_color: migrator allocation score on rhel7-2: 0
+native_color: migrator allocation score on rhel7-4: 0
+native_color: migrator allocation score on rhel7-5: 1
+native_color: petulant allocation score on remote-rhel7-3: -INFINITY
+native_color: petulant allocation score on rhel7-1: -INFINITY
+native_color: petulant allocation score on rhel7-2: 0
+native_color: petulant allocation score on rhel7-4: -INFINITY
+native_color: petulant allocation score on rhel7-5: -INFINITY
+native_color: ping-1:0 allocation score on remote-rhel7-3: -INFINITY
+native_color: ping-1:0 allocation score on rhel7-1: 1
+native_color: ping-1:0 allocation score on rhel7-2: 0
+native_color: ping-1:0 allocation score on rhel7-4: 0
+native_color: ping-1:0 allocation score on rhel7-5: 0
+native_color: ping-1:1 allocation score on remote-rhel7-3: -INFINITY
+native_color: ping-1:1 allocation score on rhel7-1: -INFINITY
+native_color: ping-1:1 allocation score on rhel7-2: 1
+native_color: ping-1:1 allocation score on rhel7-4: 0
+native_color: ping-1:1 allocation score on rhel7-5: 0
+native_color: ping-1:2 allocation score on remote-rhel7-3: -INFINITY
+native_color: ping-1:2 allocation score on rhel7-1: -INFINITY
+native_color: ping-1:2 allocation score on rhel7-2: -INFINITY
+native_color: ping-1:2 allocation score on rhel7-4: 1
+native_color: ping-1:2 allocation score on rhel7-5: 0
+native_color: ping-1:3 allocation score on remote-rhel7-3: -INFINITY
+native_color: ping-1:3 allocation score on rhel7-1: -INFINITY
+native_color: ping-1:3 allocation score on rhel7-2: -INFINITY
+native_color: ping-1:3 allocation score on rhel7-4: -INFINITY
+native_color: ping-1:3 allocation score on rhel7-5: 1
+native_color: ping-1:4 allocation score on remote-rhel7-3: -INFINITY
+native_color: ping-1:4 allocation score on rhel7-1: -INFINITY
+native_color: ping-1:4 allocation score on rhel7-2: -INFINITY
+native_color: ping-1:4 allocation score on rhel7-4: -INFINITY
+native_color: ping-1:4 allocation score on rhel7-5: -INFINITY
+native_color: r192.168.122.207 allocation score on remote-rhel7-3: -INFINITY
+native_color: r192.168.122.207 allocation score on rhel7-1: -INFINITY
+native_color: r192.168.122.207 allocation score on rhel7-2: 11
+native_color: r192.168.122.207 allocation score on rhel7-4: -INFINITY
+native_color: r192.168.122.207 allocation score on rhel7-5: -INFINITY
+native_color: r192.168.122.208 allocation score on remote-rhel7-3: -INFINITY
+native_color: r192.168.122.208 allocation score on rhel7-1: -INFINITY
+native_color: r192.168.122.208 allocation score on rhel7-2: 0
+native_color: r192.168.122.208 allocation score on rhel7-4: -INFINITY
+native_color: r192.168.122.208 allocation score on rhel7-5: -INFINITY
+native_color: remote-rhel7-3 allocation score on remote-rhel7-3: -INFINITY
+native_color: remote-rhel7-3 allocation score on rhel7-1: -INFINITY
+native_color: remote-rhel7-3 allocation score on rhel7-2: -INFINITY
+native_color: remote-rhel7-3 allocation score on rhel7-4: -INFINITY
+native_color: remote-rhel7-3 allocation score on rhel7-5: -INFINITY
+native_color: remote-rsc allocation score on remote-rhel7-3: INFINITY
+native_color: remote-rsc allocation score on rhel7-1: 0
+native_color: remote-rsc allocation score on rhel7-2: 0
+native_color: remote-rsc allocation score on rhel7-4: 0
+native_color: remote-rsc allocation score on rhel7-5: 0
+native_color: rsc_rhel7-1 allocation score on remote-rhel7-3: 0
+native_color: rsc_rhel7-1 allocation score on rhel7-1: 100
+native_color: rsc_rhel7-1 allocation score on rhel7-2: 0
+native_color: rsc_rhel7-1 allocation score on rhel7-4: 0
+native_color: rsc_rhel7-1 allocation score on rhel7-5: 0
+native_color: rsc_rhel7-2 allocation score on remote-rhel7-3: 0
+native_color: rsc_rhel7-2 allocation score on rhel7-1: 0
+native_color: rsc_rhel7-2 allocation score on rhel7-2: 100
+native_color: rsc_rhel7-2 allocation score on rhel7-4: 0
+native_color: rsc_rhel7-2 allocation score on rhel7-5: 0
+native_color: rsc_rhel7-3 allocation score on remote-rhel7-3: 0
+native_color: rsc_rhel7-3 allocation score on rhel7-1: 0
+native_color: rsc_rhel7-3 allocation score on rhel7-2: 0
+native_color: rsc_rhel7-3 allocation score on rhel7-4: 0
+native_color: rsc_rhel7-3 allocation score on rhel7-5: 0
+native_color: rsc_rhel7-4 allocation score on remote-rhel7-3: 0
+native_color: rsc_rhel7-4 allocation score on rhel7-1: 0
+native_color: rsc_rhel7-4 allocation score on rhel7-2: 0
+native_color: rsc_rhel7-4 allocation score on rhel7-4: 100
+native_color: rsc_rhel7-4 allocation score on rhel7-5: 0
+native_color: rsc_rhel7-5 allocation score on remote-rhel7-3: 0
+native_color: rsc_rhel7-5 allocation score on rhel7-1: 0
+native_color: rsc_rhel7-5 allocation score on rhel7-2: 0
+native_color: rsc_rhel7-5 allocation score on rhel7-4: 0
+native_color: rsc_rhel7-5 allocation score on rhel7-5: 100
+native_color: stateful-1:0 allocation score on remote-rhel7-3: -INFINITY
+native_color: stateful-1:0 allocation score on rhel7-1: 6
+native_color: stateful-1:0 allocation score on rhel7-2: -INFINITY
+native_color: stateful-1:0 allocation score on rhel7-4: 0
+native_color: stateful-1:0 allocation score on rhel7-5: 0
+native_color: stateful-1:1 allocation score on remote-rhel7-3: -INFINITY
+native_color: stateful-1:1 allocation score on rhel7-1: 0
+native_color: stateful-1:1 allocation score on rhel7-2: 11
+native_color: stateful-1:1 allocation score on rhel7-4: 0
+native_color: stateful-1:1 allocation score on rhel7-5: 0
+native_color: stateful-1:2 allocation score on remote-rhel7-3: -INFINITY
+native_color: stateful-1:2 allocation score on rhel7-1: -INFINITY
+native_color: stateful-1:2 allocation score on rhel7-2: -INFINITY
+native_color: stateful-1:2 allocation score on rhel7-4: 6
+native_color: stateful-1:2 allocation score on rhel7-5: 0
+native_color: stateful-1:3 allocation score on remote-rhel7-3: -INFINITY
+native_color: stateful-1:3 allocation score on rhel7-1: -INFINITY
+native_color: stateful-1:3 allocation score on rhel7-2: -INFINITY
+native_color: stateful-1:3 allocation score on rhel7-4: -INFINITY
+native_color: stateful-1:3 allocation score on rhel7-5: 6
+native_color: stateful-1:4 allocation score on remote-rhel7-3: -INFINITY
+native_color: stateful-1:4 allocation score on rhel7-1: -INFINITY
+native_color: stateful-1:4 allocation score on rhel7-2: -INFINITY
+native_color: stateful-1:4 allocation score on rhel7-4: -INFINITY
+native_color: stateful-1:4 allocation score on rhel7-5: -INFINITY
+stateful-1:0 promotion score on rhel7-1: 5
+stateful-1:1 promotion score on rhel7-2: 10
+stateful-1:2 promotion score on rhel7-4: 5
+stateful-1:3 promotion score on rhel7-5: 5
+stateful-1:4 promotion score on none: 0
diff --git a/pengine/test10/remote-reconnect-delay.summary b/pengine/test10/remote-reconnect-delay.summary
new file mode 100644
index 0000000000..ea11483e46
--- /dev/null
+++ b/pengine/test10/remote-reconnect-delay.summary
@@ -0,0 +1,66 @@
+Using the original execution date of: 2017-08-21 17:12:54Z
+
+Current cluster status:
+Online: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
+RemoteOFFLINE: [ remote-rhel7-3 ]
+
+ Fencing	(stonith:fence_xvm):	Started rhel7-2
+ FencingFail	(stonith:fence_dummy):	Started rhel7-4
+ rsc_rhel7-1	(ocf::heartbeat:IPaddr2):	Started rhel7-1
+ rsc_rhel7-2	(ocf::heartbeat:IPaddr2):	Started rhel7-2
+ rsc_rhel7-3	(ocf::heartbeat:IPaddr2):	Started rhel7-5
+ rsc_rhel7-4	(ocf::heartbeat:IPaddr2):	Started rhel7-4
+ rsc_rhel7-5	(ocf::heartbeat:IPaddr2):	Started rhel7-5
+ migrator	(ocf::pacemaker:Dummy):	Started rhel7-5
+ Clone Set: Connectivity [ping-1]
+     Started: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
+     Stopped: [ remote-rhel7-3 ]
+ Master/Slave Set: master-1 [stateful-1]
+     Masters: [ rhel7-2 ]
+     Slaves: [ rhel7-1 rhel7-4 rhel7-5 ]
+     Stopped: [ remote-rhel7-3 ]
+ Resource Group: group-1
+     r192.168.122.207	(ocf::heartbeat:IPaddr2):	Started rhel7-2
+     petulant	(service:DummySD):	Started rhel7-2
+     r192.168.122.208	(ocf::heartbeat:IPaddr2):	Started rhel7-2
+ lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started rhel7-2
+ remote-rhel7-3	(ocf::pacemaker:remote):	FAILED
+ remote-rsc	(ocf::heartbeat:Dummy):	Started rhel7-1
+
+Transition Summary:
+ * Restart    Fencing     ( rhel7-2 )  
+
+Executing cluster transition:
+ * Resource action: Fencing         stop on rhel7-2
+ * Resource action: Fencing         start on rhel7-2
+ * Resource action: Fencing         monitor=120000 on rhel7-2
+ * Pseudo action:   all_stopped
+Using the original execution date of: 2017-08-21 17:12:54Z
+
+Revised cluster status:
+Online: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
+RemoteOFFLINE: [ remote-rhel7-3 ]
+
+ Fencing	(stonith:fence_xvm):	Started rhel7-2
+ FencingFail	(stonith:fence_dummy):	Started rhel7-4
+ rsc_rhel7-1	(ocf::heartbeat:IPaddr2):	Started rhel7-1
+ rsc_rhel7-2	(ocf::heartbeat:IPaddr2):	Started rhel7-2
+ rsc_rhel7-3	(ocf::heartbeat:IPaddr2):	Started rhel7-5
+ rsc_rhel7-4	(ocf::heartbeat:IPaddr2):	Started rhel7-4
+ rsc_rhel7-5	(ocf::heartbeat:IPaddr2):	Started rhel7-5
+ migrator	(ocf::pacemaker:Dummy):	Started rhel7-5
+ Clone Set: Connectivity [ping-1]
+     Started: [ rhel7-1 rhel7-2 rhel7-4 rhel7-5 ]
+     Stopped: [ remote-rhel7-3 ]
+ Master/Slave Set: master-1 [stateful-1]
+     Masters: [ rhel7-2 ]
+     Slaves: [ rhel7-1 rhel7-4 rhel7-5 ]
+     Stopped: [ remote-rhel7-3 ]
+ Resource Group: group-1
+     r192.168.122.207	(ocf::heartbeat:IPaddr2):	Started rhel7-2
+     petulant	(service:DummySD):	Started rhel7-2
+     r192.168.122.208	(ocf::heartbeat:IPaddr2):	Started rhel7-2
+ lsb-dummy	(lsb:/usr/share/pacemaker/tests/cts/LSBDummy):	Started rhel7-2
+ remote-rhel7-3	(ocf::pacemaker:remote):	FAILED
+ remote-rsc	(ocf::heartbeat:Dummy):	Started rhel7-1
+
diff --git a/pengine/test10/remote-reconnect-delay.xml b/pengine/test10/remote-reconnect-delay.xml
new file mode 100644
index 0000000000..e9ed3e67ae
--- /dev/null
+++ b/pengine/test10/remote-reconnect-delay.xml
@@ -0,0 +1,504 @@
+<cib crm_feature_set="3.0.14" validate-with="pacemaker-2.5" epoch="49" num_updates="13" admin_epoch="0" cib-last-written="Mon Aug 21 12:12:00 2017" update-origin="rhel7-1" update-client="crm_resource" update-user="remote-rhel7-3" have-quorum="1" dc-uuid="2" execution-date="1503335574">
+  <configuration>
+    <crm_config>
+      <cluster_property_set id="cib-bootstrap-options">
+        <nvpair id="cts-stonith-enabled" name="stonith-enabled" value="1"/>
+        <nvpair id="cts-start-failure-is-fatal" name="start-failure-is-fatal" value="false"/>
+        <nvpair id="cts-pe-input-series-max" name="pe-input-series-max" value="5000"/>
+        <nvpair id="cts-default-action-timeout" name="default-action-timeout" value="90s"/>
+        <nvpair id="cts-shutdown-escalation" name="shutdown-escalation" value="5min"/>
+        <nvpair id="cts-batch-limit" name="batch-limit" value="10"/>
+        <nvpair id="cts-dc-deadtime" name="dc-deadtime" value="5s"/>
+        <nvpair id="cts-no-quorum-policy" name="no-quorum-policy" value="stop"/>
+        <nvpair id="cts-expected-quorum-votes" name="expected-quorum-votes" value="5"/>
+        <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+        <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.17-553.368fb3e.git.el7.centos-368fb3e"/>
+        <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+        <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="mycluster"/>
+        <nvpair id="cts-recheck-interval-setting" name="cluster-recheck-interval" value="45s"/>
+      </cluster_property_set>
+    </crm_config>
+    <nodes>
+      <node id="4" uname="rhel7-4"/>
+      <node id="5" uname="rhel7-5"/>
+      <node id="3" uname="rhel7-1"/>
+      <node id="2" uname="rhel7-2"/>
+    </nodes>
+    <resources>
+      <primitive id="Fencing" class="stonith" type="fence_xvm">
+        <meta_attributes id="Fencing-meta">
+          <nvpair id="Fencing-migration-threshold" name="migration-threshold" value="5"/>
+        </meta_attributes>
+        <instance_attributes id="Fencing-params">
+          <nvpair id="Fencing-multicast_address" name="multicast_address" value="239.255.100.100"/>
+          <nvpair id="Fencing-pcmk_arg_map" name="pcmk_arg_map" value="domain:uname"/>
+          <nvpair id="Fencing-pcmk_host_map" name="pcmk_host_map" value="remote-rhel7-1:rhel7-1;remote-rhel7-2:rhel7-2;remote-rhel7-3:rhel7-3;remote-rhel7-4:rhel7-4;remote-rhel7-5:rhel7-5;"/>
+          <nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="rhel7-1 remote-rhel7-1 rhel7-2 remote-rhel7-2 remote-rhel7-3 rhel7-4 remote-rhel7-4 rhel7-5 remote-rhel7-5"/>
+        </instance_attributes>
+        <operations>
+          <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
+          <op id="Fencing-stop-0" interval="0" name="stop" timeout="60s"/>
+          <op id="Fencing-start-0" interval="0" name="start" timeout="60s"/>
+        </operations>
+      </primitive>
+      <primitive id="FencingFail" class="stonith" type="fence_dummy">
+        <instance_attributes id="FencingFail-params">
+          <nvpair id="FencingFail-random_sleep_range" name="random_sleep_range" value="30"/>
+          <nvpair id="FencingFail-pcmk_host_list" name="pcmk_host_list" value="rhel7-1 remote-rhel7-1 rhel7-4 remote-rhel7-4"/>
+          <nvpair id="FencingFail-mode" name="mode" value="fail"/>
+        </instance_attributes>
+      </primitive>
+      <primitive id="rsc_rhel7-1" class="ocf" type="IPaddr2" provider="heartbeat">
+        <instance_attributes id="rsc_rhel7-1-params">
+          <nvpair id="rsc_rhel7-1-ip" name="ip" value="192.168.122.202"/>
+          <nvpair id="rsc_rhel7-1-cidr_netmask" name="cidr_netmask" value="32"/>
+        </instance_attributes>
+        <operations>
+          <op id="rsc_rhel7-1-monitor-5s" interval="5s" name="monitor"/>
+        </operations>
+      </primitive>
+      <primitive id="rsc_rhel7-2" class="ocf" type="IPaddr2" provider="heartbeat">
+        <instance_attributes id="rsc_rhel7-2-params">
+          <nvpair id="rsc_rhel7-2-ip" name="ip" value="192.168.122.203"/>
+          <nvpair id="rsc_rhel7-2-cidr_netmask" name="cidr_netmask" value="32"/>
+        </instance_attributes>
+        <operations>
+          <op id="rsc_rhel7-2-monitor-5s" interval="5s" name="monitor"/>
+        </operations>
+      </primitive>
+      <primitive id="rsc_rhel7-3" class="ocf" type="IPaddr2" provider="heartbeat">
+        <instance_attributes id="rsc_rhel7-3-params">
+          <nvpair id="rsc_rhel7-3-ip" name="ip" value="192.168.122.204"/>
+          <nvpair id="rsc_rhel7-3-cidr_netmask" name="cidr_netmask" value="32"/>
+        </instance_attributes>
+        <operations>
+          <op id="rsc_rhel7-3-monitor-5s" interval="5s" name="monitor"/>
+        </operations>
+      </primitive>
+      <primitive id="rsc_rhel7-4" class="ocf" type="IPaddr2" provider="heartbeat">
+        <instance_attributes id="rsc_rhel7-4-params">
+          <nvpair id="rsc_rhel7-4-ip" name="ip" value="192.168.122.205"/>
+          <nvpair id="rsc_rhel7-4-cidr_netmask" name="cidr_netmask" value="32"/>
+        </instance_attributes>
+        <operations>
+          <op id="rsc_rhel7-4-monitor-5s" interval="5s" name="monitor"/>
+        </operations>
+      </primitive>
+      <primitive id="rsc_rhel7-5" class="ocf" type="IPaddr2" provider="heartbeat">
+        <instance_attributes id="rsc_rhel7-5-params">
+          <nvpair id="rsc_rhel7-5-ip" name="ip" value="192.168.122.206"/>
+          <nvpair id="rsc_rhel7-5-cidr_netmask" name="cidr_netmask" value="32"/>
+        </instance_attributes>
+        <operations>
+          <op id="rsc_rhel7-5-monitor-5s" interval="5s" name="monitor"/>
+        </operations>
+      </primitive>
+      <primitive id="migrator" class="ocf" type="Dummy" provider="pacemaker">
+        <meta_attributes id="migrator-meta">
+          <nvpair id="migrator-allow-migrate" name="allow-migrate" value="1"/>
+          <nvpair id="migrator-resource-stickiness" name="resource-stickiness" value="1"/>
+        </meta_attributes>
+        <instance_attributes id="migrator-params">
+          <nvpair id="migrator-passwd" name="passwd" value="whatever"/>
+        </instance_attributes>
+        <operations>
+          <op id="migrator-monitor-P10S" interval="P10S" name="monitor"/>
+        </operations>
+      </primitive>
+      <clone id="Connectivity">
+        <meta_attributes id="Connectivity-meta">
+          <nvpair id="Connectivity-globally-unique" name="globally-unique" value="false"/>
+        </meta_attributes>
+        <primitive id="ping-1" class="ocf" type="ping" provider="pacemaker">
+          <instance_attributes id="ping-1-params">
+            <nvpair id="ping-1-debug" name="debug" value="true"/>
+            <nvpair id="ping-1-host_list" name="host_list" value="192.168.122.70"/>
+            <nvpair id="ping-1-name" name="name" value="connected"/>
+          </instance_attributes>
+          <operations>
+            <op id="ping-1-monitor-60s" interval="60s" name="monitor"/>
+          </operations>
+        </primitive>
+      </clone>
+      <master id="master-1">
+        <meta_attributes id="master-1-meta">
+          <nvpair id="master-1-master-node-max" name="master-node-max" value="1"/>
+          <nvpair id="master-1-clone-max" name="clone-max" value="5"/>
+          <nvpair id="master-1-master-max" name="master-max" value="1"/>
+          <nvpair id="master-1-clone-node-max" name="clone-node-max" value="1"/>
+        </meta_attributes>
+        <primitive id="stateful-1" class="ocf" type="Stateful" provider="pacemaker">
+          <operations>
+            <op id="stateful-1-monitor-15s" interval="15s" name="monitor" timeout="60s"/>
+            <op id="stateful-1-monitor-16s" interval="16s" role="Master" name="monitor" timeout="60s"/>
+          </operations>
+        </primitive>
+      </master>
+      <group id="group-1">
+        <primitive id="r192.168.122.207" class="ocf" type="IPaddr2" provider="heartbeat">
+          <instance_attributes id="r192.168.122.207-params">
+            <nvpair id="r192.168.122.207-ip" name="ip" value="192.168.122.207"/>
+            <nvpair id="r192.168.122.207-cidr_netmask" name="cidr_netmask" value="32"/>
+          </instance_attributes>
+          <operations>
+            <op id="r192.168.122.207-monitor-5s" interval="5s" name="monitor"/>
+          </operations>
+        </primitive>
+        <primitive id="petulant" class="service" type="DummySD">
+          <operations>
+            <op id="petulant-monitor-P10S" interval="P10S" name="monitor"/>
+          </operations>
+        </primitive>
+        <primitive id="r192.168.122.208" class="ocf" type="IPaddr2" provider="heartbeat">
+          <instance_attributes id="r192.168.122.208-params">
+            <nvpair id="r192.168.122.208-ip" name="ip" value="192.168.122.208"/>
+            <nvpair id="r192.168.122.208-cidr_netmask" name="cidr_netmask" value="32"/>
+          </instance_attributes>
+          <operations>
+            <op id="r192.168.122.208-monitor-5s" interval="5s" name="monitor"/>
+          </operations>
+        </primitive>
+      </group>
+      <primitive id="lsb-dummy" class="lsb" type="/usr/share/pacemaker/tests/cts/LSBDummy">
+        <operations>
+          <op id="lsb-dummy-monitor-5s" interval="5s" name="monitor"/>
+        </operations>
+      </primitive>
+      <primitive class="ocf" id="remote-rhel7-3" provider="pacemaker" type="remote">
+        <instance_attributes id="remote-instance_attributes">
+          <nvpair id="remote-instance_attributes-server" name="server" value="rhel7-3"/>
+          <nvpair id="remote-instance_attributes-reconnect_interval" name="reconnect_interval" value="60s"/>
+        </instance_attributes>
+        <operations>
+          <op id="remote-monitor-interval-60s" interval="60s" name="monitor"/>
+          <op id="remote-name-start-interval-0-timeout-120" interval="0" name="start" timeout="60"/>
+        </operations>
+      </primitive>
+      <primitive class="ocf" id="remote-rsc" provider="heartbeat" type="Dummy">
+        <operations>
+          <op id="remote-rsc-monitor-interval-10s" interval="10s" name="monitor"/>
+        </operations>
+        <meta_attributes id="remote-meta_attributes"/>
+      </primitive>
+    </resources>
+    <constraints>
+      <rsc_location id="prefer-rhel7-1" rsc="rsc_rhel7-1">
+        <rule id="prefer-rhel7-1-r" score="100" boolean-op="and">
+          <expression id="prefer-rhel7-1-e" attribute="#uname" operation="eq" value="rhel7-1"/>
+        </rule>
+      </rsc_location>
+      <rsc_location id="prefer-rhel7-2" rsc="rsc_rhel7-2">
+        <rule id="prefer-rhel7-2-r" score="100" boolean-op="and">
+          <expression id="prefer-rhel7-2-e" attribute="#uname" operation="eq" value="rhel7-2"/>
+        </rule>
+      </rsc_location>
+      <rsc_location id="prefer-rhel7-4" rsc="rsc_rhel7-4">
+        <rule id="prefer-rhel7-4-r" score="100" boolean-op="and">
+          <expression id="prefer-rhel7-4-e" attribute="#uname" operation="eq" value="rhel7-4"/>
+        </rule>
+      </rsc_location>
+      <rsc_location id="prefer-rhel7-5" rsc="rsc_rhel7-5">
+        <rule id="prefer-rhel7-5-r" score="100" boolean-op="and">
+          <expression id="prefer-rhel7-5-e" attribute="#uname" operation="eq" value="rhel7-5"/>
+        </rule>
+      </rsc_location>
+      <rsc_location id="prefer-connected" rsc="master-1">
+        <rule id="connected" score="-INFINITY" boolean-op="or">
+          <expression id="m1-connected-1" attribute="connected" operation="lt" value="1"/>
+          <expression id="m1-connected-2" attribute="connected" operation="not_defined"/>
+        </rule>
+      </rsc_location>
+      <rsc_order id="group-1-after-master-1" first="master-1" then="group-1" kind="Mandatory" first-action="promote" then-action="start"/>
+      <rsc_colocation id="group-1-with-master-1" rsc="group-1" with-rsc="master-1" score="INFINITY" with-rsc-role="Master"/>
+      <rsc_order id="lsb-dummy-after-group-1" first="group-1" then="lsb-dummy" kind="Mandatory" first-action="start" then-action="start"/>
+      <rsc_colocation id="lsb-dummy-with-group-1" rsc="lsb-dummy" with-rsc="group-1" score="INFINITY"/>
+      <rsc_location id="cli-prefer-remote-rsc" rsc="remote-rsc" role="Started" node="remote-rhel7-3" score="INFINITY"/>
+    </constraints>
+    <fencing-topology>
+      <fencing-level id="cts-rhel7-1.1" index="1" target="rhel7-1" devices="FencingFail"/>
+      <fencing-level id="cts-rhel7-1.2" index="2" target="rhel7-1" devices="Fencing"/>
+      <fencing-level id="cts-remote-rhel7-1.1" index="1" target="remote-rhel7-1" devices="FencingFail"/>
+      <fencing-level id="cts-remote-rhel7-1.2" index="2" target="remote-rhel7-1" devices="Fencing"/>
+      <fencing-level id="cts-rhel7-4.1" index="1" target="rhel7-4" devices="FencingFail"/>
+      <fencing-level id="cts-rhel7-4.2" index="2" target="rhel7-4" devices="Fencing"/>
+      <fencing-level id="cts-remote-rhel7-4.1" index="1" target="remote-rhel7-4" devices="FencingFail"/>
+      <fencing-level id="cts-remote-rhel7-4.2" index="2" target="remote-rhel7-4" devices="Fencing"/>
+    </fencing-topology>
+    <alerts>
+      <alert id="alert-1" path="/var/lib/pacemaker/notify.sh">
+        <recipient id="alert-1-recipient-1" value="/run/crm/alert.log"/>
+      </alert>
+    </alerts>
+  </configuration>
+  <status>
+    <node_state id="3" uname="rhel7-1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <transient_attributes id="3">
+        <instance_attributes id="status-3">
+          <nvpair id="status-3-connected" name="connected" value="1"/>
+          <nvpair id="status-3-master-stateful-1" name="master-stateful-1" value="5"/>
+          <nvpair id="status-3-fail-count-remote-rhel7-3.monitor_60000" name="fail-count-remote-rhel7-3#monitor_60000" value="1"/>
+          <nvpair id="status-3-last-failure-remote-rhel7-3.monitor_60000" name="last-failure-remote-rhel7-3#monitor_60000" value="1503335523"/>
+        </instance_attributes>
+      </transient_attributes>
+      <lrm id="3">
+        <lrm_resources>
+          <lrm_resource id="r192.168.122.207" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="r192.168.122.207_last_0" operation_key="r192.168.122.207_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="31:16:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:7;31:16:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-1" call-id="54" rc-code="7" op-status="0" interval="0" last-run="1503335089" last-rc-change="1503335089" exec-time="155" queue-time="0" op-digest="455141de0d85faf791392b0857f9dea1" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="lsb-dummy" type="/usr/share/pacemaker/tests/cts/LSBDummy" class="lsb">
+            <lrm_rsc_op id="lsb-dummy_last_0" operation_key="lsb-dummy_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="34:16:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:7;34:16:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-1" call-id="67" rc-code="7" op-status="0" interval="0" last-run="1503335089" last-rc-change="1503335089" exec-time="21" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="r192.168.122.208" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="r192.168.122.208_last_0" operation_key="r192.168.122.208_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="33:16:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:7;33:16:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-1" call-id="63" rc-code="7" op-status="0" interval="0" last-run="1503335089" last-rc-change="1503335089" exec-time="67" queue-time="0" op-digest="d62599e347d2c3a524c13e135846a774" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="FencingFail" type="fence_dummy" class="stonith">
+            <lrm_rsc_op id="FencingFail_last_0" operation_key="FencingFail_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="39:1:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;39:1:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-1" call-id="143" rc-code="0" op-status="0" interval="0" last-run="1503335297" last-rc-change="1503335297" exec-time="2" queue-time="0" op-digest="9401ff83703cb25678335741cf0bca04" ra-version="4.0.0"/>
+          </lrm_resource>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="37:22:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;37:22:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-1" call-id="192" rc-code="0" op-status="0" interval="0" last-run="1503335492" last-rc-change="1503335492" exec-time="12" queue-time="0" op-digest="2409d7001df14f35973634911d9875b1" ra-version="0.1"/>
+            <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="25:20:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;25:20:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-1" call-id="188" rc-code="0" op-status="0" interval="120000" last-rc-change="1503335472" exec-time="5" queue-time="0" op-digest="87e7768825db29d87bc1f2119ebffe0e" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="petulant" type="DummySD" class="service">
+            <lrm_rsc_op id="petulant_last_0" operation_key="petulant_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="32:16:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:7;32:16:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-1" call-id="59" rc-code="7" op-status="0" interval="0" last-run="1503335089" last-rc-change="1503335089" exec-time="62" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="ping-1" type="ping" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="ping-1_last_0" operation_key="ping-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="60:16:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;60:16:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-1" call-id="45" rc-code="0" op-status="0" interval="0" last-run="1503335084" last-rc-change="1503335084" exec-time="2064" queue-time="0" op-digest="029004076c53d493cd6e9f6661b1b083" ra-version="1.0"/>
+            <lrm_rsc_op id="ping-1_monitor_60000" operation_key="ping-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="61:16:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;61:16:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-1" call-id="47" rc-code="0" op-status="0" interval="60000" last-rc-change="1503335086" exec-time="2103" queue-time="0" op-digest="8654ff74c1be19c9385009b41becc0c8" ra-version="1.0"/>
+          </lrm_resource>
+          <lrm_resource id="stateful-1" type="Stateful" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="stateful-1_last_0" operation_key="stateful-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="62:17:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;62:17:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-1" call-id="68" rc-code="0" op-status="0" interval="0" last-run="1503335091" last-rc-change="1503335091" exec-time="61" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" ra-version="1.0"/>
+            <lrm_rsc_op id="stateful-1_monitor_15000" operation_key="stateful-1_monitor_15000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="54:18:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;54:18:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-1" call-id="70" rc-code="0" op-status="0" interval="15000" last-rc-change="1503335092" exec-time="14" queue-time="0" op-digest="873ed4f07792aa8ff18f3254244675ea" ra-version="1.0"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-1" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-1_last_0" operation_key="rsc_rhel7-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="40:16:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;40:16:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-1" call-id="34" rc-code="0" op-status="0" interval="0" last-run="1503335084" last-rc-change="1503335084" exec-time="4160" queue-time="0" op-digest="75df1567eb9457f8f3c4486bbf875846" ra-version="0.1"/>
+            <lrm_rsc_op id="rsc_rhel7-1_monitor_5000" operation_key="rsc_rhel7-1_monitor_5000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="41:16:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;41:16:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-1" call-id="49" rc-code="0" op-status="0" interval="5000" last-rc-change="1503335088" exec-time="188" queue-time="0" op-digest="045c3d6f5e29b94dc4e3fbfd6c2c0693" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-2" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-2_last_0" operation_key="rsc_rhel7-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="24:16:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:7;24:16:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-1" call-id="17" rc-code="7" op-status="0" interval="0" last-run="1503335084" last-rc-change="1503335084" exec-time="162" queue-time="0" op-digest="ced6f8a1916ebbe555cedafe69985e63" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-3" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-3_last_0" operation_key="rsc_rhel7-3_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="25:16:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:7;25:16:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-1" call-id="21" rc-code="7" op-status="0" interval="0" last-run="1503335084" last-rc-change="1503335084" exec-time="148" queue-time="0" op-digest="3a5f279381f73d4be861526d72bb17a3" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-4" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-4_last_0" operation_key="rsc_rhel7-4_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="26:16:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:7;26:16:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-1" call-id="25" rc-code="7" op-status="0" interval="0" last-run="1503335084" last-rc-change="1503335084" exec-time="127" queue-time="0" op-digest="b4b6b30b67042d5bc4c1735b0df27dc0" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-5" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-5_last_0" operation_key="rsc_rhel7-5_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="47:40:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;47:40:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-1" call-id="137" rc-code="0" op-status="0" interval="0" last-run="1503335234" last-rc-change="1503335234" exec-time="72" queue-time="0" op-digest="aca525581410dfda70831f2846b9807d"/>
+          </lrm_resource>
+          <lrm_resource id="migrator" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="migrator_last_0" operation_key="migrator_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="28:16:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:7;28:16:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-1" call-id="33" rc-code="7" op-status="0" interval="0" last-run="1503335084" last-rc-change="1503335084" exec-time="54" queue-time="0" op-digest="5de129d7fe42dbcfe537f2c63b1921b6" ra-version="1.0" op-force-restart=" fail_start_on  fake " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="5de129d7fe42dbcfe537f2c63b1921b6"/>
+          </lrm_resource>
+          <lrm_resource id="remote-rhel7-3" type="remote" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="remote-rhel7-3_last_0" operation_key="remote-rhel7-3_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="6:29:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;6:29:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-1" call-id="18" rc-code="0" op-status="0" interval="0" last-run="1503335525" last-rc-change="1503335525" exec-time="0" queue-time="0" op-digest="261b4fde9eb7965580f152a20c775486" ra-version="0.1" op-force-restart=" reconnect_interval  port " op-restart-digest="1dbaa1569a09b2a43b8d2b8037a48749"/>
+            <lrm_rsc_op id="remote-rhel7-3_monitor_60000" operation_key="remote-rhel7-3_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="96:23:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;96:23:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-1" call-id="17" rc-code="0" op-status="0" interval="60000" last-rc-change="1503335516" exec-time="0" queue-time="0" op-digest="ed1bab57863fa1dec83afb184f98eb57" ra-version="0.1"/>
+            <lrm_rsc_op id="remote-rhel7-3_last_failure_0" operation_key="remote-rhel7-3_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="96:23:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="4:1;96:23:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-1" call-id="17" rc-code="1" op-status="4" interval="60000" last-rc-change="1503335523" exec-time="0" queue-time="0" op-digest="ed1bab57863fa1dec83afb184f98eb57" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="remote-rsc" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="remote-rsc_last_0" operation_key="remote-rsc_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="85:29:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;85:29:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-1" call-id="215" rc-code="0" op-status="0" interval="0" last-run="1503335525" last-rc-change="1503335525" exec-time="10" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" ra-version="0.9" op-force-restart=" fake " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="remote-rsc_monitor_10000" operation_key="remote-rsc_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="86:29:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;86:29:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-1" call-id="217" rc-code="0" op-status="0" interval="10000" last-rc-change="1503335525" exec-time="11" queue-time="0" op-digest="8f6a313464b7f9e3a31cb448458b700e" ra-version="0.9"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state id="2" uname="rhel7-2" crmd="online" crm-debug-origin="do_update_resource" in_ccm="true" join="member" expected="member">
+      <transient_attributes id="2">
+        <instance_attributes id="status-2">
+          <nvpair id="status-2-connected" name="connected" value="1"/>
+          <nvpair id="status-2-master-stateful-1" name="master-stateful-1" value="10"/>
+        </instance_attributes>
+      </transient_attributes>
+      <lrm id="2">
+        <lrm_resources>
+          <lrm_resource id="r192.168.122.207" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="r192.168.122.207_last_0" operation_key="r192.168.122.207_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="63:0:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;63:0:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-2" call-id="87" rc-code="0" op-status="0" interval="0" last-run="1503334913" last-rc-change="1503334913" exec-time="4104" queue-time="0" op-digest="455141de0d85faf791392b0857f9dea1" ra-version="0.1"/>
+            <lrm_rsc_op id="r192.168.122.207_monitor_5000" operation_key="r192.168.122.207_monitor_5000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="64:0:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;64:0:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-2" call-id="89" rc-code="0" op-status="0" interval="5000" last-rc-change="1503334917" exec-time="33" queue-time="0" op-digest="e265b034f446ca5c724ff25c223f1078" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="r192.168.122.208" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="r192.168.122.208_last_0" operation_key="r192.168.122.208_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="67:0:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;67:0:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-2" call-id="95" rc-code="0" op-status="0" interval="0" last-run="1503334928" last-rc-change="1503334928" exec-time="4067" queue-time="0" op-digest="d62599e347d2c3a524c13e135846a774" ra-version="0.1"/>
+            <lrm_rsc_op id="r192.168.122.208_monitor_5000" operation_key="r192.168.122.208_monitor_5000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="68:0:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;68:0:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-2" call-id="97" rc-code="0" op-status="0" interval="5000" last-rc-change="1503334932" exec-time="29" queue-time="0" op-digest="10ff4c567237e36994b5c19f129f6a1e" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="FencingFail" type="fence_dummy" class="stonith">
+            <lrm_rsc_op id="FencingFail_last_0" operation_key="FencingFail_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="40:22:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;40:22:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-2" call-id="176" rc-code="0" op-status="0" interval="0" last-run="1503335492" last-rc-change="1503335492" exec-time="0" queue-time="0" op-digest="9401ff83703cb25678335741cf0bca04" ra-version="4.0.0"/>
+          </lrm_resource>
+          <lrm_resource id="lsb-dummy" type="/usr/share/pacemaker/tests/cts/LSBDummy" class="lsb">
+            <lrm_rsc_op id="lsb-dummy_last_0" operation_key="lsb-dummy_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="73:0:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;73:0:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-2" call-id="99" rc-code="0" op-status="0" interval="0" last-run="1503334932" last-rc-change="1503334932" exec-time="4" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="lsb-dummy_monitor_5000" operation_key="lsb-dummy_monitor_5000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="74:0:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;74:0:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-2" call-id="101" rc-code="0" op-status="0" interval="5000" last-rc-change="1503334932" exec-time="4" queue-time="0" op-digest="8f6a313464b7f9e3a31cb448458b700e"/>
+          </lrm_resource>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="38:22:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;38:22:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-2" call-id="179" rc-code="0" op-status="0" interval="0" last-run="1503335492" last-rc-change="1503335492" exec-time="34" queue-time="0" op-digest="2409d7001df14f35973634911d9875b1" ra-version="0.1"/>
+            <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="39:22:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;39:22:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-2" call-id="181" rc-code="0" op-status="0" interval="120000" last-rc-change="1503335492" exec-time="15" queue-time="0" op-digest="87e7768825db29d87bc1f2119ebffe0e" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="petulant" type="DummySD" class="service">
+            <lrm_rsc_op id="petulant_last_0" operation_key="petulant_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="65:0:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;65:0:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-2" call-id="91" rc-code="0" op-status="0" interval="0" last-run="1503334917" last-rc-change="1503334917" exec-time="10195" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="petulant_monitor_10000" operation_key="petulant_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="66:0:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;66:0:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-2" call-id="93" rc-code="0" op-status="0" interval="10000" last-rc-change="1503334928" exec-time="2" queue-time="1" op-digest="8f6a313464b7f9e3a31cb448458b700e"/>
+          </lrm_resource>
+          <lrm_resource id="ping-1" type="ping" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="ping-1_last_0" operation_key="ping-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="79:1:0:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" transition-magic="0:0;79:1:0:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" on_node="rhel7-2" call-id="50" rc-code="0" op-status="0" interval="0" last-run="1503334858" last-rc-change="1503334858" exec-time="2073" queue-time="0" op-digest="029004076c53d493cd6e9f6661b1b083" ra-version="1.0"/>
+            <lrm_rsc_op id="ping-1_monitor_60000" operation_key="ping-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="80:1:0:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" transition-magic="0:0;80:1:0:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" on_node="rhel7-2" call-id="52" rc-code="0" op-status="0" interval="60000" last-rc-change="1503334860" exec-time="2043" queue-time="0" op-digest="8654ff74c1be19c9385009b41becc0c8" ra-version="1.0"/>
+          </lrm_resource>
+          <lrm_resource id="stateful-1" type="Stateful" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="stateful-1_last_0" operation_key="stateful-1_promote_0" operation="promote" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="46:11:0:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" transition-magic="0:0;46:11:0:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" on_node="rhel7-2" call-id="82" rc-code="0" op-status="0" interval="0" last-run="1503334912" last-rc-change="1503334912" exec-time="27" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" ra-version="1.0"/>
+            <lrm_rsc_op id="stateful-1_monitor_16000" operation_key="stateful-1_monitor_16000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="48:0:8:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:8;48:0:8:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-2" call-id="85" rc-code="8" op-status="0" interval="16000" last-rc-change="1503334913" exec-time="21" queue-time="0" op-digest="873ed4f07792aa8ff18f3254244675ea" ra-version="1.0"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-1" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-1_last_0" operation_key="rsc_rhel7-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="4:1:7:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" transition-magic="0:7;4:1:7:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" on_node="rhel7-2" call-id="13" rc-code="7" op-status="0" interval="0" last-run="1503334832" last-rc-change="1503334832" exec-time="101" queue-time="0" op-digest="75df1567eb9457f8f3c4486bbf875846" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-2" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-2_last_0" operation_key="rsc_rhel7-2_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="63:1:0:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" transition-magic="0:0;63:1:0:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" on_node="rhel7-2" call-id="20" rc-code="0" op-status="0" interval="0" last-run="1503334841" last-rc-change="1503334841" exec-time="4053" queue-time="0" op-digest="ced6f8a1916ebbe555cedafe69985e63" ra-version="0.1"/>
+            <lrm_rsc_op id="rsc_rhel7-2_monitor_5000" operation_key="rsc_rhel7-2_monitor_5000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="64:1:0:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" transition-magic="0:0;64:1:0:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" on_node="rhel7-2" call-id="22" rc-code="0" op-status="0" interval="5000" last-rc-change="1503334845" exec-time="34" queue-time="0" op-digest="4385e7bd76844b9bc880a410b317b8ab" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-3" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-3_last_0" operation_key="rsc_rhel7-3_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="6:1:7:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" transition-magic="0:7;6:1:7:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" on_node="rhel7-2" call-id="27" rc-code="7" op-status="0" interval="0" last-run="1503334845" last-rc-change="1503334845" exec-time="29" queue-time="0" op-digest="3a5f279381f73d4be861526d72bb17a3" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-4" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-4_last_0" operation_key="rsc_rhel7-4_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="7:1:7:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" transition-magic="0:7;7:1:7:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" on_node="rhel7-2" call-id="31" rc-code="7" op-status="0" interval="0" last-run="1503334845" last-rc-change="1503334845" exec-time="29" queue-time="0" op-digest="b4b6b30b67042d5bc4c1735b0df27dc0" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-5" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-5_last_0" operation_key="rsc_rhel7-5_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="47:29:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;47:29:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-2" call-id="154" rc-code="0" op-status="0" interval="0" last-run="1503335169" last-rc-change="1503335169" exec-time="42" queue-time="0" op-digest="aca525581410dfda70831f2846b9807d"/>
+          </lrm_resource>
+          <lrm_resource id="migrator" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="migrator_last_0" operation_key="migrator_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="9:1:7:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" transition-magic="0:7;9:1:7:2dc02a24-b145-46ac-ba2d-ee462fdab7ab" on_node="rhel7-2" call-id="39" rc-code="7" op-status="0" interval="0" last-run="1503334850" last-rc-change="1503334850" exec-time="11" queue-time="0" op-digest="5de129d7fe42dbcfe537f2c63b1921b6" ra-version="1.0" op-force-restart=" fail_start_on  fake " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="5de129d7fe42dbcfe537f2c63b1921b6"/>
+          </lrm_resource>
+          <lrm_resource id="remote-rhel7-3" type="remote" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="remote-rhel7-3_last_0" operation_key="remote-rhel7-3_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="34:22:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:7;34:22:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-2" call-id="10" rc-code="7" op-status="0" interval="0" last-run="1503335492" last-rc-change="1503335492" exec-time="0" queue-time="0" op-digest="261b4fde9eb7965580f152a20c775486" ra-version="0.1" op-force-restart=" reconnect_interval  port " op-restart-digest="1dbaa1569a09b2a43b8d2b8037a48749"/>
+          </lrm_resource>
+          <lrm_resource id="remote-rsc" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="remote-rsc_last_0" operation_key="remote-rsc_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="26:26:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:7;26:26:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-2" call-id="187" rc-code="7" op-status="0" interval="0" last-run="1503335519" last-rc-change="1503335519" exec-time="48" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" ra-version="0.9" op-force-restart=" fake " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state id="4" uname="rhel7-4" crmd="online" crm-debug-origin="do_update_resource" in_ccm="true" join="member" expected="member">
+      <lrm id="4">
+        <lrm_resources>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="21:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:7;21:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1503335449" last-rc-change="1503335449" exec-time="2" queue-time="0" op-digest="2409d7001df14f35973634911d9875b1" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="FencingFail" type="fence_dummy" class="stonith">
+            <lrm_rsc_op id="FencingFail_last_0" operation_key="FencingFail_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="41:22:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;41:22:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="78" rc-code="0" op-status="0" interval="0" last-run="1503335492" last-rc-change="1503335492" exec-time="11095" queue-time="0" op-digest="9401ff83703cb25678335741cf0bca04" ra-version="4.0.0"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-1" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-1_last_0" operation_key="rsc_rhel7-1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="23:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:7;23:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="13" rc-code="7" op-status="0" interval="0" last-run="1503335451" last-rc-change="1503335451" exec-time="176" queue-time="0" op-digest="75df1567eb9457f8f3c4486bbf875846" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-2" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-2_last_0" operation_key="rsc_rhel7-2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="24:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:7;24:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="17" rc-code="7" op-status="0" interval="0" last-run="1503335451" last-rc-change="1503335451" exec-time="163" queue-time="0" op-digest="ced6f8a1916ebbe555cedafe69985e63" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-3" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-3_last_0" operation_key="rsc_rhel7-3_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="41:23:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;41:23:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="81" rc-code="0" op-status="0" interval="0" last-run="1503335504" last-rc-change="1503335504" exec-time="41" queue-time="0" op-digest="3a5f279381f73d4be861526d72bb17a3" ra-version="0.1"/>
+            <lrm_rsc_op id="rsc_rhel7-3_monitor_5000" operation_key="rsc_rhel7-3_monitor_5000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="30:21:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;30:21:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="74" rc-code="0" op-status="0" interval="5000" last-rc-change="1503335476" exec-time="43" queue-time="0" op-digest="a1b2eeaa8d23ff33ffebd44f45931017" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-4" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-4_last_0" operation_key="rsc_rhel7-4_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="46:17:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;46:17:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="34" rc-code="0" op-status="0" interval="0" last-run="1503335451" last-rc-change="1503335451" exec-time="4131" queue-time="0" op-digest="b4b6b30b67042d5bc4c1735b0df27dc0" ra-version="0.1"/>
+            <lrm_rsc_op id="rsc_rhel7-4_monitor_5000" operation_key="rsc_rhel7-4_monitor_5000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="47:17:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;47:17:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="49" rc-code="0" op-status="0" interval="5000" last-rc-change="1503335455" exec-time="103" queue-time="0" op-digest="8ee22149973acaa2c4a338cde274ee1b" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="migrator" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="migrator_last_0" operation_key="migrator_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="28:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:7;28:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="33" rc-code="7" op-status="0" interval="0" last-run="1503335451" last-rc-change="1503335451" exec-time="40" queue-time="0" op-digest="5de129d7fe42dbcfe537f2c63b1921b6" ra-version="1.0" op-force-restart=" fail_start_on  fake " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="5de129d7fe42dbcfe537f2c63b1921b6"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-5" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-5_last_0" operation_key="rsc_rhel7-5_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="27:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:7;27:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="29" rc-code="7" op-status="0" interval="0" last-run="1503335451" last-rc-change="1503335451" exec-time="112" queue-time="0" op-digest="aca525581410dfda70831f2846b9807d" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="ping-1" type="ping" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="ping-1_last_0" operation_key="ping-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="60:17:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;60:17:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="45" rc-code="0" op-status="0" interval="0" last-run="1503335451" last-rc-change="1503335451" exec-time="2045" queue-time="0" op-digest="029004076c53d493cd6e9f6661b1b083" ra-version="1.0"/>
+            <lrm_rsc_op id="ping-1_monitor_60000" operation_key="ping-1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="61:17:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;61:17:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="47" rc-code="0" op-status="0" interval="60000" last-rc-change="1503335453" exec-time="2050" queue-time="0" op-digest="8654ff74c1be19c9385009b41becc0c8" ra-version="1.0"/>
+          </lrm_resource>
+          <lrm_resource id="stateful-1" type="Stateful" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="stateful-1_last_0" operation_key="stateful-1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="62:18:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;62:18:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="68" rc-code="0" op-status="0" interval="0" last-run="1503335458" last-rc-change="1503335458" exec-time="33" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" ra-version="1.0"/>
+            <lrm_rsc_op id="stateful-1_monitor_15000" operation_key="stateful-1_monitor_15000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="63:18:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;63:18:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="70" rc-code="0" op-status="0" interval="15000" last-rc-change="1503335458" exec-time="13" queue-time="0" op-digest="873ed4f07792aa8ff18f3254244675ea" ra-version="1.0"/>
+          </lrm_resource>
+          <lrm_resource id="petulant" type="DummySD" class="service">
+            <lrm_rsc_op id="petulant_last_0" operation_key="petulant_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="32:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:7;32:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="59" rc-code="7" op-status="0" interval="0" last-run="1503335455" last-rc-change="1503335455" exec-time="23" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="r192.168.122.207" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="r192.168.122.207_last_0" operation_key="r192.168.122.207_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="31:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:7;31:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="54" rc-code="7" op-status="0" interval="0" last-run="1503335455" last-rc-change="1503335455" exec-time="66" queue-time="0" op-digest="455141de0d85faf791392b0857f9dea1" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="lsb-dummy" type="/usr/share/pacemaker/tests/cts/LSBDummy" class="lsb">
+            <lrm_rsc_op id="lsb-dummy_last_0" operation_key="lsb-dummy_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="34:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:7;34:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="67" rc-code="7" op-status="0" interval="0" last-run="1503335455" last-rc-change="1503335455" exec-time="16" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="r192.168.122.208" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="r192.168.122.208_last_0" operation_key="r192.168.122.208_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="33:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:7;33:17:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="63" rc-code="7" op-status="0" interval="0" last-run="1503335455" last-rc-change="1503335455" exec-time="43" queue-time="0" op-digest="d62599e347d2c3a524c13e135846a774" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="remote-rhel7-3" type="remote" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="remote-rhel7-3_last_0" operation_key="remote-rhel7-3_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="35:22:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:7;35:22:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="1" rc-code="7" op-status="0" interval="0" last-run="1503335492" last-rc-change="1503335492" exec-time="0" queue-time="0" op-digest="261b4fde9eb7965580f152a20c775486" ra-version="0.1" op-force-restart=" reconnect_interval  port " op-restart-digest="1dbaa1569a09b2a43b8d2b8037a48749"/>
+          </lrm_resource>
+          <lrm_resource id="remote-rsc" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="remote-rsc_last_0" operation_key="remote-rsc_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="27:26:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:7;27:26:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-4" call-id="87" rc-code="7" op-status="0" interval="0" last-run="1503335519" last-rc-change="1503335519" exec-time="18" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" ra-version="0.9" op-force-restart=" fake " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+      <transient_attributes id="4">
+        <instance_attributes id="status-4">
+          <nvpair id="status-4-connected" name="connected" value="1"/>
+          <nvpair id="status-4-master-stateful-1" name="master-stateful-1" value="5"/>
+        </instance_attributes>
+      </transient_attributes>
+    </node_state>
+    <node_state id="5" uname="rhel7-5" crmd="online" crm-debug-origin="do_update_resource" in_ccm="true" join="member" expected="member">
+      <transient_attributes id="5">
+        <instance_attributes id="status-5">
+          <nvpair id="status-5-connected" name="connected" value="1"/>
+          <nvpair id="status-5-master-stateful-1" name="master-stateful-1" value="5"/>
+        </instance_attributes>
+      </transient_attributes>
+      <lrm id="5">
+        <lrm_resources>
+          <lrm_resource id="r192.168.122.207" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="r192.168.122.207_last_0" operation_key="r192.168.122.207_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="31:40:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:7;31:40:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-5" call-id="47" rc-code="7" op-status="0" interval="0" last-run="1503335234" last-rc-change="1503335234" exec-time="76" queue-time="0" op-digest="455141de0d85faf791392b0857f9dea1" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="lsb-dummy" type="/usr/share/pacemaker/tests/cts/LSBDummy" class="lsb">
+            <lrm_rsc_op id="lsb-dummy_last_0" operation_key="lsb-dummy_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="34:40:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:7;34:40:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-5" call-id="67" rc-code="7" op-status="0" interval="0" last-run="1503335238" last-rc-change="1503335238" exec-time="7" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="r192.168.122.208" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="r192.168.122.208_last_0" operation_key="r192.168.122.208_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="33:40:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:7;33:40:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-5" call-id="62" rc-code="7" op-status="0" interval="0" last-run="1503335238" last-rc-change="1503335238" exec-time="62" queue-time="0" op-digest="d62599e347d2c3a524c13e135846a774" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="FencingFail" type="fence_dummy" class="stonith">
+            <lrm_rsc_op id="FencingFail_last_0" operation_key="FencingFail_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="22:40:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:7;22:40:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-5" call-id="9" rc-code="7" op-status="0" interval="0" last-run="1503335231" last-rc-change="1503335231" exec-time="0" queue-time="0" op-digest="9401ff83703cb25678335741cf0bca04" ra-version="4.0.0"/>
+          </lrm_resource>
+          <lrm_resource id="Fencing" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="21:40:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:7;21:40:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-5" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1503335231" last-rc-change="1503335231" exec-time="3" queue-time="0" op-digest="2409d7001df14f35973634911d9875b1" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="petulant" type="DummySD" class="service">
+            <lrm_rsc_op id="petulant_last_0" operation_key="petulant_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="32:40:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:7;32:40:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-5" call-id="58" rc-code="7" op-status="0" interval="0" last-run="1503335238" last-rc-change="1503335238" exec-time="28" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+          <lrm_resource id="ping-1" type="ping" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="ping-1_last_0" operation_key="ping-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="60:40:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;60:40:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-5" call-id="48" rc-code="0" op-status="0" interval="0" last-run="1503335234" last-rc-change="1503335234" exec-time="2083" queue-time="0" op-digest="029004076c53d493cd6e9f6661b1b083" ra-version="1.0"/>
+            <lrm_rsc_op id="ping-1_monitor_60000" operation_key="ping-1_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="61:40:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;61:40:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-5" call-id="51" rc-code="0" op-status="0" interval="60000" last-rc-change="1503335236" exec-time="2116" queue-time="0" op-digest="8654ff74c1be19c9385009b41becc0c8" ra-version="1.0"/>
+          </lrm_resource>
+          <lrm_resource id="stateful-1" type="Stateful" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="stateful-1_last_0" operation_key="stateful-1_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="62:41:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;62:41:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-5" call-id="68" rc-code="0" op-status="0" interval="0" last-run="1503335241" last-rc-change="1503335241" exec-time="51" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" ra-version="1.0"/>
+            <lrm_rsc_op id="stateful-1_monitor_15000" operation_key="stateful-1_monitor_15000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="64:42:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;64:42:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-5" call-id="70" rc-code="0" op-status="0" interval="15000" last-rc-change="1503335241" exec-time="14" queue-time="0" op-digest="873ed4f07792aa8ff18f3254244675ea" ra-version="1.0"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-1" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-1_last_0" operation_key="rsc_rhel7-1_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="23:40:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:7;23:40:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-5" call-id="13" rc-code="7" op-status="0" interval="0" last-run="1503335233" last-rc-change="1503335233" exec-time="218" queue-time="0" op-digest="75df1567eb9457f8f3c4486bbf875846" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-2" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-2_last_0" operation_key="rsc_rhel7-2_monitor_0" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="24:40:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:7;24:40:7:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-5" call-id="17" rc-code="7" op-status="0" interval="0" last-run="1503335233" last-rc-change="1503335233" exec-time="181" queue-time="0" op-digest="ced6f8a1916ebbe555cedafe69985e63" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-3" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-3_last_0" operation_key="rsc_rhel7-3_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="32:29:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;32:29:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-5" call-id="100" rc-code="0" op-status="0" interval="0" last-run="1503335525" last-rc-change="1503335525" exec-time="4125" queue-time="1" op-digest="3a5f279381f73d4be861526d72bb17a3" ra-version="0.1"/>
+            <lrm_rsc_op id="rsc_rhel7-3_monitor_5000" operation_key="rsc_rhel7-3_monitor_5000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="33:29:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;33:29:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-5" call-id="102" rc-code="0" op-status="0" interval="5000" last-rc-change="1503335529" exec-time="36" queue-time="0" op-digest="a1b2eeaa8d23ff33ffebd44f45931017" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-4" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-4_last_0" operation_key="rsc_rhel7-4_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="45:17:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;45:17:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-5" call-id="86" rc-code="0" op-status="0" interval="0" last-run="1503335451" last-rc-change="1503335451" exec-time="52" queue-time="0" op-digest="b4b6b30b67042d5bc4c1735b0df27dc0" ra-version="0.1"/>
+            <lrm_rsc_op id="rsc_rhel7-4_monitor_5000" operation_key="rsc_rhel7-4_monitor_5000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="31:44:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;31:44:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-5" call-id="74" rc-code="0" op-status="0" interval="5000" last-rc-change="1503335283" exec-time="30" queue-time="0" op-digest="8ee22149973acaa2c4a338cde274ee1b" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="rsc_rhel7-5" type="IPaddr2" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc_rhel7-5_last_0" operation_key="rsc_rhel7-5_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="48:40:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;48:40:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-5" call-id="49" rc-code="0" op-status="0" interval="0" last-run="1503335234" last-rc-change="1503335234" exec-time="4056" queue-time="0" op-digest="aca525581410dfda70831f2846b9807d" ra-version="0.1"/>
+            <lrm_rsc_op id="rsc_rhel7-5_monitor_5000" operation_key="rsc_rhel7-5_monitor_5000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.14" transition-key="49:40:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" transition-magic="0:0;49:40:0:a4c95233-ef19-40fd-89f0-0a7ce1f787a6" on_node="rhel7-5" call-id="53" rc-code="0" op-status="0" interval="5000" last-rc-change="1503335238" exec-time="140" queue-time="0" op-digest="51292f6c89131cf04bf857325f0e4041" ra-version="0.1"/>
+          </lrm_resource>
+          <lrm_resource id="migrator" type="Dummy" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="migrator_last_0" operation_key="migrator_migrate_from_0" operation="migrate_from" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="43:20:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;43:20:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-5" call-id="89" rc-code="0" op-status="0" interval="0" last-run="1503335472" last-rc-change="1503335472" exec-time="28" queue-time="0" op-digest="5de129d7fe42dbcfe537f2c63b1921b6" ra-version="1.0" op-force-restart=" fail_start_on  fake " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params=" passwd " op-secure-digest="5de129d7fe42dbcfe537f2c63b1921b6" migrate_source="rhel7-3" migrate_target="rhel7-5"/>
+            <lrm_rsc_op id="migrator_monitor_10000" operation_key="migrator_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="41:20:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:0;41:20:0:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-5" call-id="91" rc-code="0" op-status="0" interval="10000" last-rc-change="1503335472" exec-time="16" queue-time="0" op-digest="9210327bce4f7670c7b350bf32101791" ra-version="1.0" op-secure-params=" passwd " op-secure-digest="5de129d7fe42dbcfe537f2c63b1921b6"/>
+          </lrm_resource>
+          <lrm_resource id="remote-rhel7-3" type="remote" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="remote-rhel7-3_last_0" operation_key="remote-rhel7-3_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="36:22:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:7;36:22:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-5" call-id="2" rc-code="7" op-status="0" interval="0" last-run="1503335492" last-rc-change="1503335492" exec-time="0" queue-time="0" op-digest="261b4fde9eb7965580f152a20c775486" ra-version="0.1" op-force-restart=" reconnect_interval  port " op-restart-digest="1dbaa1569a09b2a43b8d2b8037a48749"/>
+          </lrm_resource>
+          <lrm_resource id="remote-rsc" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="remote-rsc_last_0" operation_key="remote-rsc_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.14" transition-key="28:26:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" transition-magic="0:7;28:26:7:231989e8-bacb-4b30-ad73-d034cf9fd2ed" on_node="rhel7-5" call-id="98" rc-code="7" op-status="0" interval="0" last-run="1503335519" last-rc-change="1503335519" exec-time="22" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" ra-version="0.9" op-force-restart=" fake " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state remote_node="true" id="remote-rhel7-1" uname="remote-rhel7-1" in_ccm="false" crm-debug-origin="post_cache_update"/>
+    <node_state remote_node="true" id="remote-rhel7-3" uname="remote-rhel7-3" in_ccm="false" crm-debug-origin="remote_node_down" node_fenced="1503335525"/>
+  </status>
+</cib>