diff --git a/crmd/lrm.c b/crmd/lrm.c
index 1519a9a771..28b6fa5bc7 100644
--- a/crmd/lrm.c
+++ b/crmd/lrm.c
@@ -1,2123 +1,2123 @@
 /*
  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  *
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 
 #include <crm/crm.h>
 #include <crm/services.h>
 
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 
 #include <crmd.h>
 #include <crmd_fsa.h>
 #include <crmd_messages.h>
 #include <crmd_callbacks.h>
 #include <crmd_lrm.h>
 
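 /* Note (descriptive comment, not in the upstream file): START_DELAY_THRESHOLD
  * is 5 minutes expressed in milliseconds; MAX_LRM_REG_FAILS bounds how often
  * do_lrm_control() retries signing on to the local lrmd before reporting an
  * error (see the A_LRM_CONNECT handling below).
  */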
 #define START_DELAY_THRESHOLD 5 * 60 * 1000
 #define MAX_LRM_REG_FAILS 30
 
 struct delete_event_s {
     int rc;
     const char *rsc;
     lrm_state_t *lrm_state;
 };
 
 gboolean process_lrm_event(lrm_state_t * lrm_state, lrmd_event_data_t * op);
 static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id);
 static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list);
 static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data);
 static int delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int call_options,
                              const char *user_name);
 
 static lrmd_event_data_t *construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op,
                                        const char *rsc_id, const char *operation);
 static void do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operation,
                           xmlNode * msg, xmlNode * request);
 
 void send_direct_ack(const char *to_host, const char *to_sys,
                      lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id);
 
 static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state,
                                          int log_level);
 
 static void
 lrm_connection_destroy(void)
 {
     if (is_set(fsa_input_register, R_LRM_CONNECTED)) {
         crm_crit("LRM Connection failed");
         register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL);
         clear_bit(fsa_input_register, R_LRM_CONNECTED);
 
     } else {
         crm_info("LRM Connection disconnected");
     }
 
 }
 
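 /* Build the "<rsc_id>:<call_id>" key used to index lrm_state->pending_ops
  * (cancel_op() regenerates the same key when the caller does not supply one).
  */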
 static char *
 make_stop_id(const char *rsc, int call_id)
 {
     char *op_id = NULL;
 
     op_id = calloc(1, strlen(rsc) + 34);
     if (op_id != NULL) {
         snprintf(op_id, strlen(rsc) + 34, "%s:%d", rsc, call_id);
     }
     return op_id;
 }
 
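 /* g_hash_table_foreach() helpers: parameters whose names contain the CRM_META
  * prefix are meta attributes; everything else is copied into the destination
  * table as an instance attribute.
  */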
 static void
 copy_instance_keys(gpointer key, gpointer value, gpointer user_data)
 {
     if (strstr(key, CRM_META "_") == NULL) {
         g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value));
     }
 }
 
 static void
 copy_meta_keys(gpointer key, gpointer value, gpointer user_data)
 {
     if (strstr(key, CRM_META "_") != NULL) {
         g_hash_table_replace(user_data, strdup((const char *)key), strdup((const char *)value));
     }
 }
 
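 /* Maintain the per-resource history cache: remember the last completed
  * operation, the last failure, any active recurring operations, and the
  * parameters to reuse for a later stop.
  */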
 static void
 update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op)
 {
     int target_rc = 0;
     rsc_history_t *entry = NULL;
 
     if (op->rsc_deleted) {
         crm_debug("Purged history for '%s' after %s", op->rsc_id, op->op_type);
         delete_rsc_status(lrm_state, op->rsc_id, cib_quorum_override, NULL);
         return;
     }
 
     if (safe_str_eq(op->op_type, RSC_NOTIFY)) {
         return;
     }
 
     crm_debug("Updating history for '%s' with %s op", op->rsc_id, op->op_type);
 
     entry = g_hash_table_lookup(lrm_state->resource_history, op->rsc_id);
     if (entry == NULL && rsc) {
         entry = calloc(1, sizeof(rsc_history_t));
         entry->id = strdup(op->rsc_id);
         g_hash_table_insert(lrm_state->resource_history, entry->id, entry);
 
         entry->rsc.id = entry->id;
         entry->rsc.type = strdup(rsc->type);
         entry->rsc.class = strdup(rsc->class);
         if (rsc->provider) {
             entry->rsc.provider = strdup(rsc->provider);
         } else {
             entry->rsc.provider = NULL;
         }
 
     } else if (entry == NULL) {
         crm_info("Resource %s no longer exists, not updating cache", op->rsc_id);
         return;
     }
 
     entry->last_callid = op->call_id;
     target_rc = rsc_op_expected_rc(op);
     if (op->op_status == PCMK_LRM_OP_CANCELLED) {
         if (op->interval > 0) {
             GList *gIter, *gIterNext;
 
             crm_trace("Removing cancelled recurring op: %s_%s_%d", op->rsc_id, op->op_type,
                       op->interval);
 
             for (gIter = entry->recurring_op_list; gIter != NULL; gIter = gIterNext) {
                 lrmd_event_data_t *existing = gIter->data;
 
                 gIterNext = gIter->next;
 
                 if (safe_str_eq(op->rsc_id, existing->rsc_id)
                     && safe_str_eq(op->op_type, existing->op_type)
                     && op->interval == existing->interval) {
                     lrmd_free_event(existing);
                     entry->recurring_op_list = g_list_delete_link(entry->recurring_op_list, gIter);
                 }
             }
             return;
 
         } else {
             crm_trace("Skipping %s_%s_%d rc=%d, status=%d", op->rsc_id, op->op_type, op->interval,
                       op->rc, op->op_status);
         }
 
     } else if (did_rsc_op_fail(op, target_rc)) {
         /* We must store failed monitors here
          * - otherwise the block below will cause them to be forgotten when a stop happens
          */
         if (entry->failed) {
             lrmd_free_event(entry->failed);
         }
         entry->failed = lrmd_copy_event(op);
 
     } else if (op->interval == 0) {
         if (entry->last) {
             lrmd_free_event(entry->last);
         }
         entry->last = lrmd_copy_event(op);
 
         if (op->params &&
             (safe_str_eq(CRMD_ACTION_START, op->op_type) ||
              safe_str_eq(CRMD_ACTION_STATUS, op->op_type))) {
 
             if (entry->stop_params) {
                 g_hash_table_destroy(entry->stop_params);
             }
             entry->stop_params = g_hash_table_new_full(crm_str_hash,
                                                        g_str_equal, g_hash_destroy_str,
                                                        g_hash_destroy_str);
 
             g_hash_table_foreach(op->params, copy_instance_keys, entry->stop_params);
         }
     }
 
     if (op->interval > 0) {
         GListPtr iter = NULL;
 
         for(iter = entry->recurring_op_list; iter; iter = iter->next) {
             lrmd_event_data_t *o = iter->data;
 
             /* op->rsc_id is implied */
             if(op->interval == o->interval && strcmp(op->op_type, o->op_type) == 0) {
                 crm_trace("Removing existing recurring op entry: %s_%s_%d", op->rsc_id, op->op_type, op->interval);
                 entry->recurring_op_list = g_list_remove(entry->recurring_op_list, o);
                 break;
             }
         }
 
         crm_trace("Adding recurring op: %s_%s_%d", op->rsc_id, op->op_type, op->interval);
         entry->recurring_op_list = g_list_prepend(entry->recurring_op_list, lrmd_copy_event(op));
 
     } else if (entry->recurring_op_list && safe_str_eq(op->op_type, RSC_STATUS) == FALSE) {
         GList *gIter = entry->recurring_op_list;
 
         crm_trace("Dropping %d recurring ops because of: %s_%s_%d",
                   g_list_length(gIter), op->rsc_id, op->op_type, op->interval);
         for (; gIter != NULL; gIter = gIter->next) {
             lrmd_free_event(gIter->data);
         }
         g_list_free(entry->recurring_op_list);
         entry->recurring_op_list = NULL;
     }
 }
 
 void
 lrm_op_callback(lrmd_event_data_t * op)
 {
     const char *nodename = NULL;
     lrm_state_t *lrm_state = NULL;
 
     CRM_CHECK(op != NULL, return);
 
     /* determine the node name for this connection. */
     nodename = op->remote_nodename ? op->remote_nodename : fsa_our_uname;
 
     if (op->type == lrmd_event_disconnect && (safe_str_eq(nodename, fsa_our_uname))) {
         /* if this is the local lrmd ipc connection, set the right bits in the
          * crmd when the connection goes down */
         lrm_connection_destroy();
         return;
     } else if (op->type != lrmd_event_exec_complete) {
         /* we only need to process execution results */
         return;
     }
 
     lrm_state = lrm_state_find(nodename);
     CRM_ASSERT(lrm_state != NULL);
 
     process_lrm_event(lrm_state, op);
 }
 
 /*	 A_LRM_CONNECT	*/
 void
 do_lrm_control(long long action,
                enum crmd_fsa_cause cause,
                enum crmd_fsa_state cur_state,
                enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     /* This only pertains to local lrmd connections.  Remote connections are handled as
      * resources within the pengine.  Connecting and disconnecting from remote lrmd instances
      * are handled differently than the local one. */
 
     lrm_state_t *lrm_state = NULL;
 
     if(fsa_our_uname == NULL) {
         return; /* Nothing to do */
     }
     lrm_state = lrm_state_find_or_create(fsa_our_uname);
     if (lrm_state == NULL) {
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
         return;
     }
 
     if (action & A_LRM_DISCONNECT) {
         if (lrm_state_verify_stopped(lrm_state, cur_state, LOG_INFO) == FALSE) {
             if (action == A_LRM_DISCONNECT) {
                 crmd_fsa_stall(FALSE);
                 return;
             }
         }
 
         clear_bit(fsa_input_register, R_LRM_CONNECTED);
         crm_info("Disconnecting from the LRM");
         lrm_state_disconnect(lrm_state);
         lrm_state_reset_tables(lrm_state);
         crm_notice("Disconnected from the LRM");
     }
 
     if (action & A_LRM_CONNECT) {
         int ret = pcmk_ok;
 
         crm_debug("Connecting to the LRM");
         ret = lrm_state_ipc_connect(lrm_state);
 
         if (ret != pcmk_ok) {
             if (lrm_state->num_lrm_register_fails < MAX_LRM_REG_FAILS) {
                 crm_warn("Failed to sign on to the LRM %d"
                          " (%d max) times", lrm_state->num_lrm_register_fails, MAX_LRM_REG_FAILS);
 
                 crm_timer_start(wait_timer);
                 crmd_fsa_stall(FALSE);
                 return;
             }
         }
 
         if (ret != pcmk_ok) {
             crm_err("Failed to sign on to the LRM %d" " (max) times",
                     lrm_state->num_lrm_register_fails);
             register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
             return;
         }
 
         set_bit(fsa_input_register, R_LRM_CONNECTED);
         crm_info("LRM connection established");
     }
 
     if (action & ~(A_LRM_CONNECT | A_LRM_DISCONNECT)) {
         crm_err("Unexpected action %s in %s", fsa_action2string(action), __FUNCTION__);
     }
 }
 
 static gboolean
 lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level)
 {
     int counter = 0;
     gboolean rc = TRUE;
     const char *when = "lrm disconnect";
 
     GHashTableIter gIter;
     const char *key = NULL;
     rsc_history_t *entry = NULL;
     struct recurring_op_s *pending = NULL;
 
     crm_debug("Checking for active resources before exit");
 
     if (cur_state == S_TERMINATE) {
         log_level = LOG_ERR;
         when = "shutdown";
 
     } else if (is_set(fsa_input_register, R_SHUTDOWN)) {
         when = "shutdown... waiting";
     }
 
     if (lrm_state->pending_ops && lrm_state_is_connected(lrm_state) == TRUE) {
         guint removed = g_hash_table_foreach_remove(
             lrm_state->pending_ops, stop_recurring_actions, lrm_state);
 
         crm_notice("Stopped %u recurring operations at %s (%u ops remaining)",
                    removed, when, g_hash_table_size(lrm_state->pending_ops));
     }
 
     if (lrm_state->pending_ops) {
         g_hash_table_iter_init(&gIter, lrm_state->pending_ops);
         while (g_hash_table_iter_next(&gIter, NULL, (void **)&pending)) {
             /* Ignore recurring actions in the shutdown calculations */
             if (pending->interval == 0) {
                 counter++;
             }
         }
     }
 
     if (counter > 0) {
         do_crm_log(log_level, "%d pending LRM operations at %s", counter, when);
 
         if (cur_state == S_TERMINATE || !is_set(fsa_input_register, R_SENT_RSC_STOP)) {
             g_hash_table_iter_init(&gIter, lrm_state->pending_ops);
             while (g_hash_table_iter_next(&gIter, (gpointer*)&key, (gpointer*)&pending)) {
                 do_crm_log(log_level, "Pending action: %s (%s)", key, pending->op_key);
             }
 
         } else {
             rc = FALSE;
         }
         return rc;
     }
 
     if (lrm_state->resource_history == NULL) {
         return rc;
     }
 
     if (cur_state == S_TERMINATE || is_set(fsa_input_register, R_SHUTDOWN)) {
         /* At this point we're not waiting, we're just shutting down */
         when = "shutdown";
     }
 
     counter = 0;
     g_hash_table_iter_init(&gIter, lrm_state->resource_history);
     while (g_hash_table_iter_next(&gIter, NULL, (gpointer*)&entry)) {
         if (is_rsc_active(lrm_state, entry->id) == FALSE) {
             continue;
         }
 
         counter++;
         crm_trace("Found %s active", entry->id);
         if (lrm_state->pending_ops) {
             GHashTableIter hIter;
 
             g_hash_table_iter_init(&hIter, lrm_state->pending_ops);
             while (g_hash_table_iter_next(&hIter, (gpointer*)&key, (gpointer*)&pending)) {
                 if (safe_str_eq(entry->id, pending->rsc_id)) {
                     crm_notice("%sction %s (%s) incomplete at %s",
                                pending->interval == 0 ? "A" : "Recurring a",
                                key, pending->op_key, when);
                 }
             }
         }
     }
 
     if (counter) {
         crm_err("%d resources were active at %s.", counter, when);
     }
 
     return rc;
 }
 
 static char *
 get_rsc_metadata(const char *type, const char *class, const char *provider)
 {
     int rc = 0;
     char *metadata = NULL;
 
     /* Always use a local connection for this operation */
     lrm_state_t *lrm_state = lrm_state_find(fsa_our_uname);
 
     CRM_CHECK(type != NULL, return NULL);
     CRM_CHECK(class != NULL, return NULL);
     CRM_CHECK(lrm_state != NULL, return NULL);
 
     if (provider == NULL) {
         provider = "heartbeat";
     }
 
     crm_trace("Retreiving metadata for %s::%s:%s", type, class, provider);
     rc = lrm_state_get_metadata(lrm_state, class, provider, type, &metadata, 0);
 
     if (metadata) {
         /* copy the metadata because the LRM likes using
          *   g_alloc instead of cl_malloc
          */
         char *m_copy = strdup(metadata);
 
         g_free(metadata);
         metadata = m_copy;
 
     } else {
         crm_warn("No metadata found for %s::%s:%s: %s (%d)", type, class, provider, pcmk_strerror(rc), rc);
     }
 
     return metadata;
 }
 
 typedef struct reload_data_s {
     char *key;
     char *metadata;
     time_t last_query;
     gboolean can_reload;
     GListPtr restart_list;
 } reload_data_t;
 
 static void
 g_hash_destroy_reload(gpointer data)
 {
     reload_data_t *reload = data;
 
     free(reload->key);
     free(reload->metadata);
     g_list_free_full(reload->restart_list, free);
     free(reload);
 }
 
 GHashTable *reload_hash = NULL;
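 /* Parse (and cache per agent, in reload_hash) the resource agent metadata to
  * decide whether the agent advertises a "reload" action, and to collect the
  * "unique" parameters that cannot be reloaded and therefore force a restart.
  */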
 static GListPtr
 get_rsc_restart_list(lrmd_rsc_info_t * rsc, lrmd_event_data_t * op)
 {
     int len = 0;
     char *key = NULL;
     char *copy = NULL;
     const char *value = NULL;
     const char *provider = NULL;
 
     xmlNode *param = NULL;
     xmlNode *params = NULL;
     xmlNode *actions = NULL;
     xmlNode *metadata = NULL;
 
     time_t now = time(NULL);
     reload_data_t *reload = NULL;
 
     if (reload_hash == NULL) {
         reload_hash = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_reload);
     }
 
     provider = rsc->provider;
     if (provider == NULL) {
         provider = "heartbeat";
     }
 
     len = strlen(rsc->type) + strlen(rsc->class) + strlen(provider) + 4;
     key = malloc(len);
     if(key) {
         snprintf(key, len, "%s::%s:%s", rsc->type, rsc->class, provider);
         reload = g_hash_table_lookup(reload_hash, key);
     }
 
     if (reload && ((now - 9) > reload->last_query)
         && safe_str_eq(op->op_type, RSC_START)) {
         reload = NULL;          /* re-query */
     }
 
     if (reload == NULL) {
         xmlNode *action = NULL;
 
         reload = calloc(1, sizeof(reload_data_t));
         g_hash_table_replace(reload_hash, key, reload);
 
         reload->last_query = now;
         reload->key = key;
         key = NULL;
         reload->metadata = get_rsc_metadata(rsc->type, rsc->class, provider);
 
         if(reload->metadata == NULL) {
             goto cleanup;
         }
 
         metadata = string2xml(reload->metadata);
         if (metadata == NULL) {
             crm_err("Metadata for %s::%s:%s is not valid XML",
                     rsc->provider, rsc->class, rsc->type);
             goto cleanup;
         }
 
         actions = find_xml_node(metadata, "actions", TRUE);
 
         for (action = __xml_first_child(actions); action != NULL; action = __xml_next(action)) {
             if (crm_str_eq((const char *)action->name, "action", TRUE)) {
                 value = crm_element_value(action, "name");
                 if (safe_str_eq("reload", value)) {
                     reload->can_reload = TRUE;
                     break;
                 }
             }
         }
 
         if (reload->can_reload == FALSE) {
             goto cleanup;
         }
 
         params = find_xml_node(metadata, "parameters", TRUE);
         for (param = __xml_first_child(params); param != NULL; param = __xml_next(param)) {
             if (crm_str_eq((const char *)param->name, "parameter", TRUE)) {
                 value = crm_element_value(param, "unique");
                 if (crm_is_true(value)) {
                     value = crm_element_value(param, "name");
                     if (value == NULL) {
                         crm_err("%s: NULL param", key);
                         continue;
                     }
                     crm_debug("Attr %s is not reloadable", value);
                     copy = strdup(value);
                     CRM_CHECK(copy != NULL, continue);
                     reload->restart_list = g_list_append(reload->restart_list, copy);
                 }
             }
         }
     }
 
   cleanup:
     free(key);
     free_xml(metadata);
     return reload->restart_list;
 }
 
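 /* Attach the list of restart-forcing parameters, and a digest of their current
  * values, to an operation update so that the digest can later be compared
  * against new parameter values to tell a reloadable change from one that
  * requires a full restart.
  */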
 static void
 append_restart_list(lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, xmlNode * update,
                     const char *version)
 {
     int len = 0;
     char *list = NULL;
     char *digest = NULL;
     const char *value = NULL;
     xmlNode *restart = NULL;
     GListPtr restart_list = NULL;
     GListPtr lpc = NULL;
 
     if (op->interval > 0) {
         /* monitors are not reloadable */
         return;
 
     } else if (op->params == NULL) {
         crm_debug("%s has no parameters", ID(update));
         return;
 
     } else if (rsc == NULL) {
         return;
 
     } else if (crm_str_eq(CRMD_ACTION_STOP, op->op_type, TRUE)) {
         /* Stopped resources don't need to be reloaded */
         return;
 
     } else if (compare_version("1.0.8", version) > 0) {
         /* Caller version does not support reloads */
         return;
     }
 
     restart_list = get_rsc_restart_list(rsc, op);
     if (restart_list == NULL) {
         /* Resource does not support reloads */
         return;
     }
 
     restart = create_xml_node(NULL, XML_TAG_PARAMS);
     for (lpc = restart_list; lpc != NULL; lpc = lpc->next) {
         const char *param = (const char *)lpc->data;
 
         int start = len;
 
         CRM_CHECK(param != NULL, continue);
         value = g_hash_table_lookup(op->params, param);
         if (value != NULL) {
             crm_xml_add(restart, param, value);
         }
         len += strlen(param) + 2;
         list = realloc(list, len + 1);
         sprintf(list + start, " %s ", param);
     }
 
     digest = calculate_operation_digest(restart, version);
     crm_xml_add(update, XML_LRM_ATTR_OP_RESTART, list);
     crm_xml_add(update, XML_LRM_ATTR_RESTART_DIGEST, digest);
 
     crm_trace("%s: %s, %s", rsc->id, digest, list);
     crm_log_xml_trace(restart, "restart digest source");
 
     free_xml(restart);
     free(digest);
     free(list);
 }
 
 static gboolean
 build_operation_update(xmlNode * parent, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op,
                        const char *src)
 {
     int target_rc = 0;
     xmlNode *xml_op = NULL;
     const char *caller_version = CRM_FEATURE_SET;
 
     if (op == NULL) {
         return FALSE;
 
     } else if (AM_I_DC) {
 
     } else if (fsa_our_dc_version != NULL) {
         caller_version = fsa_our_dc_version;
     } else if (op->params == NULL) {
         caller_version = fsa_our_dc_version;
     } else {
         /* there is a small risk in formerly mixed clusters that
          *   it will be sub-optimal.
          * however with our upgrade policy, the update we send
          *   should still be completely supported anyway
          */
         caller_version = g_hash_table_lookup(op->params, XML_ATTR_CRM_VERSION);
         crm_debug("Falling back to operation originator version: %s", caller_version);
     }
 
     target_rc = rsc_op_expected_rc(op);
     xml_op = create_operation_update(parent, op, caller_version, target_rc, src, LOG_DEBUG);
     crm_xml_add(xml_op, XML_LRM_ATTR_TARGET, fsa_our_uname); /* For context during triage */
 
     if (xml_op) {
         append_restart_list(rsc, op, xml_op, caller_version);
     }
     return TRUE;
 }
 
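 /* A resource counts as active unless its most recent non-recurring result
  * shows a successful stop or migration, a "not running" rc, or a
  * "not configured" rc.
  */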
 static gboolean
 is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id)
 {
     rsc_history_t *entry = NULL;
 
     entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id);
     if (entry == NULL || entry->last == NULL) {
         return FALSE;
     }
 
     crm_trace("Processing %s: %s.%d=%d",
               rsc_id, entry->last->op_type, entry->last->interval, entry->last->rc);
     if (entry->last->rc == PCMK_OCF_OK && safe_str_eq(entry->last->op_type, CRMD_ACTION_STOP)) {
         return FALSE;
 
     } else if (entry->last->rc == PCMK_OCF_OK
                && safe_str_eq(entry->last->op_type, CRMD_ACTION_MIGRATE)) {
         /* a stricter check is too complex...
          * leave that to the PE
          */
         return FALSE;
 
     } else if (entry->last->rc == PCMK_OCF_NOT_RUNNING) {
         return FALSE;
 
     } else if (entry->last->interval == 0 && entry->last->rc == PCMK_OCF_NOT_CONFIGURED) {
         /* Badly configured resources can't be reliably stopped */
         return FALSE;
     }
 
     return TRUE;
 }
 
 static gboolean
 build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list)
 {
     GHashTableIter iter;
     rsc_history_t *entry = NULL;
 
     g_hash_table_iter_init(&iter, lrm_state->resource_history);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) {
 
         GList *gIter = NULL;
         xmlNode *xml_rsc = create_xml_node(rsc_list, XML_LRM_TAG_RESOURCE);
 
         crm_xml_add(xml_rsc, XML_ATTR_ID, entry->id);
         crm_xml_add(xml_rsc, XML_ATTR_TYPE, entry->rsc.type);
         crm_xml_add(xml_rsc, XML_AGENT_ATTR_CLASS, entry->rsc.class);
         crm_xml_add(xml_rsc, XML_AGENT_ATTR_PROVIDER, entry->rsc.provider);
 
         if (entry->last && entry->last->params) {
             const char *container = g_hash_table_lookup(entry->last->params, CRM_META"_"XML_RSC_ATTR_CONTAINER);
             if (container) {
                 crm_trace("Resource %s is a part of container resource %s", entry->id, container);
                 crm_xml_add(xml_rsc, XML_RSC_ATTR_CONTAINER, container);
             }
         }
         build_operation_update(xml_rsc, &(entry->rsc), entry->last, __FUNCTION__);
         build_operation_update(xml_rsc, &(entry->rsc), entry->failed, __FUNCTION__);
         for (gIter = entry->recurring_op_list; gIter != NULL; gIter = gIter->next) {
             build_operation_update(xml_rsc, &(entry->rsc), gIter->data, __FUNCTION__);
         }
     }
 
     return FALSE;
 }
 
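 /* Build a node_state fragment for this lrm_state (local cluster node or
  * remote node) containing an <lrm> section for every resource in the
  * history cache, suitable for merging into the CIB status section.
  */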
 xmlNode *
 do_lrm_query_internal(lrm_state_t * lrm_state, gboolean is_replace)
 {
     xmlNode *xml_result = NULL;
     xmlNode *xml_state = NULL;
     xmlNode *xml_data = NULL;
     xmlNode *rsc_list = NULL;
     const char *uuid = NULL;
 
     if (safe_str_eq(lrm_state->node_name, fsa_our_uname)) {
         crm_node_t *peer = crm_get_peer(0, lrm_state->node_name);
         xml_state = do_update_node_cib(peer, node_update_cluster|node_update_peer, NULL, __FUNCTION__);
         /* The next two lines shouldn't be necessary for newer DCs */
         crm_xml_add(xml_state, XML_NODE_JOIN_STATE, CRMD_JOINSTATE_MEMBER);
         crm_xml_add(xml_state, XML_NODE_EXPECTED, CRMD_JOINSTATE_MEMBER);
         uuid = fsa_our_uuid;
 
     } else {
         xml_state = create_xml_node(NULL, XML_CIB_TAG_STATE);
         crm_xml_add(xml_state, XML_NODE_IS_REMOTE, "true");
         crm_xml_add(xml_state, XML_ATTR_ID, lrm_state->node_name);
         crm_xml_add(xml_state, XML_ATTR_UNAME, lrm_state->node_name);
         uuid = lrm_state->node_name;
     }
 
     xml_data = create_xml_node(xml_state, XML_CIB_TAG_LRM);
     crm_xml_add(xml_data, XML_ATTR_ID, uuid);
     rsc_list = create_xml_node(xml_data, XML_LRM_TAG_RESOURCES);
 
     /* Build a list of active (not always running) resources */
     build_active_RAs(lrm_state, rsc_list);
 
     xml_result = create_cib_fragment(xml_state, XML_CIB_TAG_STATUS);
     crm_log_xml_trace(xml_state, "Current state of the LRM");
     free_xml(xml_state);
 
     return xml_result;
 }
 
 xmlNode *
 do_lrm_query(gboolean is_replace, const char *node_name)
 {
     lrm_state_t *lrm_state = lrm_state_find(node_name);
 
     if (!lrm_state) {
         crm_err("Could not query lrm state for lrmd node %s", node_name);
         return NULL;
     }
     return do_lrm_query_internal(lrm_state, is_replace);
 }
 
 static void
 notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, int rc)
 {
     lrmd_event_data_t *op = NULL;
     const char *from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM);
     const char *from_host = crm_element_value(input->msg, F_CRM_HOST_FROM);
 
     crm_info("Notifying %s on %s that %s was%s deleted",
              from_sys, from_host, rsc_id, rc == pcmk_ok ? "" : " not");
 
     op = construct_op(lrm_state, input->xml, rsc_id, CRMD_ACTION_DELETE);
     CRM_ASSERT(op != NULL);
 
     if (rc == pcmk_ok) {
         op->op_status = PCMK_LRM_OP_DONE;
         op->rc = PCMK_OCF_OK;
     } else {
         op->op_status = PCMK_LRM_OP_ERROR;
         op->rc = PCMK_OCF_UNKNOWN_ERROR;
     }
 
     send_direct_ack(from_host, from_sys, NULL, op, rsc_id);
     lrmd_free_event(op);
 
     if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) {
         /* this isn't expected - trigger a new transition */
         time_t now = time(NULL);
         char *now_s = crm_itoa(now);
 
         crm_debug("Triggering a refresh after %s deleted %s from the LRM", from_sys, rsc_id);
 
         update_attr_delegate(fsa_cib_conn, cib_none, XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL,
                              "last-lrm-refresh", now_s, FALSE, NULL);
 
         free(now_s);
     }
 }
 
 static gboolean
 lrm_remove_deleted_rsc(gpointer key, gpointer value, gpointer user_data)
 {
     struct delete_event_s *event = user_data;
     struct pending_deletion_op_s *op = value;
 
     if (safe_str_eq(event->rsc, op->rsc)) {
         notify_deleted(event->lrm_state, op->input, event->rsc, event->rc);
         return TRUE;
     }
     return FALSE;
 }
 
 static gboolean
 lrm_remove_deleted_op(gpointer key, gpointer value, gpointer user_data)
 {
     const char *rsc = user_data;
     struct recurring_op_s *pending = value;
 
     if (safe_str_eq(rsc, pending->rsc_id)) {
         crm_info("Removing op %s:%d for deleted resource %s",
                  pending->op_key, pending->call_id, rsc);
         return TRUE;
     }
     return FALSE;
 }
 
 /*
  * Remove the rsc from the CIB
  *
  * Avoids refreshing the entire LRM section of this host
  */
 #define rsc_template "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']"
 
 static int
 delete_rsc_status(lrm_state_t * lrm_state, const char *rsc_id, int call_options,
                   const char *user_name)
 {
     char *rsc_xpath = NULL;
     int max = 0;
     int rc = pcmk_ok;
 
     CRM_CHECK(rsc_id != NULL, return -ENXIO);
 
     max = strlen(rsc_template) + strlen(rsc_id) + strlen(lrm_state->node_name) + 1;
     rsc_xpath = calloc(1, max);
     snprintf(rsc_xpath, max, rsc_template, lrm_state->node_name, rsc_id);
 
     rc = cib_internal_op(fsa_cib_conn, CIB_OP_DELETE, NULL, rsc_xpath,
                          NULL, NULL, call_options | cib_xpath, user_name);
 
     free(rsc_xpath);
     return rc;
 }
 
 static void
 delete_rsc_entry(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id,
                  GHashTableIter * rsc_gIter, int rc, const char *user_name)
 {
     struct delete_event_s event;
 
     CRM_CHECK(rsc_id != NULL, return);
 
     if (rc == pcmk_ok) {
         char *rsc_id_copy = strdup(rsc_id);
 
         if (rsc_gIter)
             g_hash_table_iter_remove(rsc_gIter);
         else
             g_hash_table_remove(lrm_state->resource_history, rsc_id_copy);
         crm_debug("sync: Sending delete op for %s", rsc_id_copy);
         delete_rsc_status(lrm_state, rsc_id_copy, cib_quorum_override, user_name);
 
         g_hash_table_foreach_remove(lrm_state->pending_ops, lrm_remove_deleted_op, rsc_id_copy);
         free(rsc_id_copy);
     }
 
     if (input) {
         notify_deleted(lrm_state, input, rsc_id, rc);
     }
 
     event.rc = rc;
     event.rsc = rsc_id;
     event.lrm_state = lrm_state;
     g_hash_table_foreach_remove(lrm_state->deletion_ops, lrm_remove_deleted_rsc, &event);
 }
 
 /*
  * Remove the op from the CIB
  *
  * Avoids refreshing the entire LRM section of this host
  */
 
 #define op_template "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']/"XML_LRM_TAG_RSC_OP"[@id='%s']"
 #define op_call_template "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']/"XML_LRM_TAG_RSC_OP"[@id='%s' and @"XML_LRM_ATTR_CALLID"='%d']"
 
 static void
 delete_op_entry(lrm_state_t * lrm_state, lrmd_event_data_t * op, const char *rsc_id,
                 const char *key, int call_id)
 {
     xmlNode *xml_top = NULL;
 
     if (op != NULL) {
         xml_top = create_xml_node(NULL, XML_LRM_TAG_RSC_OP);
         crm_xml_add_int(xml_top, XML_LRM_ATTR_CALLID, op->call_id);
         crm_xml_add(xml_top, XML_ATTR_TRANSITION_KEY, op->user_data);
 
         if (op->interval > 0) {
             char *op_id = generate_op_key(op->rsc_id, op->op_type, op->interval);
 
             /* Avoid deleting last_failure too (if it was a result of this recurring op failing) */
             crm_xml_add(xml_top, XML_ATTR_ID, op_id);
             free(op_id);
         }
 
         crm_debug("async: Sending delete op for %s_%s_%d (call=%d)",
                   op->rsc_id, op->op_type, op->interval, op->call_id);
 
         fsa_cib_conn->cmds->delete(fsa_cib_conn, XML_CIB_TAG_STATUS, xml_top, cib_quorum_override);
 
     } else if (rsc_id != NULL && key != NULL) {
         int max = 0;
         char *op_xpath = NULL;
 
         if (call_id > 0) {
             max =
                 strlen(op_call_template) + strlen(rsc_id) + strlen(lrm_state->node_name) +
                 strlen(key) + 10;
             op_xpath = calloc(1, max);
             snprintf(op_xpath, max, op_call_template, lrm_state->node_name, rsc_id, key, call_id);
 
         } else {
             max =
                 strlen(op_template) + strlen(rsc_id) + strlen(lrm_state->node_name) + strlen(key) +
                 1;
             op_xpath = calloc(1, max);
             snprintf(op_xpath, max, op_template, lrm_state->node_name, rsc_id, key);
         }
 
         crm_debug("sync: Sending delete op for %s (call=%d)", rsc_id, call_id);
         fsa_cib_conn->cmds->delete(fsa_cib_conn, op_xpath, NULL, cib_quorum_override | cib_xpath);
 
         free(op_xpath);
 
     } else {
         crm_err("Not enough information to delete op entry: rsc=%p key=%p", rsc_id, key);
         return;
     }
 
     crm_log_xml_trace(xml_top, "op:cancel");
     free_xml(xml_top);
 }
 
 void
 lrm_clear_last_failure(const char *rsc_id, const char *node_name)
 {
     char *attr = NULL;
     GHashTableIter iter;
     GList *lrm_state_list = lrm_state_get_list();
     GList *state_entry;
     rsc_history_t *entry = NULL;
 
     attr = generate_op_key(rsc_id, "last_failure", 0);
 
     /* This clears last failure for every lrm state that has this rsc. */
     for (state_entry = lrm_state_list; state_entry != NULL; state_entry = state_entry->next) {
         lrm_state_t *lrm_state = state_entry->data;
 
         if (node_name != NULL) {
             if (strcmp(node_name, lrm_state->node_name) != 0) {
                 /* filter by node_name if node_name is present */
                 continue;
             }
         }
 
         delete_op_entry(lrm_state, NULL, rsc_id, attr, 0);
 
         if (!lrm_state->resource_history) {
             continue;
         }
 
         g_hash_table_iter_init(&iter, lrm_state->resource_history);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) {
             if (safe_str_eq(rsc_id, entry->id)) {
                 lrmd_free_event(entry->failed);
                 entry->failed = NULL;
             }
         }
     }
     free(attr);
     g_list_free(lrm_state_list);
 }
 
 /* Returns: gboolean - cancellation is in progress */
 static gboolean
 cancel_op(lrm_state_t * lrm_state, const char *rsc_id, const char *key, int op, gboolean remove)
 {
     int rc = pcmk_ok;
     struct recurring_op_s *pending = NULL;
 
     CRM_CHECK(op != 0, return FALSE);
     CRM_CHECK(rsc_id != NULL, return FALSE);
     if (key == NULL) {
         key = make_stop_id(rsc_id, op);
     }
     pending = g_hash_table_lookup(lrm_state->pending_ops, key);
 
     if (pending) {
         if (remove && pending->remove == FALSE) {
             pending->remove = TRUE;
             crm_debug("Scheduling %s for removal", key);
         }
 
         if (pending->cancelled) {
             crm_debug("Operation %s already cancelled", key);
             return FALSE;
         }
 
         pending->cancelled = TRUE;
 
     } else {
         crm_info("No pending op found for %s", key);
         return FALSE;
     }
 
     crm_debug("Cancelling op %d for %s (%s)", op, rsc_id, key);
     rc = lrm_state_cancel(lrm_state, pending->rsc_id, pending->op_type, pending->interval);
     if (rc == pcmk_ok) {
         crm_debug("Op %d for %s (%s): cancelled", op, rsc_id, key);
         return TRUE;
     }
 
     crm_debug("Op %d for %s (%s): Nothing to cancel", op, rsc_id, key);
     /* The caller needs to make sure the entry is
      * removed from the pending_ops list
      *
      * Usually by returning TRUE inside the worker function
      * supplied to g_hash_table_foreach_remove()
      *
      * Not removing the entry from pending_ops will block
      * the node from shutting down
      */
     return FALSE;
 }
 
 struct cancel_data {
     gboolean done;
     gboolean remove;
     const char *key;
     lrmd_rsc_info_t *rsc;
     lrm_state_t *lrm_state;
 };
 
 static gboolean
 cancel_action_by_key(gpointer key, gpointer value, gpointer user_data)
 {
     gboolean remove = FALSE;
     struct cancel_data *data = user_data;
     struct recurring_op_s *op = (struct recurring_op_s *)value;
 
     if (safe_str_eq(op->op_key, data->key)) {
         data->done = TRUE;
         remove = !cancel_op(data->lrm_state, data->rsc->id, key, op->call_id, data->remove);
     }
     return remove;
 }
 
 static gboolean
 cancel_op_key(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *key, gboolean remove)
 {
     guint removed = 0;
     struct cancel_data data;
 
     CRM_CHECK(rsc != NULL, return FALSE);
     CRM_CHECK(key != NULL, return FALSE);
 
     data.key = key;
     data.rsc = rsc;
     data.done = FALSE;
     data.remove = remove;
     data.lrm_state = lrm_state;
 
     removed = g_hash_table_foreach_remove(lrm_state->pending_ops, cancel_action_by_key, &data);
     crm_trace("Removed %u op cache entries, new size: %u",
               removed, g_hash_table_size(lrm_state->pending_ops));
     return data.done;
 }
 
 static lrmd_rsc_info_t *
 get_lrm_resource(lrm_state_t * lrm_state, xmlNode * resource, xmlNode * op_msg, gboolean do_create)
 {
     lrmd_rsc_info_t *rsc = NULL;
     const char *id = ID(resource);
     const char *type = crm_element_value(resource, XML_ATTR_TYPE);
     const char *class = crm_element_value(resource, XML_AGENT_ATTR_CLASS);
     const char *provider = crm_element_value(resource, XML_AGENT_ATTR_PROVIDER);
     const char *long_id = crm_element_value(resource, XML_ATTR_ID_LONG);
 
     crm_trace("Retrieving %s from the LRM.", id);
     CRM_CHECK(id != NULL, return NULL);
 
     rsc = lrm_state_get_rsc_info(lrm_state, id, 0);
 
     if (!rsc && long_id) {
         rsc = lrm_state_get_rsc_info(lrm_state, long_id, 0);
     }
 
     if (!rsc && do_create) {
         CRM_CHECK(class != NULL, return NULL);
         CRM_CHECK(type != NULL, return NULL);
 
         crm_trace("Adding rsc %s before operation", id);
 
         lrm_state_register_rsc(lrm_state, id, class, provider, type, lrmd_opt_drop_recurring);
 
         rsc = lrm_state_get_rsc_info(lrm_state, id, 0);
 
         if (!rsc) {
             fsa_data_t *msg_data = NULL;
 
             crm_err("Could not add resource %s to LRM", id);
             register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL);
         }
     }
 
     return rsc;
 }
 
 static void
 delete_resource(lrm_state_t * lrm_state,
                 const char *id,
                 lrmd_rsc_info_t * rsc,
                 GHashTableIter * gIter,
                 const char *sys, const char *host, const char *user, ha_msg_input_t * request)
 {
     int rc = pcmk_ok;
 
     crm_info("Removing resource %s for %s (%s) on %s", id, sys, user ? user : "internal", host);
 
     if (rsc) {
         rc = lrm_state_unregister_rsc(lrm_state, id, 0);
     }
 
     if (rc == pcmk_ok) {
         crm_trace("Resource '%s' deleted", id);
     } else if (rc == -EINPROGRESS) {
         crm_info("Deletion of resource '%s' pending", id);
         if (request) {
             struct pending_deletion_op_s *op = NULL;
             char *ref = crm_element_value_copy(request->msg, XML_ATTR_REFERENCE);
 
             op = calloc(1, sizeof(struct pending_deletion_op_s));
             op->rsc = strdup(rsc->id);
             op->input = copy_ha_msg_input(request);
             g_hash_table_insert(lrm_state->deletion_ops, ref, op);
         }
         return;
     } else {
         crm_warn("Deletion of resource '%s' for %s (%s) on %s failed: %d",
                  id, sys, user ? user : "internal", host, rc);
     }
 
     delete_rsc_entry(lrm_state, request, id, gIter, rc, user);
 }
 
 /*	 A_LRM_INVOKE	*/
 void
 do_lrm_invoke(long long action,
               enum crmd_fsa_cause cause,
               enum crmd_fsa_state cur_state,
               enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     gboolean create_rsc = TRUE;
     lrm_state_t *lrm_state = NULL;
     const char *crm_op = NULL;
     const char *from_sys = NULL;
     const char *from_host = NULL;
     const char *operation = NULL;
     ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg);
     const char *user_name = NULL;
     const char *target_node = NULL;
     gboolean is_remote_node = FALSE;
 
     if (input->xml != NULL) {
         /* Remote node operations are routed here to their remote connections */
         target_node = crm_element_value(input->xml, XML_LRM_ATTR_TARGET);
     }
     if (target_node == NULL) {
         target_node = fsa_our_uname;
     } else if (safe_str_neq(target_node, fsa_our_uname)) {
         is_remote_node = TRUE;
     }
 
     lrm_state = lrm_state_find(target_node);
 
     if (lrm_state == NULL && is_remote_node) {
         crm_err("no lrmd connection for remote node %s found on cluster node %s. Can not process request.",
             target_node, fsa_our_uname);
         return;
     }
 
     CRM_ASSERT(lrm_state != NULL);
 
 #if ENABLE_ACL
     user_name = crm_element_value(input->msg, F_CRM_USER);
     crm_trace("LRM command from user '%s'", user_name);
 #endif
 
     crm_op = crm_element_value(input->msg, F_CRM_TASK);
     from_sys = crm_element_value(input->msg, F_CRM_SYS_FROM);
     if (safe_str_neq(from_sys, CRM_SYSTEM_TENGINE)) {
         from_host = crm_element_value(input->msg, F_CRM_HOST_FROM);
     }
 
     crm_trace("LRM command from: %s", from_sys);
 
     if (safe_str_eq(crm_op, CRM_OP_LRM_DELETE)) {
         operation = CRMD_ACTION_DELETE;
 
     } else if (safe_str_eq(crm_op, CRM_OP_LRM_REFRESH)) {
         operation = CRM_OP_LRM_REFRESH;
 
     } else if (safe_str_eq(crm_op, CRM_OP_LRM_FAIL)) {
         rsc_history_t *entry = NULL;
         lrmd_event_data_t *op = NULL;
         lrmd_rsc_info_t *rsc = NULL;
         xmlNode *xml_rsc = find_xml_node(input->xml, XML_CIB_TAG_RESOURCE, TRUE);
 
         CRM_CHECK(xml_rsc != NULL, return);
 
         /* The lrmd cannot fail a resource; it does not understand the
          * concept of success or failure in relation to a resource, it simply
          * executes operations and reports the results. We determine what a failure is.
          * Because of this, if we want to fail a resource we have to fake what we
          * understand a failure to look like.
          *
          * To do this we create a fake lrmd operation event for the resource
          * we want to fail.  We then pass that event to the lrmd client callback
          * so it will be processed as if it actually came from the lrmd. */
         op = construct_op(lrm_state, input->xml, ID(xml_rsc), "asyncmon");
         CRM_ASSERT(op != NULL);
 
         free((char *)op->user_data);
         op->user_data = NULL;
         entry = g_hash_table_lookup(lrm_state->resource_history, op->rsc_id);
         /* Make sure the call id is greater than the last successful operation,
          * otherwise the failure will not result in a possible recovery of the resource
          * as it could appear the failure occurred before the successful start */
         if (entry) {
             op->call_id = entry->last_callid + 1;
             if (op->call_id < 0) {
                 op->call_id = 1;
             }
         }
         op->interval = 0;
         op->op_status = PCMK_LRM_OP_DONE;
         op->rc = PCMK_OCF_UNKNOWN_ERROR;
         op->t_run = time(NULL);
         op->t_rcchange = op->t_run;
 
 #if ENABLE_ACL
         if (user_name && is_privileged(user_name) == FALSE) {
             crm_err("%s does not have permission to fail %s", user_name, ID(xml_rsc));
             send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc));
             lrmd_free_event(op);
             return;
         }
 #endif
 
         rsc = get_lrm_resource(lrm_state, xml_rsc, input->xml, create_rsc);
         if (rsc) {
             crm_info("Failing resource %s...", rsc->id);
             process_lrm_event(lrm_state, op);
             op->op_status = PCMK_LRM_OP_DONE;
             op->rc = PCMK_OCF_OK;
             lrmd_free_rsc_info(rsc);
         } else {
             crm_info("Cannot find/create resource in order to fail it...");
             crm_log_xml_warn(input->msg, "bad input");
         }
 
         send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc));
         lrmd_free_event(op);
         return;
 
     } else if (input->xml != NULL) {
         operation = crm_element_value(input->xml, XML_LRM_ATTR_TASK);
     }
 
     if (safe_str_eq(crm_op, CRM_OP_LRM_REFRESH)) {
         int rc = pcmk_ok;
         xmlNode *fragment = do_lrm_query_internal(lrm_state, TRUE);
 
         fsa_cib_update(XML_CIB_TAG_STATUS, fragment, cib_quorum_override, rc, user_name);
         crm_info("Forced a local LRM refresh: call=%d", rc);
 
         if(strcmp(CRM_SYSTEM_CRMD, from_sys) != 0) {
             xmlNode *reply = create_request(
                 CRM_OP_INVOKE_LRM, fragment,
                 from_host, from_sys, CRM_SYSTEM_LRMD, fsa_our_uuid);
 
             crm_debug("ACK'ing refresh from %s (%s)", from_sys, from_host);
 
             if (relay_message(reply, TRUE) == FALSE) {
                 crm_log_xml_err(reply, "Unable to route reply");
             }
             free_xml(reply);
         }
 
         free_xml(fragment);
 
     } else if (safe_str_eq(crm_op, CRM_OP_LRM_QUERY)) {
         xmlNode *data = do_lrm_query_internal(lrm_state, FALSE);
         xmlNode *reply = create_reply(input->msg, data);
 
         if (relay_message(reply, TRUE) == FALSE) {
             crm_err("Unable to route reply");
             crm_log_xml_err(reply, "reply");
         }
         free_xml(reply);
         free_xml(data);
 
     } else if (safe_str_eq(operation, CRM_OP_PROBED)) {
         update_attrd(lrm_state->node_name, CRM_OP_PROBED, XML_BOOLEAN_TRUE, user_name, is_remote_node);
 
     } else if (safe_str_eq(operation, CRM_OP_REPROBE) || safe_str_eq(crm_op, CRM_OP_REPROBE)) {
         GHashTableIter gIter;
         rsc_history_t *entry = NULL;
 
         crm_notice("Forcing the status of all resources to be redetected");
 
         g_hash_table_iter_init(&gIter, lrm_state->resource_history);
         while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
             delete_resource(lrm_state, entry->id, &entry->rsc, &gIter, from_sys, from_host,
                             user_name, NULL);
         }
 
         /* Now delete the copy in the CIB */
         erase_status_tag(lrm_state->node_name, XML_CIB_TAG_LRM, cib_scope_local);
 
         /* And finally, _delete_ the value in attrd
          * Setting it to FALSE results in the PE sending us back here again
          */
         update_attrd(lrm_state->node_name, CRM_OP_PROBED, NULL, user_name, is_remote_node);
 
         if (strcmp(CRM_SYSTEM_TENGINE, from_sys) != 0) {
             xmlNode *reply = create_request(
                 CRM_OP_INVOKE_LRM, NULL,
                 from_host, from_sys, CRM_SYSTEM_LRMD, fsa_our_uuid);
 
             crm_debug("ACK'ing re-probe from %s (%s)", from_sys, from_host);
 
             if (relay_message(reply, TRUE) == FALSE) {
                 crm_log_xml_err(reply, "Unable to route reply");
             }
             free_xml(reply);
         }
 
     } else if (operation != NULL) {
         lrmd_rsc_info_t *rsc = NULL;
         xmlNode *params = NULL;
         xmlNode *xml_rsc = find_xml_node(input->xml, XML_CIB_TAG_RESOURCE, TRUE);
 
         CRM_CHECK(xml_rsc != NULL, return);
 
         /* only the first 16 chars are used by the LRM */
         params = find_xml_node(input->xml, XML_TAG_ATTRS, TRUE);
 
         if (safe_str_eq(operation, CRMD_ACTION_DELETE)) {
             create_rsc = FALSE;
         }
 
         rsc = get_lrm_resource(lrm_state, xml_rsc, input->xml, create_rsc);
 
         if (rsc == NULL && create_rsc) {
             crm_err("Invalid resource definition");
             crm_log_xml_warn(input->msg, "bad input");
 
         } else if (rsc == NULL) {
             lrmd_event_data_t *op = NULL;
 
             crm_notice("Not creating resource for a %s event: %s", operation, ID(input->xml));
             delete_rsc_entry(lrm_state, input, ID(xml_rsc), NULL, pcmk_ok, user_name);
 
             op = construct_op(lrm_state, input->xml, ID(xml_rsc), operation);
             CRM_ASSERT(op != NULL);
             op->op_status = PCMK_LRM_OP_DONE;
             op->rc = PCMK_OCF_OK;
             send_direct_ack(from_host, from_sys, NULL, op, ID(xml_rsc));
             lrmd_free_event(op);
 
         } else if (safe_str_eq(operation, CRMD_ACTION_CANCEL)) {
             char *op_key = NULL;
             char *meta_key = NULL;
             int call = 0;
             const char *call_id = NULL;
             const char *op_task = NULL;
             const char *op_interval = NULL;
             gboolean in_progress = FALSE;
 
             CRM_CHECK(params != NULL, crm_log_xml_warn(input->xml, "Bad command");
                       return);
 
             meta_key = crm_meta_name(XML_LRM_ATTR_INTERVAL);
             op_interval = crm_element_value(params, meta_key);
             free(meta_key);
 
             meta_key = crm_meta_name(XML_LRM_ATTR_TASK);
             op_task = crm_element_value(params, meta_key);
             free(meta_key);
 
             meta_key = crm_meta_name(XML_LRM_ATTR_CALLID);
             call_id = crm_element_value(params, meta_key);
             free(meta_key);
 
             CRM_CHECK(op_task != NULL, crm_log_xml_warn(input->xml, "Bad command");
                       return);
             CRM_CHECK(op_interval != NULL, crm_log_xml_warn(input->xml, "Bad command");
                       return);
 
             op_key = generate_op_key(rsc->id, op_task, crm_parse_int(op_interval, "0"));
 
             crm_debug("PE requested op %s (call=%s) be cancelled",
                       op_key, call_id ? call_id : "NA");
             call = crm_parse_int(call_id, "0");
             if (call == 0) {
                 /* the normal case when the PE cancels a recurring op */
                 in_progress = cancel_op_key(lrm_state, rsc, op_key, TRUE);
 
             } else {
                 /* the normal case when the PE cancels an orphan op */
                 in_progress = cancel_op(lrm_state, rsc->id, NULL, call, TRUE);
             }
 
             if (in_progress == FALSE) {
                 lrmd_event_data_t *op = construct_op(lrm_state, input->xml, rsc->id, op_task);
 
                 crm_info("Nothing known about operation %d for %s", call, op_key);
                 delete_op_entry(lrm_state, NULL, rsc->id, op_key, call);
 
                 CRM_ASSERT(op != NULL);
 
                 op->rc = PCMK_OCF_OK;
                 op->op_status = PCMK_LRM_OP_DONE;
                 send_direct_ack(from_host, from_sys, rsc, op, rsc->id);
                 lrmd_free_event(op);
 
                 /* needed? Surely not, otherwise cancel_op()/cancel_op_key()
                  * wouldn't have failed in the first place
                  */
                 g_hash_table_remove(lrm_state->pending_ops, op_key);
             }
 
             free(op_key);
 
         } else if (rsc != NULL && safe_str_eq(operation, CRMD_ACTION_DELETE)) {
 
 #if ENABLE_ACL
             int cib_rc = delete_rsc_status(lrm_state, rsc->id, cib_dryrun | cib_sync_call, user_name);
             if (cib_rc != pcmk_ok) {
                 lrmd_event_data_t *op = NULL;
 
                 crm_err
                     ("Attempted deletion of resource status '%s' from CIB for %s (user=%s) on %s failed: (rc=%d) %s",
                      rsc->id, from_sys, user_name ? user_name : "unknown", from_host, cib_rc,
                      pcmk_strerror(cib_rc));
 
                 op = construct_op(lrm_state, input->xml, rsc->id, operation);
                 op->op_status = PCMK_LRM_OP_ERROR;
 
                 if (cib_rc == -EACCES) {
                     op->rc = PCMK_OCF_INSUFFICIENT_PRIV;
                 } else {
                     op->rc = PCMK_OCF_UNKNOWN_ERROR;
                 }
                 send_direct_ack(from_host, from_sys, NULL, op, rsc->id);
                 lrmd_free_event(op);
                 return;
             }
 #endif
             delete_resource(lrm_state, rsc->id, rsc, NULL, from_sys, from_host, user_name, input);
 
         } else if (rsc != NULL) {
             do_lrm_rsc_op(lrm_state, rsc, operation, input->xml, input->msg);
         }
 
         lrmd_free_rsc_info(rsc);
 
     } else {
         crm_err("Operation was neither a lrm_query, nor a rsc op.  %s", crm_str(crm_op));
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
     }
 }
 
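 /* Translate an incoming <lrm_rsc_op> request into an lrmd_event_data_t.
  * For stop operations, the cached stop_params (the parameters the resource
  * was last started with) are preferred over whatever the request carries.
  */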
 static lrmd_event_data_t *
 construct_op(lrm_state_t * lrm_state, xmlNode * rsc_op, const char *rsc_id, const char *operation)
 {
     lrmd_event_data_t *op = NULL;
     const char *op_delay = NULL;
     const char *op_timeout = NULL;
     const char *op_interval = NULL;
     GHashTable *params = NULL;
 
     const char *transition = NULL;
 
     CRM_LOG_ASSERT(rsc_id != NULL);
 
     op = calloc(1, sizeof(lrmd_event_data_t));
     op->type = lrmd_event_exec_complete;
     op->op_type = strdup(operation);
     op->op_status = PCMK_LRM_OP_PENDING;
     op->rc = -1;
     op->rsc_id = strdup(rsc_id);
     op->interval = 0;
     op->timeout = 0;
     op->start_delay = 0;
 
     if (rsc_op == NULL) {
         CRM_LOG_ASSERT(safe_str_eq(CRMD_ACTION_STOP, operation));
         op->user_data = NULL;
         /* the stop_all_resources() case
          * by definition there is no DC (or they'd be shutting
          *   us down).
          * So we should put our version here.
          */
         op->params = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                            g_hash_destroy_str, g_hash_destroy_str);
 
         g_hash_table_insert(op->params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET));
 
         crm_trace("Constructed %s op for %s", operation, rsc_id);
         return op;
     }
 
     params = xml2list(rsc_op);
     g_hash_table_remove(params, CRM_META "_op_target_rc");
 
     op_delay = crm_meta_value(params, XML_OP_ATTR_START_DELAY);
     op_timeout = crm_meta_value(params, XML_ATTR_TIMEOUT);
     op_interval = crm_meta_value(params, XML_LRM_ATTR_INTERVAL);
 
     op->interval = crm_parse_int(op_interval, "0");
     op->timeout = crm_parse_int(op_timeout, "0");
     op->start_delay = crm_parse_int(op_delay, "0");
 
     if (safe_str_neq(operation, RSC_STOP)) {
         op->params = params;
 
     } else {
         rsc_history_t *entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id);
 
         /* If we do not have stop parameters cached, use
          * whatever we are given */
         if (!entry || !entry->stop_params) {
             op->params = params;
         } else {
             /* Copy the cached parameter list so that we stop the resource
              * with the old attributes, not the new ones */
             op->params = g_hash_table_new_full(crm_str_hash, g_str_equal,
                                                g_hash_destroy_str, g_hash_destroy_str);
 
             g_hash_table_foreach(params, copy_meta_keys, op->params);
             g_hash_table_foreach(entry->stop_params, copy_instance_keys, op->params);
             g_hash_table_destroy(params);
             params = NULL;
         }
     }
 
     /* sanity */
     if (op->interval < 0) {
         op->interval = 0;
     }
     if (op->timeout <= 0) {
         op->timeout = op->interval;
     }
     if (op->start_delay < 0) {
         op->start_delay = 0;
     }
 
     transition = crm_element_value(rsc_op, XML_ATTR_TRANSITION_KEY);
     CRM_CHECK(transition != NULL, return op);
 
     op->user_data = strdup(transition);
 
     if (op->interval != 0) {
         if (safe_str_eq(operation, CRMD_ACTION_START)
             || safe_str_eq(operation, CRMD_ACTION_STOP)) {
             crm_err("Start and Stop actions cannot have an interval: %d", op->interval);
             op->interval = 0;
         }
     }
 
     crm_trace("Constructed %s op for %s: interval=%d", operation, rsc_id, op->interval);
 
     return op;
 }
 
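 /* Report an operation result straight back to the requester (by default the
  * transition engine) as a CRM_OP_INVOKE_LRM message wrapping a status
  * fragment, rather than waiting for the update to flow through the CIB.
  */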
 void
 send_direct_ack(const char *to_host, const char *to_sys,
                 lrmd_rsc_info_t * rsc, lrmd_event_data_t * op, const char *rsc_id)
 {
     xmlNode *reply = NULL;
     xmlNode *update, *iter;
     xmlNode *fragment;
     crm_node_t *peer = NULL;
 
     CRM_CHECK(op != NULL, return);
     if (op->rsc_id == NULL) {
         CRM_LOG_ASSERT(rsc_id != NULL);
         op->rsc_id = strdup(rsc_id);
     }
     if (to_sys == NULL) {
         to_sys = CRM_SYSTEM_TENGINE;
     }
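     /* Build a throwaway node-state fragment carrying this op's result and relay it
      * directly to the recipient (the transition engine by default), without a CIB update */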
 
     peer = crm_get_peer(0, fsa_our_uname);
     update = do_update_node_cib(peer, node_update_none, NULL, __FUNCTION__);
 
     iter = create_xml_node(update, XML_CIB_TAG_LRM);
     crm_xml_add(iter, XML_ATTR_ID, fsa_our_uuid);
     iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES);
     iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE);
 
     crm_xml_add(iter, XML_ATTR_ID, op->rsc_id);
 
     build_operation_update(iter, rsc, op, __FUNCTION__);
     fragment = create_cib_fragment(update, XML_CIB_TAG_STATUS);
 
     reply = create_request(CRM_OP_INVOKE_LRM, fragment, to_host, to_sys, CRM_SYSTEM_LRMD, NULL);
 
     crm_log_xml_trace(update, "ACK Update");
 
     crm_debug("ACK'ing resource op %s_%s_%d from %s: %s",
               op->rsc_id, op->op_type, op->interval, op->user_data,
               crm_element_value(reply, XML_ATTR_REFERENCE));
 
     if (relay_message(reply, TRUE) == FALSE) {
         crm_log_xml_err(reply, "Unable to route reply");
     }
 
     free_xml(fragment);
     free_xml(update);
     free_xml(reply);
 }
 
 gboolean
 verify_stopped(enum crmd_fsa_state cur_state, int log_level)
 {
     gboolean res = TRUE;
     GList *lrm_state_list = lrm_state_get_list();
     GList *state_entry;
 
     for (state_entry = lrm_state_list; state_entry != NULL; state_entry = state_entry->next) {
         lrm_state_t *lrm_state = state_entry->data;
 
         if (!lrm_state_verify_stopped(lrm_state, cur_state, log_level)) {
             /* keep iterating through all even when false is returned */
             res = FALSE;
         }
     }
 
     set_bit(fsa_input_register, R_SENT_RSC_STOP);
     g_list_free(lrm_state_list); lrm_state_list = NULL;
     return res;
 }
 
 struct stop_recurring_action_s {
     lrmd_rsc_info_t *rsc;
     lrm_state_t *lrm_state;
 };
 
 static gboolean
 stop_recurring_action_by_rsc(gpointer key, gpointer value, gpointer user_data)
 {
     gboolean remove = FALSE;
     struct stop_recurring_action_s *event = user_data;
     struct recurring_op_s *op = (struct recurring_op_s *)value;
 
     if (op->interval != 0 && safe_str_eq(op->rsc_id, event->rsc->id)) {
         crm_debug("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, key);
         remove = !cancel_op(event->lrm_state, event->rsc->id, key, op->call_id, FALSE);
     }
 
     return remove;
 }
 
 static gboolean
 stop_recurring_actions(gpointer key, gpointer value, gpointer user_data)
 {
     gboolean remove = FALSE;
     lrm_state_t *lrm_state = user_data;
     struct recurring_op_s *op = (struct recurring_op_s *)value;
 
     if (op->interval != 0) {
         crm_info("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, key);
         remove = !cancel_op(lrm_state, op->rsc_id, key, op->call_id, FALSE);
     }
 
     return remove;
 }
 
 static void
 do_lrm_rsc_op(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *operation, xmlNode * msg,
               xmlNode * request)
 {
     int call_id = 0;
     char *op_id = NULL;
     lrmd_event_data_t *op = NULL;
     lrmd_key_value_t *params = NULL;
     fsa_data_t *msg_data = NULL;
     const char *transition = NULL;
 
     CRM_CHECK(rsc != NULL, return);
     CRM_CHECK(operation != NULL, return);
 
     if (msg != NULL) {
         transition = crm_element_value(msg, XML_ATTR_TRANSITION_KEY);
         if (transition == NULL) {
             crm_log_xml_err(msg, "Missing transition number");
         }
     }
 
     op = construct_op(lrm_state, msg, rsc->id, operation);
     CRM_CHECK(op != NULL, return);
 
     /* stop any previous monitor operations before changing the resource state */
     if (op->interval == 0
         && strcmp(operation, CRMD_ACTION_STATUS) != 0
         && strcmp(operation, CRMD_ACTION_NOTIFY) != 0) {
         guint removed = 0;
         struct stop_recurring_action_s data;
 
         data.rsc = rsc;
         data.lrm_state = lrm_state;
         removed = g_hash_table_foreach_remove(
             lrm_state->pending_ops, stop_recurring_action_by_rsc, &data);
 
         crm_debug("Stopped %u recurring operations in preparation for %s_%s_%d",
                   removed, rsc->id, operation, op->interval);
     }
 
     /* now do the op */
     crm_info("Performing key=%s op=%s_%s_%d", transition, rsc->id, operation, op->interval);
 
     if (fsa_state != S_NOT_DC && fsa_state != S_POLICY_ENGINE && fsa_state != S_TRANSITION_ENGINE) {
         if (safe_str_neq(operation, "fail")
             && safe_str_neq(operation, CRMD_ACTION_STOP)) {
             crm_info("Discarding attempt to perform action %s on %s in state %s",
                      operation, rsc->id, fsa_state2string(fsa_state));
             op->rc = 99;
             op->op_status = PCMK_LRM_OP_ERROR;
             send_direct_ack(NULL, NULL, rsc, op, rsc->id);
             lrmd_free_event(op);
             free(op_id);
             return;
         }
     }
 
     op_id = generate_op_key(rsc->id, op->op_type, op->interval);
 
     if (op->interval > 0) {
         /* cancel it so we can then restart it without conflict */
         cancel_op_key(lrm_state, rsc, op_id, FALSE);
     }
 
     if (op->params) {
         char *key = NULL;
         char *value = NULL;
         GHashTableIter iter;
 
         g_hash_table_iter_init(&iter, op->params);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             params = lrmd_key_value_add(params, key, value);
         }
     }
 
     call_id = lrm_state_exec(lrm_state,
                              rsc->id,
                              op->op_type,
                              op->user_data, op->interval, op->timeout, op->start_delay, params);
 
     if (call_id <= 0) {
         crm_err("Operation %s on %s failed: %d", operation, rsc->id, call_id);
         register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL);
 
     } else {
         /* record all operations so we can wait
          * for them to complete during shutdown
          */
         char *call_id_s = make_stop_id(rsc->id, call_id);
         struct recurring_op_s *pending = NULL;
 
         pending = calloc(1, sizeof(struct recurring_op_s));
         crm_trace("Recording pending op: %d - %s %s", call_id, op_id, call_id_s);
 
         pending->call_id = call_id;
         pending->interval = op->interval;
         pending->op_type = strdup(operation);
         pending->op_key = strdup(op_id);
         pending->rsc_id = strdup(rsc->id);
         g_hash_table_replace(lrm_state->pending_ops, call_id_s, pending);
 
         if (op->interval > 0 && op->start_delay > START_DELAY_THRESHOLD) {
             char *uuid = NULL;
             int dummy = 0, target_rc = 0;
 
             crm_info("Faking confirmation of %s: execution postponed for over 5 minutes", op_id);
 
             decode_transition_key(op->user_data, &uuid, &dummy, &dummy, &target_rc);
             free(uuid);
 
             op->rc = target_rc;
             op->op_status = PCMK_LRM_OP_DONE;
             send_direct_ack(NULL, NULL, rsc, op, rsc->id);
         }
     }
 
     free(op_id);
     lrmd_free_event(op);
     return;
 }
 
 int last_resource_update = 0;
 
 static void
 cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     switch (rc) {
         case pcmk_ok:
         case -pcmk_err_diff_failed:
         case -pcmk_err_diff_resync:
             crm_trace("Resource update %d complete: rc=%d", call_id, rc);
             break;
         default:
             crm_warn("Resource update %d failed: (rc=%d) %s", call_id, rc, pcmk_strerror(rc));
     }
 
     if (call_id == last_resource_update) {
         last_resource_update = 0;
         trigger_fsa(fsa_source);
     }
 }
 
 static int
 do_update_resource(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op)
 {
 /*
   <status>
   <node_state id=uuid uname=uname>
   <lrm>
   <lrm_resources>
   <lrm_resource id=...>
   </...>
 */
     int rc = pcmk_ok;
     xmlNode *update, *iter = NULL;
     int call_opt = cib_quorum_override;
     const char *uuid = NULL;
 
     CRM_CHECK(op != NULL, return 0);
 
     if (fsa_state == S_ELECTION || fsa_state == S_PENDING) {
         crm_info("Sending update to local CIB in state: %s", fsa_state2string(fsa_state));
         call_opt |= cib_scope_local;
     }
 
     iter = create_xml_node(iter, XML_CIB_TAG_STATUS);
     update = iter;
     iter = create_xml_node(iter, XML_CIB_TAG_STATE);
 
     if (safe_str_eq(lrm_state->node_name, fsa_our_uname)) {
         uuid = fsa_our_uuid;
 
     } else {
         /* a remote node's uuid and uname are equal */
         uuid = lrm_state->node_name;
         crm_xml_add(iter, XML_NODE_IS_REMOTE, "true");
     }
 
     CRM_LOG_ASSERT(uuid != NULL);
     if(uuid == NULL) {
         rc = -EINVAL;
         goto done;
     }
 
     crm_xml_add(iter, XML_ATTR_UUID,  uuid);
     crm_xml_add(iter, XML_ATTR_UNAME, lrm_state->node_name);
     crm_xml_add(iter, XML_ATTR_ORIGIN, __FUNCTION__);
 
     iter = create_xml_node(iter, XML_CIB_TAG_LRM);
     crm_xml_add(iter, XML_ATTR_ID, uuid);
 
     iter = create_xml_node(iter, XML_LRM_TAG_RESOURCES);
     iter = create_xml_node(iter, XML_LRM_TAG_RESOURCE);
     crm_xml_add(iter, XML_ATTR_ID, op->rsc_id);
 
     build_operation_update(iter, rsc, op, __FUNCTION__);
 
     if (rsc) {
         const char *container = NULL;
 
         crm_xml_add(iter, XML_ATTR_TYPE, rsc->type);
         crm_xml_add(iter, XML_AGENT_ATTR_CLASS, rsc->class);
         crm_xml_add(iter, XML_AGENT_ATTR_PROVIDER, rsc->provider);
 
         if (op->params) {
             container = g_hash_table_lookup(op->params, CRM_META"_"XML_RSC_ATTR_CONTAINER);
         }
         if (container) {
             crm_trace("Resource %s is a part of container resource %s", op->rsc_id, container);
             crm_xml_add(iter, XML_RSC_ATTR_CONTAINER, container);
         }
 
         CRM_CHECK(rsc->type != NULL, crm_err("Resource %s has no value for type", op->rsc_id));
         CRM_CHECK(rsc->class != NULL, crm_err("Resource %s has no value for class", op->rsc_id));
 
     } else {
         crm_warn("Resource %s no longer exists in the lrmd", op->rsc_id);
         send_direct_ack(NULL, NULL, rsc, op, op->rsc_id);
         goto cleanup;
     }
 
     crm_log_xml_trace(update, __FUNCTION__);
 
     /* Make it an asynchronous call and be done with it.
      *
      * Best case:
      *   the resource state will be discovered during
      *   the next signup or election.
      *
      * Bad case:
      *   we are shutting down and there is no DC at the time,
      *   but then why are we shutting down anyway?
      *   (probably because of an internal error)
      *
      * Worst case:
      *   we get shot for having resources "running" when they really weren't
      *
      * The alternative, however, means blocking here for too long, which
      * isn't acceptable.
      */
     fsa_cib_update(XML_CIB_TAG_STATUS, update, call_opt, rc, NULL);
 
     if (rc > 0) {
         last_resource_update = rc;
     }
   done:
     /* the return code is a call number, not an error code */
     crm_trace("Sent resource state update message: %d for %s=%d on %s", rc,
               op->op_type, op->interval, op->rsc_id);
     fsa_register_cib_callback(rc, FALSE, NULL, cib_rsc_callback);
 
   cleanup:
     free_xml(update);
     return rc;
 }
 
 void
 do_lrm_event(long long action,
              enum crmd_fsa_cause cause,
              enum crmd_fsa_state cur_state, enum crmd_fsa_input cur_input, fsa_data_t * msg_data)
 {
     CRM_CHECK(FALSE, return);
 }
 
 gboolean
 process_lrm_event(lrm_state_t * lrm_state, lrmd_event_data_t * op)
 {
     char *op_id = NULL;
     char *op_key = NULL;
 
     int update_id = 0;
     gboolean removed = FALSE;
     lrmd_rsc_info_t *rsc = NULL;
 
     struct recurring_op_s *pending = NULL;
 
     CRM_CHECK(op != NULL, return FALSE);
 
     CRM_CHECK(op->rsc_id != NULL, return FALSE);
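     /* Look up the pending-op entry and cached resource info matching this result */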
     op_id = make_stop_id(op->rsc_id, op->call_id);
     pending = g_hash_table_lookup(lrm_state->pending_ops, op_id);
     op_key = generate_op_key(op->rsc_id, op->op_type, op->interval);
     rsc = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0);
 
     if (op->op_status == PCMK_LRM_OP_ERROR
         && (op->rc == PCMK_OCF_RUNNING_MASTER || op->rc == PCMK_OCF_NOT_RUNNING)) {
         /* Leave it up to the TE/PE to decide if this is an error */
         op->op_status = PCMK_LRM_OP_DONE;
     }
 
     if (op->op_status != PCMK_LRM_OP_CANCELLED) {
         if (safe_str_eq(op->op_type, RSC_NOTIFY)) {
             /* Keep notify ops out of the CIB */
             send_direct_ack(NULL, NULL, NULL, op, op->rsc_id);
         } else {
             update_id = do_update_resource(lrm_state, rsc, op);
         }
     } else if (op->interval == 0) {
         /* This will occur when "crm resource cleanup" is called while actions are in-flight */
         crm_err("Op %s (call=%d): Cancelled", op_key, op->call_id);
         send_direct_ack(NULL, NULL, NULL, op, op->rsc_id);
 
     } else if (pending == NULL) {
         /* Operations that are cancelled may safely be removed
          * from the pending op list before the lrmd completion event
          * is received. Only report non-cancelled ops here. */
         if (op->op_status != PCMK_LRM_OP_CANCELLED) {
             crm_err("Op %s (call=%d): No 'pending' entry", op_key, op->call_id);
         }
     } else if (op->user_data == NULL) {
         crm_err("Op %s (call=%d): No user data", op_key, op->call_id);
 
     } else if (pending->remove) {
         delete_op_entry(lrm_state, op, op->rsc_id, op_key, op->call_id);
 
     } else {
         /* Before a stop is called, there is no need for a direct ack */
         crm_trace("Op %s (call=%d): no delete event required", op_key, op->call_id);
     }
 
     if ((op->interval == 0) && g_hash_table_remove(lrm_state->pending_ops, op_id)) {
         removed = TRUE;
         crm_trace("Op %s (call=%d, stop-id=%s, remaining=%u): Confirmed",
                   op_key, op->call_id, op_id, g_hash_table_size(lrm_state->pending_ops));
 
     } else if(op->interval != 0 && op->op_status == PCMK_LRM_OP_CANCELLED) {
         removed = TRUE;
         g_hash_table_remove(lrm_state->pending_ops, op_id);
     }
 
     switch (op->op_status) {
         case PCMK_LRM_OP_CANCELLED:
-            crm_info("Operation %s: %s (call=%d, confirmed=%s)",
-                     op_key, services_lrm_status_str(op->op_status),
+            crm_info("Operation %s: %s (node=%s, call=%d, confirmed=%s)",
+                     op_key, services_lrm_status_str(op->op_status), lrm_state->node_name,
                      op->call_id, removed ? "true" : "false");
             break;
 
         case PCMK_LRM_OP_DONE:
-            crm_notice("Operation %s: %s (call=%d, rc=%d, cib-update=%d, confirmed=%s)",
-                       op_key, services_ocf_exitcode_str(op->rc),
+            crm_notice("Operation %s: %s (node=%s, call=%d, rc=%d, cib-update=%d, confirmed=%s)",
+                       op_key, services_ocf_exitcode_str(op->rc), lrm_state->node_name,
                        op->call_id, op->rc, update_id, removed ? "true" : "false");
             break;
 
         case PCMK_LRM_OP_TIMEOUT:
-            crm_err("Operation %s: %s (call=%d, timeout=%dms)",
-                    op_key, services_lrm_status_str(op->op_status), op->call_id, op->timeout);
+            crm_err("Operation %s: %s (node=%s, call=%d, timeout=%dms)",
+                    op_key, services_lrm_status_str(op->op_status), lrm_state->node_name, op->call_id, op->timeout);
             break;
 
         default:
-            crm_err("Operation %s (call=%d, status=%d, cib-update=%d, confirmed=%s) %s",
-                    op_key, op->call_id, op->op_status, update_id, removed ? "true" : "false",
+            crm_err("Operation %s (node=%s, call=%d, status=%d, cib-update=%d, confirmed=%s) %s",
+                    op_key, lrm_state->node_name, op->call_id, op->op_status, update_id, removed ? "true" : "false",
                     services_lrm_status_str(op->op_status));
     }
 
     if (op->output) {
         char *prefix =
             g_strdup_printf("%s-%s_%s_%d:%d", lrm_state->node_name, op->rsc_id, op->op_type, op->interval, op->call_id);
 
         if (op->rc) {
             crm_log_output(LOG_NOTICE, prefix, op->output);
         } else {
             crm_log_output(LOG_DEBUG, prefix, op->output);
         }
         g_free(prefix);
     }
 
     if (op->rsc_deleted) {
         crm_info("Deletion of resource '%s' complete after %s", op->rsc_id, op_key);
         delete_rsc_entry(lrm_state, NULL, op->rsc_id, NULL, pcmk_ok, NULL);
     }
 
     /* If a shutdown was escalated while operations were pending,
      * then the FSA will be stalled right now... allow it to continue
      */
     mainloop_set_trigger(fsa_source);
     update_history_cache(lrm_state, rsc, op);
 
     lrmd_free_rsc_info(rsc);
     free(op_key);
     free(op_id);
 
     return TRUE;
 }
diff --git a/cts/CM_ais.py b/cts/CM_ais.py
index bfa0aa3731..8c12697bce 100644
--- a/cts/CM_ais.py
+++ b/cts/CM_ais.py
@@ -1,448 +1,448 @@
 '''CTS: Cluster Testing System: AIS dependent modules...
 '''
 
 __copyright__='''
 Copyright (C) 2007 Andrew Beekhof <andrew@suse.de>
 
 '''
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
 import os, sys, warnings
 from cts.CTSvars import *
 from cts.CM_lha  import crm_lha
 from cts.CTS     import Process
 
 #######################################################################
 #
 #  LinuxHA v2 dependent modules
 #
 #######################################################################
 
 class crm_ais(crm_lha):
     '''
     The crm version 3 cluster manager class.
     It implements the things we need to talk to and manipulate
     crm clusters running on top of openais
     '''
     def __init__(self, Environment, randseed=None):
         crm_lha.__init__(self, Environment, randseed=randseed)
 
         self.update({
             "Name"           : "crm-ais",
 
             "EpocheCmd"      : "crm_node -e --openais",
             "QuorumCmd"      : "crm_node -q --openais",
             "ParitionCmd"    : "crm_node -p --openais",
 
             "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost",
             "Pat:ChildExit"    : "Child process .* exited",
 
             # Bad news Regexes.  Should never occur.
             "BadRegexes"   : (
                 r" trace:",
                 r"error:",
                 r"crit:",
                 r"ERROR:",
                 r"CRIT:",
                 r"Shutting down...NOW",
                 r"Timer I_TERMINATE just popped",
                 r"input=I_ERROR",
                 r"input=I_FAIL",
                 r"input=I_INTEGRATED cause=C_TIMER_POPPED",
                 r"input=I_FINALIZED cause=C_TIMER_POPPED",
                 r"input=I_ERROR",
                 r", exiting\.",
                 r"(WARN|warn).*Ignoring HA message.*vote.*not in our membership list",
                 r"pengine.*Attempting recovery of resource",
                 r"is taking more than 2x its timeout",
                 r"Confirm not received from",
                 r"Welcome reply not received from",
                 r"Attempting to schedule .* after a stop",
                 r"Resource .* was active at shutdown",
                 r"duplicate entries for call_id",
                 r"Search terminated:",
                 r":global_timer_callback",
                 r"Faking parameter digest creation",
                 r"Parameters to .* action changed:",
                 r"Parameters to .* changed",
                 r"Child process .* terminated with signal",
                 r"LogActions: Recover",
                 r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting",
                 r"Peer is not part of our cluster",
                 r"We appear to be in an election loop",
                 r"Unknown node -> we will not deliver message",
                 r"crm_write_blackbox",
                 r"pacemakerd.*Could not connect to Cluster Configuration Database API",
                 r"Receiving messages from a node we think is dead",
                 r"share the same cluster nodeid",
                 r"share the same name",
 
                 #r"crm_ipc_send:.*Request .* failed",
                 #r"crm_ipc_send:.*Sending to .* is disabled until pending reply is received",
 
                 # Not inherently bad, but worth tracking
                 #r"No need to invoke the TE",
                 #r"ping.*: DEBUG: Updated connected = 0",
                 #r"Digest mis-match:",
                 r"te_graph_trigger: Transition failed: terminated",
                 r"process_ping_reply",
                 r"retrieveCib",
                 r"cib_process_replace",
                 #r"Executing .* fencing operation",
                 #r"fence_pcmk.* Call to fence",
                 #r"fence_pcmk",
                 r"cman killed by node",
                 r"Election storm",
                 r"stalled the FSA with pending inputs",
             ),
         })
 
     def errorstoignore(self):
         # At some point implement a more elegant solution that
         #   also produces a report at the end
         '''Return a list of errors which are known, very noisy, and should be ignored'''
         if 1:
             return [
                 r"crm_mon:",
                 r"crmadmin:",
                 r"update_trace_data",
                 r"async_notify: strange, client not found",
                 r"Parse error: Ignoring unknown option .*nodename",
                 r"error: log_operation: Operation 'reboot' .* with device 'FencingFail' returned:",
                 r"Child process .* terminated with signal 9",
                 r"getinfo response error: 1$",
                 ]
         return []
 
     def NodeUUID(self, node):
         return node
 
     def ais_components(self):
         fullcomplist = {}
         self.complist = []
         self.common_ignore = [
                     "Pending action:",
                     "error: crm_log_message_adv:",
                     "error: MSG: No message to dump",
                     "resources were active at shutdown",
                     "pending LRM operations at shutdown",
                     "Lost connection to the CIB service",
                     "Connection to the CIB terminated...",
                     "Sending message to CIB service FAILED",
                     "apply_xml_diff: Diff application failed!",
                     "crmd.*Action A_RECOVER .* not supported",
                     "unconfirmed_actions: Waiting on .* unconfirmed actions",
                     "cib_native_msgready: Message pending on command channel",
                     "crmd.*do_exit: Performing A_EXIT_1 - forcefully exiting the CRMd",
                     "verify_stopped: Resource .* was active at shutdown.  You may ignore this error if it is unmanaged.",
                     "error: attrd_connection_destroy: Lost connection to attrd",
                     "info: te_fence_node: Executing .* fencing operation",
                     "crm_write_blackbox:",
 #                    "error: native_create_actions: Resource .*stonith::.* is active on 2 nodes attempting recovery",
 #                    "error: process_pe_message: Transition .* ERRORs found during PE processing",
             ]
 
         cib_ignore = [
             "lrmd.*error: crm_ipc_read: Connection to stonith-ng failed",
             "lrmd.*error: mainloop_gio_callback: Connection to stonith-ng.* closed",
             "lrmd.*error: stonith_connection_destroy_cb: LRMD lost STONITH connection",
             "lrmd.*error: stonith_connection_failed: STONITH connection failed, finalizing .* pending operations",
             ]
 
         fullcomplist["cib"] = Process(self, "cib", pats = [
                     "State transition .* S_RECOVERY",
                     "Respawning .* crmd",
                     "Respawning .* attrd",
                     "error: crm_ipc_read: Connection to cib_.* failed",
                     "error: mainloop_gio_callback: Connection to cib_.* closed",
                     "Connection to the CIB terminated...",
                     "Child process crmd .* exited: Generic Pacemaker error",
                     "Child process attrd .* exited: (Connection reset by peer|Transport endpoint is not connected)",
                      "error: attrd_cib_destroy_cb: Lost connection to CIB service",
                     "crmd.*Input I_TERMINATE from do_recover",
                     "crmd.*I_ERROR.*crmd_cib_connection_destroy",
                     "crmd.*Could not recover from internal error",
                     ], badnews_ignore = cib_ignore, common_ignore = self.common_ignore)
 
         fullcomplist["lrmd"] = Process(self, "lrmd", pats = [
                     "State transition .* S_RECOVERY",
                     "LRM Connection failed",
                     "Respawning .* crmd",
                     "error: crm_ipc_read: Connection to lrmd failed",
                     "error: mainloop_gio_callback: Connection to lrmd.* closed",
                     "crmd.*I_ERROR.*lrm_connection_destroy",
                     "Child process crmd .* exited: Generic Pacemaker error",
                     "crmd.*Input I_TERMINATE from do_recover",
                     "crmd.* Could not recover from internal error",
                     ], badnews_ignore = self.common_ignore)
 
         fullcomplist["crmd"] = Process(self, "crmd", pats = [
 #                    "WARN: determine_online_status: Node .* is unclean",
 #                    "Scheduling Node .* for STONITH",
 #                    "Executing .* fencing operation",
 # Only if the node wasn't the DC:  "State transition S_IDLE",
                     "State transition .* -> S_IDLE",
                     ], badnews_ignore = self.common_ignore)
 
         fullcomplist["attrd"] = Process(self, "attrd", pats = [
                     ], badnews_ignore = self.common_ignore)
 
         fullcomplist["pengine"] = Process(self, "pengine", dc_pats = [
                     "State transition .* S_RECOVERY",
                     "Respawning .* crmd",
                     "Child process crmd .* exited: Generic Pacemaker error",
                     "crm_ipc_read: Connection to pengine failed",
                     "error: mainloop_gio_callback: Connection to pengine.* closed",
                     "crit: pe_ipc_destroy: Connection to the Policy Engine failed",
                     "crmd.*I_ERROR.*save_cib_contents",
                     "crmd.*Input I_TERMINATE from do_recover",
                     "crmd.* Could not recover from internal error",
                     ], badnews_ignore = self.common_ignore)
 
         stonith_ignore = [
             "LogActions: Recover Fencing",
             "update_failcount: Updating failcount for Fencing",
             "error: te_connect_stonith: Sign-in failed: triggered a retry",
             "stonith_connection_failed: STONITH connection failed, finalizing .* pending operations.",
             "process_lrm_event: Operation Fencing.* Error"
             ]
 
         stonith_ignore.extend(self.common_ignore)
 
         fullcomplist["stonith-ng"] = Process(self, "stonith-ng", process="stonithd", pats = [
                 "crm_ipc_read: Connection to stonith-ng failed",
                 "stonith_connection_destroy_cb: LRMD lost STONITH connection",
                 "mainloop_gio_callback: Connection to stonith-ng.* closed",
                 "tengine_stonith_connection_destroy: Fencing daemon connection failed",
                 "crmd.*stonith_api_add_notification: Callback already present",
                 ], badnews_ignore = stonith_ignore)
 
         vgrind = self.Env["valgrind-procs"].split()
         for key in fullcomplist.keys():
             if self.Env["valgrind-tests"]:
                 if key in vgrind:
                     # Processes running under valgrind can't be shot with "killall -9 processname"
                     self.log("Filtering %s from the component list as it is being profiled by valgrind" % key)
                     continue
             if key == "stonith-ng" and not self.Env["DoFencing"]:
                 continue
 
             self.complist.append(fullcomplist[key])
 
         #self.complist = [ fullcomplist["pengine"] ]
         return self.complist
 
 class crm_whitetank(crm_ais):
     '''
     The crm version 3 cluster manager class.
     It implements the things we need to talk to and manipulate
     crm clusters running on top of openais
     '''
     def __init__(self, Environment, randseed=None):
         crm_ais.__init__(self, Environment, randseed=randseed)
 
         self.update({
             "Name"           : "crm-whitetank",
             "StartCmd"       : "service openais start",
             "StopCmd"        : "service openais stop",
 
             "Pat:We_stopped"   : "%s.*openais.*pcmk_shutdown: Shutdown complete",
             "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost",
             "Pat:They_dead"    : "openais:.*Node %s is now: lost",
 
             "Pat:ChildKilled"  : "%s openais.*Child process %s terminated with signal 9",
             "Pat:ChildRespawn" : "%s openais.*Respawning failed child process: %s",
             "Pat:ChildExit"    : "Child process .* exited",
         })
 
     def Components(self):
         self.ais_components()
 
         aisexec_ignore = [
                     "error: ais_dispatch: Receiving message .* failed",
                     "crmd.*I_ERROR.*crmd_cib_connection_destroy",
                     "cib.*error: cib_cs_destroy: AIS connection terminated",
                     #"crmd.*error: crm_ais_destroy: AIS connection terminated",
                     "crmd.* Could not recover from internal error",
                     "crmd.*I_TERMINATE.*do_recover",
                     "attrd.*attrd_cs_destroy: Lost connection to Corosync service!",
                     "stonithd.*error: Corosync connection terminated",
             ]
 
         aisexec_ignore.extend(self.common_ignore)
 
         self.complist.append(Process(self, "aisexec", pats = [
                     "error: ais_dispatch: AIS connection failed",
                     "crmd.*error: do_exit: Could not recover from internal error",
                     "pengine.*Scheduling Node .* for STONITH",
                     "stonithd.*requests a STONITH operation RESET on node",
                     "stonithd.*Succeeded to STONITH the node",
                     ], badnews_ignore = aisexec_ignore))
 
 class crm_cs_v0(crm_ais):
     '''
     The crm version 3 cluster manager class.
     It implements the things we need to talk to and manipulate
 
     crm clusters running against version 0 of our plugin
     '''
     def __init__(self, Environment, randseed=None):
         crm_ais.__init__(self, Environment, randseed=randseed)
 
         self.update({
             "Name"           : "crm-plugin-v0",
             "StartCmd"       : "service corosync start",
             "StopCmd"        : "service corosync stop",
 
 # The next pattern is too early
 #            "Pat:We_stopped"   : "%s.*Service engine unloaded: Pacemaker Cluster Manager",
 # The next pattern would be preferred, but it doesn't always come out
 #            "Pat:We_stopped"   : "%s.*Corosync Cluster Engine exiting with status",
             "Pat:We_stopped"  : "%s.*Service engine unloaded: corosync cluster quorum service",
             "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost",
             "Pat:They_dead"    : "corosync:.*Node %s is now: lost",
 
             "Pat:ChildKilled"  : "%s corosync.*Child process %s terminated with signal 9",
             "Pat:ChildRespawn" : "%s corosync.*Respawning failed child process: %s",
         })
 
     def Components(self):
         self.ais_components()
 
         corosync_ignore = [
             r"error: pcmk_cpg_dispatch: Connection to the CPG API failed: Library error",
             r"pacemakerd.*error: pcmk_child_exit: Child process .* exited",
             r"cib.*error: cib_cs_destroy: Corosync connection lost",
             r"stonith-ng.*error: stonith_peer_cs_destroy: Corosync connection terminated",
             r"error: pcmk_child_exit: Child process cib .* exited: Invalid argument",
             r"error: pcmk_child_exit: Child process attrd .* exited: Transport endpoint is not connected",
             r"error: pcmk_child_exit: Child process crmd .* exited: Link has been severed",
             r"lrmd.*error: crm_ipc_read: Connection to stonith-ng failed",
             r"lrmd.*error: mainloop_gio_callback: Connection to stonith-ng.* closed",
             r"lrmd.*error: stonith_connection_destroy_cb: LRMD lost STONITH connection",
             r"crmd.*do_state_transition: State transition .* S_RECOVERY",
             r"crmd.*error: do_log: FSA: Input I_ERROR",
             r"crmd.*error: do_log: FSA: Input I_TERMINATE",
             r"crmd.*error: pcmk_cman_dispatch: Connection to cman failed",
             r"crmd.*error: crmd_fast_exit: Could not recover from internal error",
             r"error: crm_ipc_read: Connection to cib_shm failed",
             r"error: mainloop_gio_callback: Connection to cib_shm.* closed",
             r"error: stonith_connection_failed: STONITH connection failed",
             ]
 
         self.complist.append(Process(self, "corosync", pats = [
                     r"pacemakerd.*error: cfg_connection_destroy: Connection destroyed",
                     r"pacemakerd.*error: mcp_cpg_destroy: Connection destroyed",
                     r"crit: attrd_(cs|cpg)_destroy: Lost connection to Corosync service",
                     r"stonith_peer_cs_destroy: Corosync connection terminated",
                     r"cib_cs_destroy: Corosync connection lost!  Exiting.",
                     r"crmd_(cs|quorum)_destroy: connection terminated",
                     r"pengine.*Scheduling Node .* for STONITH",
                     r"tengine_stonith_notify: Peer .* was terminated .*: OK",
                     ], badnews_ignore = corosync_ignore, common_ignore = self.common_ignore))
 
         return self.complist
 
 class crm_cs_v1(crm_cs_v0):
     '''
     The crm version 3 cluster manager class.
     It implements the things we need to talk to and manipulate
 
     crm clusters running on top of version 1 of our plugin
     '''
     def __init__(self, Environment, randseed=None):
         crm_cs_v0.__init__(self, Environment, randseed=randseed)
 
         self.update({
             "Name"           : "crm-plugin-v1",
             "StartCmd"       : "service corosync start && service pacemaker start",
-            "StopCmd"        : "service pacemaker stop; service corosync stop",
+            "StopCmd"        : "service pacemaker stop; service pacemaker_remote stop; service corosync stop",
 
             "EpocheCmd"      : "crm_node -e",
             "QuorumCmd"      : "crm_node -q",
             "ParitionCmd"    : "crm_node -p",
 
             "Pat:We_stopped"  : "%s.*Service engine unloaded: corosync cluster quorum service",
             "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost",
             "Pat:They_dead"    : "crmd.*Node %s\[.*state is now lost",
 
             "Pat:ChildKilled"  : "%s pacemakerd.*Child process %s terminated with signal 9",
             "Pat:ChildRespawn" : "%s pacemakerd.*Respawning failed child process: %s",
         })
 
 class crm_mcp(crm_cs_v0):
     '''
     The crm version 4 cluster manager class.
     It implements the things we need to talk to and manipulate
     crm clusters running on top of native corosync (no plugins)
     '''
     def __init__(self, Environment, randseed=None):
         crm_cs_v0.__init__(self, Environment, randseed=randseed)
 
         self.update({
             "Name"           : "crm-mcp",
             "StartCmd"       : "service corosync start && service pacemaker start",
-            "StopCmd"        : "service pacemaker stop; service corosync stop",
+            "StopCmd"        : "service pacemaker stop; service pacemaker_remote stop; service corosync stop",
 
             "EpocheCmd"      : "crm_node -e",
             "QuorumCmd"      : "crm_node -q",
             "ParitionCmd"    : "crm_node -p",
 
             # Close enough... "Corosync Cluster Engine exiting normally" isn't printed
             #   reliably and there's little interest in doing anything about it
             "Pat:We_stopped"   : "%s.*Unloading all Corosync service engines",
             "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost",
             "Pat:They_dead"    : "crmd.*Node %s\[.*state is now lost",
 
             "Pat:ChildKilled"  : "%s pacemakerd.*Child process %s terminated with signal 9",
             "Pat:ChildRespawn" : "%s pacemakerd.*Respawning failed child process: %s",
 
             "Pat:InfraUp"      : "%s corosync.*Initializing transport",
             "Pat:PacemakerUp"  : "%s pacemakerd.*Starting Pacemaker",
         })
 
         if self.Env["have_systemd"]:
             self.update({
                 # When systemd is in use, we can look for this instead
                 "Pat:We_stopped"   : "%s.*Stopped Corosync Cluster Engine",
             })
 
 class crm_cman(crm_cs_v0):
     '''
     The crm version 3 cluster manager class.
     It implements the things we need to talk to and manipulate
     crm clusters running on top of cman
     '''
     def __init__(self, Environment, randseed=None):
         crm_cs_v0.__init__(self, Environment, randseed=randseed)
 
         self.update({
             "Name"           : "crm-cman",
             "StartCmd"       : "service pacemaker start",
-            "StopCmd"        : "service pacemaker stop",
+            "StopCmd"        : "service pacemaker stop; service pacemaker_remote stop",
 
             "EpocheCmd"      : "crm_node -e --cman",
             "QuorumCmd"      : "crm_node -q --cman",
             "ParitionCmd"    : "crm_node -p --cman",
 
             "Pat:We_stopped"   : "%s.*Unloading all Corosync service engines",
             "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost",
             "Pat:They_dead"    : "crmd.*Node %s\[.*state is now lost",
 
             "Pat:ChildKilled"  : "%s pacemakerd.*Child process %s terminated with signal 9",
             "Pat:ChildRespawn" : "%s pacemakerd.*Respawning failed child process: %s",
         })
diff --git a/cts/CTS.py b/cts/CTS.py
index 896a21a877..ed42634451 100644
--- a/cts/CTS.py
+++ b/cts/CTS.py
@@ -1,1791 +1,1791 @@
 '''CTS: Cluster Testing System: Main module
 
 Classes related to testing high-availability clusters...
  '''
 
 __copyright__='''
 Copyright (C) 2000, 2001 Alan Robertson <alanr@unix.sh>
 Licensed under the GNU GPL.
 '''
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
 import types, string, select, sys, time, re, os, struct, signal
 import time, syslog, random, traceback, base64, pickle, binascii, fcntl
 
 
 from socket import gethostbyname_ex
 from UserDict import UserDict
 from subprocess import Popen,PIPE
 from cts.CTSvars import *
 from threading import Thread
 
 trace_rsh=None
 trace_lw=None
 
 has_log_stats = {}
 log_stats_bin = CTSvars.CRM_DAEMON_DIR + "/cts_log_stats.sh"
 log_stats = """
 #!/bin/bash
 # Tool for generating system load reports while CTS runs
 
 trap "" 1
 
 f=$1; shift
 action=$1; shift
 base=`basename $0`
 
 if [ ! -e $f ]; then
     echo "Time, Load 1, Load 5, Load 15, Test Marker" > $f
 fi
 
 function killpid() {
     if [ -e $f.pid ]; then
        kill -9 `cat $f.pid`
        rm -f $f.pid
     fi
 }
 
 function status() {
     if [ -e $f.pid ]; then
        kill -0 `cat $f.pid`
        return $?
     else
        return 1
     fi
 }
 
 function start() {
     # Is it already running?
     if
 	status
     then
         return
     fi
 
     echo Active as $$
     echo $$ > $f.pid
 
     while [ 1 = 1 ]; do
         uptime | sed s/up.*:/,/ | tr '\\n' ',' >> $f
         #top -b -c -n1 | grep -e usr/libexec/pacemaker | grep -v -e grep -e python | head -n 1 | sed s@/usr/libexec/pacemaker/@@ | awk '{print " 0, "$9", "$10", "$12}' | tr '\\n' ',' >> $f
         echo 0 >> $f
         sleep 5
     done
 }
 
 case $action in
     start)
         start
         ;;
     start-bg|bg)
         # Use c --ssh -- ./stats.sh file start-bg
         nohup $0 $f start >/dev/null 2>&1 </dev/null &
         ;;
     stop)
 	killpid
 	;;
     delete)
 	killpid
 	rm -f $f
 	;;
     mark)
 	uptime | sed s/up.*:/,/ | tr '\\n' ',' >> $f
 	echo " $*" >> $f
         start
 	;;
     *)
 	echo "Unknown action: $action."
 	;;
 esac
 """
 
 class CtsLab(UserDict):
     '''This class defines the Lab Environment for the Cluster Test System.
     It defines those things which are expected to change from test
     environment to test environment for the same cluster manager.
 
     It is where you define the set of nodes that are in your test lab,
     what kind of reset mechanism you use, etc.
 
     This class is derived from a UserDict because we hold many
     different parameters of different kinds, and this provides
     a uniform and extensible interface useful for any kind of
     communication between the user/administrator/tester and CTS.
 
     At this point in time, it is the intent of this class to model static
     configuration and/or environmental data about the environment which
     doesn't change as the tests proceed.
 
     Well-known names (keys) are an important concept in this class.
     The HasMinimalKeys member function knows the minimal set of
     well-known names for the class.
 
     The following names are standard (well-known) at this time:
 
         nodes           An array of the nodes in the cluster
         reset           A ResetMechanism object
         logger          An array of objects that log strings...
         CMclass         The type of ClusterManager we are running
                         (This is a class object, not a class instance)
         RandSeed        Random seed.  It is a triple of bytes. (optional)
 
     The CTS code ignores names it doesn't know about/need.
     The individual tests have access to this information, and it is
     perfectly acceptable to provide hints, tweaks, fine-tuning
     directions or other information to the tests through this mechanism.
     '''
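     # Illustrative sketch only; "SomeScenario" stands in for a real Scenario object
     # and the node names are placeholders:
     #   env = CtsLab()
     #   env["nodes"] = ["node1", "node2"]
     #   env["CMclass"] = crm_mcp            # any ClusterManager subclass
     #   rc = env.run(SomeScenario, Iterations=10)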
 
     def __init__(self):
         self.data = {}
         self.rsh = RemoteExec(self)
         self.RandomGen = random.Random()
         self.Scenario = None
 
         #  Get a random seed for the random number generator.
         self["LogWatcher"] = "any"
         self["LogFileName"] = "/var/log/messages"
         self["OutputFile"] = None
         self["SyslogFacility"] = "daemon"
         self["CMclass"] = None
         self["logger"] = ([StdErrLog(self)])
 
         self.SeedRandom()
 
     def SeedRandom(self, seed=None):
         if not seed:
             seed = int(time.time())
 
         if self.has_key("RandSeed"):
             self.log("New random seed is: " + str(seed))
         else:
             self.log("Random seed is: " + str(seed))
 
         self["RandSeed"] = seed
         self.RandomGen.seed(str(seed))
 
     def HasMinimalKeys(self):
         'Return TRUE if our object has the minimal set of keys/values in it'
         result = 1
         for key in self.MinimalKeys:
             if not self.has_key(key):
                 result = None
         return result
 
     def log(self, args):
         "Log using each of the supplied logging methods"
         for logfcn in self._logfunctions:
             logfcn(string.strip(args))
 
     def debug(self, args):
         "Log using each of the supplied logging methods"
         for logfcn in self._logfunctions:
             if logfcn.name() != "StdErrLog":
                 logfcn("debug: %s" % string.strip(args))
 
     def dump(self):
         keys = []
         for key in self.keys():
             keys.append(key)
 
         keys.sort()
         for key in keys:
             self.debug("Environment["+key+"]:\t"+str(self[key]))
 
     def run(self, Scenario, Iterations):
         if not Scenario:
             self.log("No scenario was defined")
             return 1
 
         self.log("Cluster nodes: ")
         for node in self["nodes"]:
             self.log("    * %s" % (node))
 
         self.StatsMark(0)
         if not Scenario.SetUp():
             return 1
 
         try :
             Scenario.run(Iterations)
         except :
             self.log("Exception by %s" % sys.exc_info()[0])
             for logmethod in self["logger"]:
                 traceback.print_exc(50, logmethod)
 
             Scenario.summarize()
             Scenario.TearDown()
             self.StatsExtract()
             return 1
 
         #ClusterManager.oprofileSave(Iterations)
         Scenario.TearDown()
         self.StatsExtract()
 
         Scenario.summarize()
         if Scenario.Stats["failure"] > 0:
             return Scenario.Stats["failure"]
 
         elif Scenario.Stats["success"] != Iterations:
             self.log("No failure count but success != requested iterations")
             return 1
 
         return 0
 
     def __setitem__(self, key, value):
         '''Since this function gets called whenever we modify the
         dictionary (object), we can (and do) validate those keys that we
         know how to validate.  For the most part, we know how to validate
         the "MinimalKeys" elements.
         '''
 
         #
         #        List of nodes in the system
         #
         if key == "nodes":
             self.Nodes = {}
             for node in value:
                 # I don't think I need the IP address, etc. but this validates
                 # the node name against /etc/hosts and/or DNS, so it's a
                 # GoodThing(tm).
                 try:
                     self.Nodes[node] = gethostbyname_ex(node)
                 except:
                     print node+" not found in DNS... aborting"
                     raise
         #
         #        List of Logging Mechanism(s)
         #
         elif key == "logger":
             if len(value) < 1:
                 raise ValueError("Must have at least one logging mechanism")
             for logger in value:
                 if not callable(logger):
                     raise ValueError("'logger' elements must be callable")
             self._logfunctions = value
         #
         #        Cluster Manager Class
         #
         elif key == "CMclass":
             if value and not issubclass(value, ClusterManager):
                 raise ValueError("'CMclass' must be a subclass of"
                 " ClusterManager")
         #
         #        Initial Random seed...
         #
         #elif key == "RandSeed":
         #    if len(value) != 3:
         #        raise ValueError("'Randseed' must be a 3-element list/tuple")
         #    for elem in value:
         #        if not isinstance(elem, types.IntType):
         #            raise ValueError("'Randseed' list must all be ints")
 
         self.data[key] = value
 
     def IsValidNode(self, node):
         'Return TRUE if the given node is valid'
         return self.Nodes.has_key(node)
 
     def __CheckNode(self, node):
         "Raise a ValueError if the given node isn't valid"
 
         if not self.IsValidNode(node):
             raise ValueError("Invalid node [%s] in CheckNode" % node)
 
     def RandomNode(self):
         '''Choose a random node from the cluster'''
         return self.RandomGen.choice(self["nodes"])
 
     def StatsExtract(self):
         if not self["stats"]:
             return
 
         for host in self["nodes"]:
             log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR
             if has_log_stats.has_key(host):
                 self.rsh(host, '''bash %s %s stop''' % (log_stats_bin, log_stats_file))
                 (rc, lines) = self.rsh(host, '''cat %s''' % log_stats_file, stdout=2)
                 self.rsh(host, '''bash %s %s delete''' % (log_stats_bin, log_stats_file))
 
                 fname = "cts-stats-%d-nodes-%s.csv" % (len(self["nodes"]), host)
                 print "Extracted stats: %s" % fname
                 fd = open(fname, "a")
                 fd.writelines(lines)
                 fd.close()
 
     def StatsMark(self, testnum):
         '''Mark the test number in the stats log'''
 
         global has_log_stats
         if not self["stats"]:
             return
 
         for host in self["nodes"]:
             log_stats_file = "%s/cts-stats.csv" % CTSvars.CRM_DAEMON_DIR
             if not has_log_stats.has_key(host):
 
                 global log_stats
                 global log_stats_bin
                 script=log_stats
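                 # Escape quotes, backticks and dollar signs so the embedded script
                 # survives the shell interpolation performed below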
                 #script = re.sub("\\\\", "\\\\", script)
                 script = re.sub('\"', '\\\"', script)
                 script = re.sub("'", "\'", script)
                 script = re.sub("`", "\`", script)
                 script = re.sub("\$", "\\\$", script)
 
                 self.debug("Installing %s on %s" % (log_stats_bin, host))
                 self.rsh(host, '''echo "%s" > %s''' % (script, log_stats_bin), silent=True)
                 self.rsh(host, '''bash %s %s delete''' % (log_stats_bin, log_stats_file))
                 has_log_stats[host] = 1
 
             # Now mark it
             self.rsh(host, '''bash %s %s mark %s''' % (log_stats_bin, log_stats_file, testnum), synchronous=0)
 
 class Logger:
     TimeFormat = "%b %d %H:%M:%S\t"
 
     def __call__(self, lines):
         raise ValueError("Abstract class member (__call__)")
     def write(self, line):
         return self(line.rstrip())
     def writelines(self, lines):
         for s in lines:
             self.write(s)
         return 1
     def flush(self):
         return 1
     def isatty(self):
         return None
 
 class SysLog(Logger):
     # http://docs.python.org/lib/module-syslog.html
     defaultsource="CTS"
     map = {
             "kernel":   syslog.LOG_KERN,
             "user":     syslog.LOG_USER,
             "mail":     syslog.LOG_MAIL,
             "daemon":   syslog.LOG_DAEMON,
             "auth":     syslog.LOG_AUTH,
             "lpr":      syslog.LOG_LPR,
             "news":     syslog.LOG_NEWS,
             "uucp":     syslog.LOG_UUCP,
             "cron":     syslog.LOG_CRON,
             "local0":   syslog.LOG_LOCAL0,
             "local1":   syslog.LOG_LOCAL1,
             "local2":   syslog.LOG_LOCAL2,
             "local3":   syslog.LOG_LOCAL3,
             "local4":   syslog.LOG_LOCAL4,
             "local5":   syslog.LOG_LOCAL5,
             "local6":   syslog.LOG_LOCAL6,
             "local7":   syslog.LOG_LOCAL7,
     }
     def __init__(self, labinfo):
 
         if labinfo.has_key("syslogsource"):
             self.source=labinfo["syslogsource"]
         else:
             self.source=SysLog.defaultsource
 
         self.facility="daemon"

         if labinfo.has_key("SyslogFacility") and labinfo["SyslogFacility"]:
             if SysLog.map.has_key(labinfo["SyslogFacility"]):
                 self.facility=labinfo["SyslogFacility"]
             else:
                 raise ValueError("%s: bad syslog facility"%labinfo["SyslogFacility"])

         self.facility=SysLog.map[self.facility]
         syslog.openlog(self.source, 0, self.facility)
 
     def setfacility(self, facility):
         self.facility = facility
         if SysLog.map.has_key(self.facility):
           self.facility=SysLog.map[self.facility]
         syslog.closelog()
         syslog.openlog(self.source, 0, self.facility)
 
 
     def __call__(self, lines):
         if isinstance(lines, types.StringType):
             syslog.syslog(lines)
         else:
             for line in lines:
                 syslog.syslog(line)
 
     def name(self):
         return "Syslog"
 
 class StdErrLog(Logger):
 
     def __init__(self, labinfo):
         pass
 
     def __call__(self, lines):
         t = time.strftime(Logger.TimeFormat, time.localtime(time.time()))
         if isinstance(lines, types.StringType):
             sys.__stderr__.writelines([t, lines, "\n"])
         else:
             for line in lines:
                 sys.__stderr__.writelines([t, line, "\n"])
         sys.__stderr__.flush()
 
     def name(self):
         return "StdErrLog"
 
 class FileLog(Logger):
     def __init__(self, labinfo, filename=None):
 
         if filename == None:
             filename=labinfo["LogFileName"]
 
         self.logfile=filename
         import os
         self.hostname = os.uname()[1]+" "
         self.source = "CTS: "
     def __call__(self, lines):
 
         fd = open(self.logfile, "a")
         t = time.strftime(Logger.TimeFormat, time.localtime(time.time()))
 
         if isinstance(lines, types.StringType):
             fd.writelines([t, self.hostname, self.source, lines, "\n"])
         else:
             for line in lines:
                 fd.writelines([t, self.hostname, self.source, line, "\n"])
         fd.close()
 
     def name(self):
         return "FileLog"
 
 class AsyncWaitProc(Thread):
     def __init__(self, proc, node, command, Env):
         self.Env = Env
         self.proc = proc
         self.node = node
         self.command = command
         Thread.__init__(self)
 
     def log(self, args):
         if not self.Env:
             print (args)
         else:
             self.Env.log(args)
 
     def debug(self, args):
         if not self.Env:
             print (args)
         else:
             self.Env.debug(args)
     def run(self):
         self.debug("cmd: async: target=%s, pid=%d: %s" % (self.node, self.proc.pid, self.command))
 
         self.proc.wait()
         self.debug("cmd: pid %d returned %d" % (self.proc.pid, self.proc.returncode))
 
         if self.proc.stderr:
             lines = self.proc.stderr.readlines()
             self.proc.stderr.close()
             for line in lines:
                 self.debug("cmd: stderr[%d]: %s" % (self.proc.pid, line))
 
         if self.proc.stdout:
             lines = self.proc.stdout.readlines()
             self.proc.stdout.close()
             for line in lines:
                 self.debug("cmd: stdout[%d]: %s" % (self.proc.pid, line))
 
 class RemoteExec:
     '''This is an abstract remote execution class.  It runs a command on another
        machine - somehow.  The somehow is up to us.  This particular
        class uses ssh.
        Most of the work is done by fork/exec of ssh or scp.
     '''
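     # Illustrative sketch only:
     #   rsh = RemoteExec(Env)
     #   rc = rsh("node1", "uname -n")                     # exit code only
     #   (rc, lines) = rsh("node1", "uname -n", stdout=2)  # exit code plus stdout lines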
 
     def __init__(self, Env=None, silent=False):
         self.Env = Env
         self.async = []
         self.silent = silent
 
         if trace_rsh:
             self.silent = False
 
         #   -n: no stdin, -x: no X11,
         #   -o ServerAliveInterval=5 disconnect after 3*5s if the server stops responding
         self.Command = "ssh -l root -n -x -o ServerAliveInterval=5 -o ConnectTimeout=10 -o TCPKeepAlive=yes -o ServerAliveCountMax=3 "
         #        -B: batch mode, -q: no stats (quiet)
         self.CpCommand = "scp -B -q"
 
         self.OurNode=string.lower(os.uname()[1])
 
     def enable_qarsh(self):
         # http://nstraz.wordpress.com/2008/12/03/introducing-qarsh/
         self.log("Using QARSH for connections to cluster nodes")
 
         self.Command = "qarsh -t 300 -l root"
         self.CpCommand = "qacp -q"
 
     def _fixcmd(self, cmd):
         return re.sub("\'", "'\\''", cmd)
 
     def _cmd(self, *args):
 
         '''Compute the string that will run the given command on the
         given remote system'''
 
         args= args[0]
         sysname = args[0]
         command = args[1]
 
         #print "sysname: %s, us: %s" % (sysname, self.OurNode)
         if sysname == None or string.lower(sysname) == self.OurNode or sysname == "localhost":
             ret = command
         else:
             ret = self.Command + " " + sysname + " '" + self._fixcmd(command) + "'"
         #print ("About to run %s\n" % ret)
         return ret
 
     def log(self, args):
         if not self.silent:
             if not self.Env:
                 print (args)
             else:
                 self.Env.log(args)
 
     def debug(self, args):
         if not self.silent:
             if not self.Env:
                 print (args)
             else:
                 self.Env.debug(args)
 
     def __call__(self, node, command, stdout=0, synchronous=1, silent=False, blocking=True):
         '''Run the given command on the given remote system
         If you call this class like a function, this is the function that gets
         called.  It just runs it roughly as though it were a system() call
         on the remote machine.  The first argument is the name of the machine to
         run it on.
         '''
 
         if trace_rsh:
             silent = False
 
         rc = 0
         result = None
         proc = Popen(self._cmd([node, command]),
                      stdout = PIPE, stderr = PIPE, close_fds = True, shell = True)
 
         if not synchronous and proc.pid > 0 and not self.silent:
             aproc = AsyncWaitProc(proc, node, command, self.Env)
             aproc.start()
             return 0
 
         #if not blocking:
         #    fcntl.fcntl(proc.stdout.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
 
         if proc.stdout:
             if stdout == 1:
                 result = proc.stdout.readline()
             else:
                 result = proc.stdout.readlines()
             proc.stdout.close()
         else:
             self.log("No stdout stream")
 
         rc = proc.wait()
 
         if not silent: self.debug("cmd: target=%s, rc=%d: %s" % (node, rc, command))
 
         if stdout == 1:
             return result
 
         if proc.stderr:
             errors = proc.stderr.readlines()
             proc.stderr.close()
             if not silent:
                 for err in errors:
                     if stdout == 3:
                         result.append("error: "+err)
                     else:
                         self.debug("cmd: stderr: %s" % err)
 
         if stdout == 0:
             if not silent and result:
                 for line in result:
                     self.debug("cmd: stdout: %s" % line)
             return rc
 
         return (rc, result)
 
     def cp(self, source, target, silent=False):
         '''Perform a remote copy'''
         cpstring = self.CpCommand  + " \'" + source + "\'"  + " \'" + target + "\'"
         rc = os.system(cpstring)
         if trace_rsh:
             silent = False
         if not silent: self.debug("cmd: rc=%d: %s" % (rc, cpstring))
 
         return rc
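 
 # A minimal usage sketch (illustrative only; "node1", the Env object and the
 # commands are hypothetical).  stdout=0 returns just the exit code, stdout=1
 # returns the first line of output, any other value returns an (rc, lines)
 # tuple, and synchronous=0 hands the process off to an AsyncWaitProc thread.
 #
 #     rsh = RemoteExec(Env)
 #     rc = rsh("node1", "crm_mon -1", silent=True)             # exit code only
 #     uname = rsh("node1", "uname -n", stdout=1)               # first stdout line
 #     (rc, lines) = rsh("node1", "grep error /var/log/messages", stdout=2)
 #     rsh("node1", "reboot", synchronous=0)                    # fire and forget
 #     rsh.cp("/tmp/cts.supp", "root@node1:/tmp/cts.supp")      # scp a file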
 
 
 has_log_watcher = {}
 log_watcher_bin = CTSvars.CRM_DAEMON_DIR + "/cts_log_watcher.py"
 log_watcher = """
 import sys, os, fcntl
 
 '''
 Remote logfile reader for CTS
 Reads a specified number of lines from the supplied offset
 Returns the current offset
 
 Contains logic for handling truncation
 '''
 
 limit    = 0
 offset   = 0
 prefix   = ''
 filename = '/var/log/messages'
 
 skipthis=None
 args=sys.argv[1:]
 for i in range(0, len(args)):
     if skipthis:
         skipthis=None
         continue
 
     elif args[i] == '-l' or args[i] == '--limit':
         skipthis=1
         limit = int(args[i+1])
 
     elif args[i] == '-f' or args[i] == '--filename':
         skipthis=1
         filename = args[i+1]
 
     elif args[i] == '-o' or args[i] == '--offset':
         skipthis=1
         offset = args[i+1]
 
     elif args[i] == '-p' or args[i] == '--prefix':
         skipthis=1
         prefix = args[i+1]
 
     elif args[i] == '-t' or args[i] == '--tag':
         skipthis=1
 
 if not os.access(filename, os.R_OK):
     print prefix + 'Last read: %d, limit=%d, count=%d - unreadable' % (0, limit, 0)
     sys.exit(1)
 
 logfile=open(filename, 'r')
 logfile.seek(0, os.SEEK_END)
 newsize=logfile.tell()
 
 if offset != 'EOF':
     offset = int(offset)
     if newsize >= offset:
         logfile.seek(offset)
     else:
         print prefix + ('File truncated from %d to %d' % (offset, newsize))
         if (newsize*1.05) < offset:
             logfile.seek(0)
         # else: we probably just lost a few logs after a fencing op
         #       continue from the new end
         # TODO: accept a timestamp and discard all messages older than it
 
 # Don't block when we reach EOF
 fcntl.fcntl(logfile.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
 
 count = 0
 while True:
     if logfile.tell() >= newsize:   break
     elif limit and count >= limit: break
 
     line = logfile.readline()
     if not line: break
 
     print line.strip()
     count += 1
 
 print prefix + 'Last read: %d, limit=%d, count=%d' % (logfile.tell(), limit, count)
 logfile.close()
 """
 
 class SearchObj:
     def __init__(self, Env, filename, host=None, name=None):
 
         self.Env = Env
         self.host = host
         self.name = name
         self.filename = filename
 
         self.offset = "EOF"
 
         if self.host == None:
             self.host = "localhost"
 
     def __str__(self):
         if self.host:
             return "%s:%s" % (self.host, self.filename)
         return self.filename
 
     def log(self, args):
         message = "lw: %s: %s" % (self, args)
         if not self.Env:
             print (message)
         else:
             self.Env.log(message)
 
     def debug(self, args):
         message = "lw: %s: %s" % (self, args)
         if not self.Env:
             print (message)
         else:
             self.Env.debug(message)
 
     def next(self):
         self.log("Not implemented")
 
 class FileObj(SearchObj):
     def __init__(self, Env, filename, host=None, name=None):
         global has_log_watcher
         SearchObj.__init__(self, Env, filename, host, name)
 
         if not has_log_watcher.has_key(host):
 
             global log_watcher
             global log_watcher_bin
 
             self.debug("Installing %s on %s" % (log_watcher_bin, host))
             self.Env.rsh(host, '''echo "%s" > %s''' % (log_watcher, log_watcher_bin), silent=True)
             has_log_watcher[host] = 1
 
         self.next()
 
     def next(self):
         cache = []
 
         global log_watcher_bin
         (rc, lines) = self.Env.rsh(
                 self.host,
                 "python %s -t %s -p CTSwatcher: -f %s -o %s" % (log_watcher_bin, self.name, self.filename, self.offset),
                 stdout=None, silent=True, blocking=False)
 
         for line in lines:
             match = re.search("^CTSwatcher:Last read: (\d+)", line)
             if match:
                 last_offset = self.offset
                 self.offset = match.group(1)
                 #if last_offset == "EOF": self.debug("Got %d lines, new offset: %s" % (len(lines), self.offset))
 
             elif re.search("^CTSwatcher:.*truncated", line):
                 self.log(line)
             elif re.search("^CTSwatcher:", line):
                 self.debug("Got control line: "+ line)
             else:
                 cache.append(line)
 
         return cache
 
 class JournalObj(SearchObj):
 
     def __init__(self, Env, host=None, name=None):
         SearchObj.__init__(self, Env, name, host, name)
         self.next()
 
     def next(self):
         cache = []
         command = "journalctl -q --after-cursor='%s' --show-cursor" % (self.offset)
         if self.offset == "EOF":
             command = "journalctl -q -n 0 --show-cursor"
 
         (rc, lines) = self.Env.rsh(self.host, command, stdout=None, silent=True, blocking=False)
 
         for line in lines:
             match = re.search("^-- cursor: ([^.]+)", line)
             if match:
                 last_offset = self.offset
                 self.offset = match.group(1)
                 if last_offset == "EOF": self.debug("Got %d lines, new cursor: %s" % (len(lines), self.offset))
             else:
                 cache.append(line)
 
         return cache
 
 class LogWatcher(RemoteExec):
 
     '''This class watches logs for messages that fit certain regular
        expressions.  Watching logs for events isn't the ideal way
        to do business, but it's better than nothing :-)
 
        On the other hand, this class is really pretty cool ;-)
 
        The way you use this class is as follows:
           Construct a LogWatcher object
           Call setwatch() when you want to start watching the log
           Call look() to scan the log looking for the patterns
     '''
 
     def __init__(self, Env, log, regexes, name="Anon", timeout=10, debug_level=None, silent=False, hosts=None, kind=None):
         '''This is the constructor for the LogWatcher class.  It takes a
         log name to watch, and a list of regular expressions to watch for.
         '''
         RemoteExec.__init__(self, Env)
 
         #  Validate our arguments.  Better sooner than later ;-)
         for regex in regexes:
             assert re.compile(regex)
 
         if kind:
             self.kind    = kind
         else:
             self.kind    = self.Env["LogWatcher"]
 
         self.name        = name
         self.regexes     = regexes
         self.filename    = log
         self.debug_level = debug_level
         self.whichmatch  = -1
         self.unmatched   = None
 
         self.file_list = []
         self.line_cache = []
 
         if hosts:
             self.hosts = hosts
         else:
             self.hosts = self.Env["nodes"]
 
         if trace_lw:
             self.debug_level = 3
             silent = False
 
         if not silent:
             for regex in self.regexes:
                 self.debug("Looking for regex: "+regex)
 
         self.Timeout = int(timeout)
         self.returnonlymatch = None
 
     def debug(self, args):
         message = "lw: %s: %s" % (self.name, args)
         if not self.Env:
             print (message)
         else:
             self.Env.debug(message)
 
     def setwatch(self):
         '''Mark the place to start watching the log from.
         '''
 
         if self.kind == "remote":
             for node in self.hosts:
                 self.file_list.append(FileObj(self.Env, self.filename, node, self.name))
 
         elif self.kind == "journal":
             for node in self.hosts:
                 self.file_list.append(JournalObj(self.Env, node, self.name))
 
         else:
             self.file_list.append(FileObj(self.Env, self.filename))
 
     def __del__(self):
         if self.debug_level > 1: self.debug("Destroy")
 
     def ReturnOnlyMatch(self, onlymatch=1):
         '''Specify one or more subgroups of the match to return rather than the whole string
            http://www.python.org/doc/2.5.2/lib/match-objects.html
         '''
         self.returnonlymatch = onlymatch
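         # For example (hypothetical pattern, not from this file): with a regex
         # like r"Resource (\S+) started", ReturnOnlyMatch(1) makes look()
         # return just the captured resource name instead of the whole line.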
 
     def __get_lines(self):
         if not len(self.file_list):
             raise ValueError("No sources to read from")
 
         for f in self.file_list:
             lines = f.next()
             if len(lines):
                 self.line_cache.extend(lines)
 
     def look(self, timeout=None, silent=False):
         '''Examine the log looking for the given patterns.
         It starts looking from the place marked by setwatch().
         This function looks in the file in the fashion of tail -f.
         It properly recovers from log file truncation, but not from
         removing and recreating the log.  It would be nice if it
         recovered from this as well :-)
 
         We return the first line which matches any of our patterns.
         '''
         if timeout == None: timeout = self.Timeout
 
         if trace_lw:
             silent = False
 
         lines=0
         needlines=True
         begin=time.time()
         end=begin+timeout+1
         if self.debug_level > 2: self.debug("starting single search: timeout=%d, begin=%d, end=%d" % (timeout, begin, end))
 
         if not self.regexes:
             self.debug("Nothing to look for")
             return None
 
         while True:
 
             if len(self.line_cache):
                 lines += 1
                 line = self.line_cache[0]
                 self.line_cache.remove(line)
 
                 which=-1
                 if re.search("CTS:", line):
                     continue
                 if self.debug_level > 2: self.debug("Processing: "+ line)
                 for regex in self.regexes:
                     which=which+1
                     if self.debug_level > 3: self.debug("Comparing line to: "+ regex)
                     #matchobj = re.search(string.lower(regex), string.lower(line))
                     matchobj = re.search(regex, line)
                     if matchobj:
                         self.whichmatch=which
                         if self.returnonlymatch:
                             return matchobj.group(self.returnonlymatch)
                         else:
                             self.debug("Matched: "+line)
                             if self.debug_level > 1: self.debug("With: "+ regex)
                             return line
 
             elif timeout > 0 and end > time.time():
                 if self.debug_level > 1: self.debug("lines during timeout")
                 time.sleep(1)
                 self.__get_lines()
 
             elif needlines:
                 # Grab any relevant messages that might have arrived since
                 # the last time the buffer was populated
                 if self.debug_level > 1: self.debug("lines without timeout")
                 self.__get_lines()
 
                 # Don't come back here again
                 needlines = False
 
             else:
                 self.debug("Single search terminated: start=%d, end=%d, now=%d, lines=%d" % (begin, end, time.time(), lines))
                 return None
 
         self.debug("How did we get here")
         return None
 
     def lookforall(self, timeout=None, allow_multiple_matches=None, silent=False):
         '''Examine the log looking for ALL of the given patterns.
         It starts looking from the place marked by setwatch().
 
         We return when the timeout is reached, or when we have found
         ALL of the regexes that were part of the watch
         '''
 
         if timeout == None: timeout = self.Timeout
         save_regexes = self.regexes
         returnresult = []
 
         if trace_lw:
             silent = False
 
         if not silent:
             self.debug("starting search: timeout=%d" % timeout)
             for regex in self.regexes:
                 if self.debug_level > 2: self.debug("Looking for regex: "+regex)
 
         while (len(self.regexes) > 0):
             oneresult = self.look(timeout)
             if not oneresult:
                 self.unmatched = self.regexes
                 self.matched = returnresult
                 self.regexes = save_regexes
                 return None
 
             returnresult.append(oneresult)
             if not allow_multiple_matches:
                 del self.regexes[self.whichmatch]
 
             else:
                 # Allow multiple regexes to match a single line
                 tmp_regexes = self.regexes
                 self.regexes = []
                 which = 0
                 for regex in tmp_regexes:
                     matchobj = re.search(regex, oneresult)
                     if not matchobj:
                         self.regexes.append(regex)
 
         self.unmatched = None
         self.matched = returnresult
         self.regexes = save_regexes
         return returnresult
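 
 # A minimal usage sketch (the Env object, log file and patterns are
 # hypothetical).  setwatch() marks the current end of the log on each watched
 # host, look() returns the first later line matching any regex (or None on
 # timeout), and lookforall() keeps reading until every regex has matched or
 # the timeout expires.
 #
 #     watch = LogWatcher(Env, "/var/log/messages",
 #                        [r"pacemakerd.*started", r"crmd.*is now a member"],
 #                        name="startup", timeout=60)
 #     watch.setwatch()
 #     # ... start the cluster here ...
 #     if not watch.lookforall():
 #         print ("Unmatched patterns: %s" % repr(watch.unmatched))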
 
 class NodeStatus:
     def __init__(self, Env):
         self.Env = Env
 
     def IsNodeBooted(self, node):
         '''Return TRUE if the given node is booted (responds to pings)'''
         return self.Env.rsh("localhost", "ping -nq -c1 -w1 %s" % node, silent=True) == 0
 
     def IsSshdUp(self, node):
         rc = self.Env.rsh(node, "true", silent=True)
         return rc == 0
 
     def WaitForNodeToComeUp(self, node, Timeout=300):
         '''Return TRUE when given node comes up, or None/FALSE if timeout'''
         timeout=Timeout
         anytimeouts=0
         while timeout > 0:
             if self.IsNodeBooted(node) and self.IsSshdUp(node):
                 if anytimeouts:
                      # Fudge to wait for the system to finish coming up
                      time.sleep(30)
                      self.Env.debug("Node %s now up" % node)
                 return 1
 
             time.sleep(30)
             if (not anytimeouts):
                 self.Env.debug("Waiting for node %s to come up" % node)
 
             anytimeouts=1
             timeout = timeout - 1
 
         self.Env.log("%s did not come up within %d tries" % (node, Timeout))
         answer = raw_input('Continue? [nY]')
         if answer and answer == "n":
             raise ValueError("%s did not come up within %d tries" % (node, Timeout))
 
     def WaitForAllNodesToComeUp(self, nodes, timeout=300):
         '''Return TRUE when all nodes come up, or FALSE if timeout'''
 
         for node in nodes:
             if not self.WaitForNodeToComeUp(node, timeout):
                 return None
         return 1
 
 class ClusterManager(UserDict):
     '''The Cluster Manager class.
     This is a subclass of the Python dictionary class.
     (This is because it contains lots of {name,value} pairs,
     not because its behavior is all that similar to a
     dictionary in other ways.)

     This is an abstract class which implements high-level
     operations on the cluster and/or its cluster managers.
     Actual cluster manager classes are subclassed from this type.
 
     One of the things we do is track the state we think every node should
     be in.
     '''
 
 
     def __InitialConditions(self):
         #if os.geteuid() != 0:
         #  raise ValueError("Must Be Root!")
         None
 
     def _finalConditions(self):
         for key in self.keys():
             if self[key] == None:
                 raise ValueError("Improper derivation: self[" + key
                 +   "] must be overridden by subclass.")
 
     def __init__(self, Environment, randseed=None):
         self.Env = Environment
         self.__InitialConditions()
         self.clear_cache = 0
         self.TestLoggingLevel=0
         self.data = {
             "up"             : "up",        # Status meaning up
             "down"           : "down",  # Status meaning down
             "StonithCmd"     : "stonith -t baytech -p '10.10.10.100 admin admin' %s",
             "DeadTime"       : 30,        # Max time to detect dead node...
             "StartTime"      : 90,        # Max time to start up
     #
     # These next values need to be overridden in the derived class.
     #
             "Name"           : None,
             "StartCmd"       : None,
             "StopCmd"        : None,
             "StatusCmd"      : None,
             #"RereadCmd"      : None,
             "BreakCommCmd"   : None,
             "FixCommCmd"     : None,
             #"TestConfigDir"  : None,
             "LogFileName"    : None,
 
             #"Pat:Master_started"   : None,
             #"Pat:Slave_started" : None,
             "Pat:We_stopped"   : None,
             "Pat:They_stopped" : None,
 
             "Pat:InfraUp"      : "%s",
             "Pat:PacemakerUp"  : "%s",
 
             "BadRegexes"     : None,        # A set of "bad news" regexes
                                         # to apply to the log
         }
 
         self.rsh = self.Env.rsh
         self.ShouldBeStatus={}
         self.ns = NodeStatus(self.Env)
         self.OurNode=string.lower(os.uname()[1])
         self.__instance_errorstoignore = []
 
     def key_for_node(self, node):
         return node
 
     def instance_errorstoignore_clear(self):
         '''Allows the test scenario to reset instance errors to ignore on each iteration.'''
         self.__instance_errorstoignore = []
 
     def instance_errorstoignore(self):
         '''Return list of errors which are 'normal' for a specific test instance'''
         return self.__instance_errorstoignore
 
     def errorstoignore(self):
         '''Return list of errors which are 'normal' and should be ignored'''
         return []
 
     def log(self, args):
         self.Env.log(args)
 
     def debug(self, args):
         self.Env.debug(args)
 
     def prepare(self):
         '''Finish the Initialization process. Prepare to test...'''
 
         for node in self.Env["nodes"]:
             if self.StataCM(node):
                 self.ShouldBeStatus[node]="up"
             else:
                 self.ShouldBeStatus[node]="down"
 
             self.unisolate_node(node)
 
     def upcount(self):
         '''How many nodes are up?'''
         count=0
         for node in self.Env["nodes"]:
           if self.ShouldBeStatus[node]=="up":
             count=count+1
         return count
 
     def install_helper(self, filename, destdir=None, nodes=None, sourcedir=None):
         if sourcedir == None:
             sourcedir = CTSvars.CTS_home
         file_with_path="%s/%s" % (sourcedir, filename)
         if not nodes:
             nodes = self.Env["nodes"]
 
         if not destdir:
             destdir=CTSvars.CTS_home
 
         self.debug("Installing %s to %s on %s" % (filename, destdir, repr(self.Env["nodes"])))
         for node in nodes:
             self.rsh(node, "mkdir -p %s" % destdir)
             self.rsh.cp(file_with_path, "root@%s:%s/%s" % (node, destdir, filename))
         return file_with_path
 
     def install_config(self, node):
         return None
 
     def clear_all_caches(self):
         if self.clear_cache:
             for node in self.Env["nodes"]:
                 if self.ShouldBeStatus[node] == "down":
                     self.debug("Removing cache file on: "+node)
                     self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache")
                 else:
                     self.debug("NOT Removing cache file on: "+node)
 
     def prepare_fencing_watcher(self, node):
         # If we don't have quorum now but get it as a result of starting this node,
         # then a bunch of nodes might get fenced
         upnode=None
         if self.HasQuorum(None):
             return None
 
         if not self.has_key("Pat:Fencing_start"):
             return None
 
         if not self.has_key("Pat:Fencing_ok"):
             return None
 
         stonith = None
         stonithPats = []
         for peer in self.Env["nodes"]:
             if peer != node and self.ShouldBeStatus[peer] != "up":
                 stonithPats.append(self["Pat:Fencing_ok"] % peer)
                 stonithPats.append(self["Pat:Fencing_start"] % peer)
             elif self.Env["Stack"] == "corosync (cman)":
                 # There is a delay between gaining quorum and CMAN starting fencing
                 # This can mean that even nodes that are fully up get fenced
                 # There is no use fighting it, just look for everyone so that CTS doesn't get confused
                 stonithPats.append(self["Pat:Fencing_ok"] % peer)
                 stonithPats.append(self["Pat:Fencing_start"] % peer)
 
             if peer != node and not upnode and self.ShouldBeStatus[peer] == "up":
                 upnode = peer
 
         # Look for STONITH ops; depending on Env["at-boot"] we might need to change the nodes' status
         if not upnode:
             return None
 
         stonith = LogWatcher(self.Env, self["LogFileName"], stonithPats, "StartupFencing", 0, hosts=[upnode])
         stonith.setwatch()
         return stonith
 
     def fencing_cleanup(self, node, stonith):
         peer_list = []
         peer_state = {}
 
         self.debug("Looking for nodes that were fenced as a result of %s starting" % node)
 
         # If we just started a node, we may now have quorum (and permission to fence)
         if not stonith:
             self.debug("Nothing to do")
             return peer_list
 
         q = self.HasQuorum(None)
         if not q and len(self.Env["nodes"]) > 2:
             # We didn't gain quorum - we shouldn't have shot anyone
             self.debug("Quorum: %d Len: %d" % (q, len(self.Env["nodes"])))
             return peer_list
 
         # Now see if any states need to be updated
         self.debug("looking for: " + repr(stonith.regexes))
         shot = stonith.look(0)
         while shot:
             line = repr(shot)
             self.debug("Found: "+ line)
             del stonith.regexes[stonith.whichmatch]
 
             # Extract node name
             peer = None
             for n in self.Env["nodes"]:
                 if re.search(self["Pat:Fencing_ok"] % n, shot):
                     peer = n
                     peer_state[peer] = "complete"
                     self.__instance_errorstoignore.append(self["Pat:Fencing_ok"] % peer)
 
                 elif re.search(self["Pat:Fencing_start"] % n, shot):
                     peer = n
                     peer_state[peer] = "in-progress"
                     self.__instance_errorstoignore.append(self["Pat:Fencing_start"] % peer)
 
             if not peer:
                 self.log("ERROR: Unknown stonith match: %s" % line)
 
             elif not peer in peer_list:
                 self.debug("Found peer: "+ peer)
                 peer_list.append(peer)
 
             # Get the next one
             shot = stonith.look(60)
 
         for peer in peer_list:
 
             self.debug("   Peer %s was fenced as a result of %s starting: %s" % (peer, node, peer_state[peer]))
             if self.Env["at-boot"]:
                 self.ShouldBeStatus[peer] = "up"
             else:
                 self.ShouldBeStatus[peer] = "down"
 
             if peer_state[peer] == "in-progress":
                 # Wait for any in-progress operations to complete
                 shot = stonith.look(60)
                 while len(stonith.regexes) and shot:
                     line = repr(shot)
                     self.debug("Found: "+ line)
                     del stonith.regexes[stonith.whichmatch]
                     shot = stonith.look(60)
 
             # Now make sure the node is alive too
             self.ns.WaitForNodeToComeUp(peer, self["DeadTime"])
 
             # Poll until it comes up
             if self.Env["at-boot"]:
                 if not self.StataCM(peer):
                     time.sleep(self["StartTime"])
 
                 if not self.StataCM(peer):
                     self.log("ERROR: Peer %s failed to restart after being fenced" % peer)
                     return None
 
         return peer_list
 
     def StartaCM(self, node, verbose=False):
 
         '''Start up the cluster manager on a given node'''
         if verbose: self.log("Starting %s on node %s" %(self["Name"], node))
         else: self.debug("Starting %s on node %s" %(self["Name"], node))
         ret = 1
 
         if not self.ShouldBeStatus.has_key(node):
             self.ShouldBeStatus[node] = "down"
 
         if self.ShouldBeStatus[node] != "down":
             return 1
 
         patterns = []
         # Technically we should always be able to notice ourselves starting
         patterns.append(self["Pat:Local_started"] % node)
         if self.upcount() == 0:
             patterns.append(self["Pat:Master_started"] % node)
         else:
             patterns.append(self["Pat:Slave_started"] % node)
 
         watch = LogWatcher(
             self.Env, self["LogFileName"], patterns, "StartaCM", self["StartTime"]+10)
 
         self.install_config(node)
 
         self.ShouldBeStatus[node] = "any"
         if self.StataCM(node) and self.cluster_stable(self["DeadTime"]):
             self.log ("%s was already started" %(node))
             return 1
 
         # Clear out the host cache so autojoin can be exercised
         if self.clear_cache:
             self.debug("Removing cache file on: "+node)
             self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache")
 
         if not(self.Env["valgrind-tests"]):
             startCmd = self["StartCmd"]
         else:
             if self.Env["valgrind-prefix"]:
                 prefix = self.Env["valgrind-prefix"]
             else:
                 prefix = "cts"
 
             startCmd = """G_SLICE=always-malloc HA_VALGRIND_ENABLED='%s' VALGRIND_OPTS='%s --log-file=/tmp/%s-%s.valgrind' %s""" % (
                 self.Env["valgrind-procs"], self.Env["valgrind-opts"], prefix, """%p""", self["StartCmd"])
 
         stonith = self.prepare_fencing_watcher(node)
 
         watch.setwatch()
 
         if self.rsh(node, startCmd) != 0:
             self.log ("Warn: Start command failed on node %s" %(node))
             self.fencing_cleanup(node, stonith)
             return None
 
         self.ShouldBeStatus[node]="up"
         watch_result = watch.lookforall()
 
         if watch.unmatched:
             for regex in watch.unmatched:
                 self.log ("Warn: Startup pattern not found: %s" %(regex))
 
         if watch_result and self.cluster_stable(self["DeadTime"]):
             #self.debug("Found match: "+ repr(watch_result))
             self.fencing_cleanup(node, stonith)
             return 1
 
         elif self.StataCM(node) and self.cluster_stable(self["DeadTime"]):
             self.fencing_cleanup(node, stonith)
             return 1
 
         self.log ("Warn: Start failed for node %s" %(node))
         return None
 
     def StartaCMnoBlock(self, node, verbose=False):
 
        '''Start up the cluster manager on a given node in non-blocking mode'''
 
         if verbose: self.log("Starting %s on node %s" %(self["Name"], node))
         else: self.debug("Starting %s on node %s" %(self["Name"], node))
 
         # Clear out the host cache so autojoin can be exercised
         if self.clear_cache:
             self.debug("Removing cache file on: "+node)
             self.rsh(node, "rm -f "+CTSvars.HA_VARLIBHBDIR+"/hostcache")
 
         self.install_config(node)
         if not(self.Env["valgrind-tests"]):
             startCmd = self["StartCmd"]
         else:
             if self.Env["valgrind-prefix"]:
                 prefix = self.Env["valgrind-prefix"]
             else:
                 prefix = "cts"
 
             startCmd = """G_SLICE=always-malloc HA_VALGRIND_ENABLED='%s' VALGRIND_OPTS='%s --log-file=/tmp/%s-%s.valgrind' %s""" % (
                 self.Env["valgrind-procs"], self.Env["valgrind-opts"], prefix, """%p""", self["StartCmd"])
 
         self.rsh(node, startCmd, synchronous=0)
         self.ShouldBeStatus[node]="up"
         return 1
 
-    def StopaCM(self, node, verbose=False):
+    def StopaCM(self, node, verbose=False, force=False):
 
         '''Stop the cluster manager on a given node'''
 
         if verbose: self.log("Stopping %s on node %s" %(self["Name"], node))
         else: self.debug("Stopping %s on node %s" %(self["Name"], node))
 
-        if self.ShouldBeStatus[node] != "up":
+        if self.ShouldBeStatus[node] != "up" and force == False:
             return 1
 
         if self.rsh(node, self["StopCmd"]) == 0:
             # Make sure we can continue even if corosync leaks
             # fdata-* is the old name
             #self.rsh(node, "rm -f /dev/shm/qb-* /dev/shm/fdata-*")
             self.ShouldBeStatus[node]="down"
             self.cluster_stable(self["DeadTime"])
             return 1
         else:
             self.log ("ERROR: Could not stop %s on node %s" %(self["Name"], node))
 
         return None
 
     def StopaCMnoBlock(self, node):
 
        '''Stop the cluster manager on a given node in non-blocking mode'''
 
         self.debug("Stopping %s on node %s" %(self["Name"], node))
 
         self.rsh(node, self["StopCmd"], synchronous=0)
         self.ShouldBeStatus[node]="down"
         return 1
 
     def cluster_stable(self, timeout = None):
         time.sleep(self["StableTime"])
         return 1
 
     def node_stable(self, node):
         return 1
 
     def RereadCM(self, node):
 
         '''Force the cluster manager on a given node to reread its config
            This may be a no-op on certain cluster managers.
         '''
         rc=self.rsh(node, self["RereadCmd"])
         if rc == 0:
             return 1
         else:
             self.log ("Could not force %s on node %s to reread its config"
             %        (self["Name"], node))
         return None
 
 
     def StataCM(self, node):
 
         '''Report the status of the cluster manager on a given node'''
 
         out=self.rsh(node, self["StatusCmd"] % node, 1)
         ret= (string.find(out, 'stopped') == -1)
 
         try:
             if ret:
                 if self.ShouldBeStatus[node] == "down":
                     self.log(
                     "Node status for %s is %s but we think it should be %s"
                     %        (node, "up", self.ShouldBeStatus[node]))
             else:
                 if self.ShouldBeStatus[node] == "up":
                     self.log(
                     "Node status for %s is %s but we think it should be %s"
                     %        (node, "down", self.ShouldBeStatus[node]))
         except KeyError:        pass
 
         if ret:        self.ShouldBeStatus[node]="up"
         else:        self.ShouldBeStatus[node]="down"
         return ret
 
     def startall(self, nodelist=None, verbose=False, quick=False):
 
         '''Start the cluster manager on every node in the cluster.
         We can do it on a subset of the cluster if nodelist is not None.
         '''
         map = {}
         if not nodelist:
             nodelist=self.Env["nodes"]
 
         for node in nodelist:
             if self.ShouldBeStatus[node] == "down":
                 self.ns.WaitForAllNodesToComeUp(nodelist, 300)
 
         if not quick:
             if not self.StartaCM(node, verbose=verbose):
                 return 0
             return 1
 
         # Approximation of SimulStartList for --boot 
         watchpats = [ ]
         watchpats.append(self["Pat:DC_IDLE"])
         for node in nodelist:
             watchpats.append(self["Pat:Local_started"] % node)
             watchpats.append(self["Pat:InfraUp"] % node)
             watchpats.append(self["Pat:PacemakerUp"] % node)
 
         #   Start all the nodes - at about the same time...
         watch = LogWatcher(self.Env, self["LogFileName"], watchpats, "fast-start", self["DeadTime"]+10)
         watch.setwatch()
 
         if not self.StartaCM(nodelist[0], verbose=verbose):
             return 0
         for node in nodelist:
             self.StartaCMnoBlock(node, verbose=verbose)
 
         watch.lookforall()
         if watch.unmatched:
             for regex in watch.unmatched:
                 self.log ("Warn: Startup pattern not found: %s" %(regex))
 
         if not self.cluster_stable():
             self.log("Cluster did not stabilize")
             return 0
 
         return 1
 
-    def stopall(self, nodelist=None, verbose=False):
+    def stopall(self, nodelist=None, verbose=False, force=False):
 
         '''Stop the cluster managers on every node in the cluster.
         We can do it on a subset of the cluster if nodelist is not None.
         '''
 
         ret = 1
         map = {}
         if not nodelist:
             nodelist=self.Env["nodes"]
         for node in self.Env["nodes"]:
-            if self.ShouldBeStatus[node] == "up":
-                if not self.StopaCM(node, verbose=verbose):
+            if self.ShouldBeStatus[node] == "up" or force == True:
+                if not self.StopaCM(node, verbose=verbose, force=force):
                     ret = 0
         return ret
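 
     # Illustrative use of the new force flag (cm stands for any concrete
     # ClusterManager subclass): stopall(force=True) issues StopCmd on every
     # node, even ones ShouldBeStatus still records as "down", which is how
     # BootCluster.TearDown in CTSscenarios.py now clears out stale clusters
     # left behind by a previous run.
     #
     #     cm.stopall(verbose=True, force=True)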
 
     def rereadall(self, nodelist=None):
 
         '''Force the cluster managers on every node in the cluster
         to reread their config files.  We can do it on a subset of the
         cluster if nodelist is not None.
         '''
 
         map = {}
         if not nodelist:
             nodelist=self.Env["nodes"]
         for node in self.Env["nodes"]:
             if self.ShouldBeStatus[node] == "up":
                 self.RereadCM(node)
 
 
     def statall(self, nodelist=None):
 
         '''Return the status of the cluster managers in the cluster.
         We can do it on a subset of the cluster if nodelist is not None.
         '''
 
         result={}
         if not nodelist:
             nodelist=self.Env["nodes"]
         for node in nodelist:
             if self.StataCM(node):
                 result[node] = "up"
             else:
                 result[node] = "down"
         return result
 
     def isolate_node(self, target, nodes=None):
         '''isolate the communication between the nodes'''
         if not nodes:
             nodes = self.Env["nodes"]
 
         for node in nodes:
             if node != target:
                 rc = self.rsh(target, self["BreakCommCmd"] % self.key_for_node(node))
                 if rc != 0:
                     self.log("Could not break the communication between %s and %s: %d" % (target, node, rc))
                     return None
                 else:
                     self.debug("Communication cut between %s and %s" % (target, node))
         return 1
 
     def unisolate_node(self, target, nodes=None):
         '''fix the communication between the nodes'''
         if not nodes:
             nodes = self.Env["nodes"]
 
         for node in nodes:
             if node != target:
                 restored = 0
 
                 # Limit the amount of time we have asynchronous connectivity for
                 # Restore both sides as simultaneously as possible
                 self.rsh(target, self["FixCommCmd"] % self.key_for_node(node), synchronous=0)
                 self.rsh(node, self["FixCommCmd"] % self.key_for_node(target), synchronous=0)
                 self.debug("Communication restored between %s and %s" % (target, node))
 
     def reducecomm_node(self,node):
         '''reduce the communication between the nodes'''
         rc = self.rsh(node, self["ReduceCommCmd"]%(self.Env["XmitLoss"],self.Env["RecvLoss"]))
         if rc == 0:
             return 1
         else:
             self.log("Could not reduce the communication between the nodes from node: %s" % node)
         return None
 
     def restorecomm_node(self,node):
         '''restore the saved communication between the nodes'''
         rc = 0
         if float(self.Env["XmitLoss"])!=0 or float(self.Env["RecvLoss"])!=0 :
             rc = self.rsh(node, self["RestoreCommCmd"]);
         if rc == 0:
             return 1
         else:
             self.log("Could not restore the communication between the nodes from node: %s" % node)
         return None
 
     def HasQuorum(self, node_list):
         "Return TRUE if the cluster currently has quorum"
         # If we are auditing a partition, then one side will
         #   have quorum and the other not.
         # So the caller needs to tell us which we are checking
         # If no value for node_list is specified... assume all nodes
         raise ValueError("Abstract Class member (HasQuorum)")
 
     def Components(self):
         raise ValueError("Abstract Class member (Components)")
 
     def oprofileStart(self, node=None):
         if not node:
             for n in self.Env["oprofile"]:
                 self.oprofileStart(n)
 
         elif node in self.Env["oprofile"]:
             self.debug("Enabling oprofile on %s" % node)
             self.rsh(node, "opcontrol --init")
             self.rsh(node, "opcontrol --setup --no-vmlinux --separate=lib --callgraph=20 --image=all")
             self.rsh(node, "opcontrol --start")
             self.rsh(node, "opcontrol --reset")
 
     def oprofileSave(self, test, node=None):
         if not node:
             for n in self.Env["oprofile"]:
                 self.oprofileSave(test, n)
 
         elif node in self.Env["oprofile"]:
             self.rsh(node, "opcontrol --dump")
             self.rsh(node, "opcontrol --save=cts.%d" % test)
             # Read back with: opreport -l session:cts.0 image:/usr/lib/heartbeat/c*
             if None:
                 self.rsh(node, "opcontrol --reset")
             else:
                 self.oprofileStop(node)
                 self.oprofileStart(node)
 
     def oprofileStop(self, node=None):
         if not node:
             for n in self.Env["oprofile"]:
                 self.oprofileStop(n)
 
         elif node in self.Env["oprofile"]:
             self.debug("Stopping oprofile on %s" % node)
             self.rsh(node, "opcontrol --reset")
             self.rsh(node, "opcontrol --shutdown 2>&1 > /dev/null")
 
 
 class Resource:
     '''
     This is an HA resource (not a resource group).
     A resource group is just an ordered list of Resource objects.
     '''
 
     def __init__(self, cm, rsctype=None, instance=None):
         self.CM = cm
         self.ResourceType = rsctype
         self.Instance = instance
         self.needs_quorum = 1
 
     def Type(self):
         return self.ResourceType
 
     def Instance(self, nodename):
         return self.Instance
 
     def IsRunningOn(self, nodename):
         '''
         This member function returns true if our resource is running
         on the given node in the cluster.
        It is analogous to the "status" operation on SystemV init scripts and
         heartbeat scripts.  FailSafe calls it the "exclusive" operation.
         '''
         raise ValueError("Abstract Class member (IsRunningOn)")
         return None
 
     def IsWorkingCorrectly(self, nodename):
         '''
         This member function returns true if our resource is operating
         correctly on the given node in the cluster.
         Heartbeat does not require this operation, but it might be called
         the Monitor operation, which is what FailSafe calls it.
         For remotely monitorable resources (like IP addresses), they *should*
         be monitored remotely for testing.
         '''
         raise ValueError("Abstract Class member (IsWorkingCorrectly)")
         return None
 
 
     def Start(self, nodename):
         '''
         This member function starts or activates the resource.
         '''
         raise ValueError("Abstract Class member (Start)")
         return None
 
     def Stop(self, nodename):
         '''
         This member function stops or deactivates the resource.
         '''
         raise ValueError("Abstract Class member (Stop)")
         return None
 
     def __repr__(self):
         if (self.Instance and len(self.Instance) > 1):
                 return "{" + self.ResourceType + "::" + self.Instance + "}"
         else:
                 return "{" + self.ResourceType + "}"
 class Component:
     def kill(self, node):
         None
 
 class Process(Component):
     def __init__(self, cm, name, process=None, dc_only=0, pats=[], dc_pats=[], badnews_ignore=[], common_ignore=[], triggersreboot=0):
         self.name = str(name)
         self.dc_only = dc_only
         self.pats = pats
         self.dc_pats = dc_pats
         self.CM = cm
         self.badnews_ignore = badnews_ignore
         self.badnews_ignore.extend(common_ignore)
         self.triggersreboot = triggersreboot
 
         if process:
             self.proc = str(process)
         else:
             self.proc = str(name)
         self.KillCmd = "killall -9 " + self.proc
 
     def kill(self, node):
         if self.CM.rsh(node, self.KillCmd) != 0:
             self.CM.log ("ERROR: Kill %s failed on node %s" %(self.name,node))
             return None
         return 1
diff --git a/cts/CTSscenarios.py b/cts/CTSscenarios.py
index d2d46f9794..3ae3499fb4 100644
--- a/cts/CTSscenarios.py
+++ b/cts/CTSscenarios.py
@@ -1,561 +1,561 @@
 from CTS import *
 from CTStests import CTSTest
 from CTSaudits import ClusterAudit
 class ScenarioComponent:
 
     def __init__(self, Env):
         self.Env = Env
 
     def IsApplicable(self):
         '''Return TRUE if the current ScenarioComponent is applicable
         in the given LabEnvironment given to the constructor.
         '''
 
         raise ValueError("Abstract Class member (IsApplicable)")
 
     def SetUp(self, CM):
         '''Set up the given ScenarioComponent'''
         raise ValueError("Abstract Class member (Setup)")
 
     def TearDown(self, CM):
         '''Tear down (undo) the given ScenarioComponent'''
         raise ValueError("Abstract Class member (Setup)")
 
 class Scenario:
     (
 '''The basic idea of a scenario is that of an ordered list of
 ScenarioComponent objects.  Each ScenarioComponent is SetUp() in turn,
 and then after the tests have been run, they are torn down using TearDown()
 (in reverse order).
 
 A Scenario is applicable to a particular cluster manager iff each
 ScenarioComponent is applicable.
 
 A partially set up scenario is torn down if it fails during setup.
 ''')
 
     def __init__(self, ClusterManager, Components, Audits, Tests):
 
         "Initialize the Scenario from the list of ScenarioComponents"
         self.ClusterManager = ClusterManager
         self.Components = Components
         self.Audits  = Audits
         self.Tests = Tests
 
         self.BadNews = None
         self.TestSets = []
         self.Stats = {"success":0, "failure":0, "BadNews":0, "skipped":0}
         self.Sets = []
 
         #self.ns=CTS.NodeStatus(self.Env)
 
         for comp in Components:
             if not issubclass(comp.__class__, ScenarioComponent):
                 raise ValueError("Init value must be subclass of ScenarioComponent")
 
         for audit in Audits:
             if not issubclass(audit.__class__, ClusterAudit):
                 raise ValueError("Init value must be subclass of ClusterAudit")
 
         for test in Tests:
             if not issubclass(test.__class__, CTSTest):
                 raise ValueError("Init value must be a subclass of CTSTest")
 
     def IsApplicable(self):
         (
 '''A Scenario IsApplicable() iff each of its ScenarioComponents IsApplicable()
 '''
         )
 
         for comp in self.Components:
             if not comp.IsApplicable():
                 return None
         return 1
 
     def SetUp(self):
         '''Set up the Scenario. Return TRUE on success.'''
 
         self.audit() # Also detects remote/local log config
         self.ClusterManager.prepare()
         self.ClusterManager.ns.WaitForAllNodesToComeUp(self.ClusterManager.Env["nodes"])
 
         self.audit()
         if self.ClusterManager.Env["valgrind-tests"]:
             self.ClusterManager.install_helper("cts.supp")
 
         self.BadNews = LogWatcher(self.ClusterManager.Env,
                                   self.ClusterManager["LogFileName"],
                                   self.ClusterManager["BadRegexes"], "BadNews", 0)
         self.BadNews.setwatch() # Call after we've figured out what type of log watching to do in LogAudit
 
         j=0
         while j < len(self.Components):
             if not self.Components[j].SetUp(self.ClusterManager):
                 # OOPS!  We failed.  Tear partial setups down.
                 self.audit()
                 self.ClusterManager.log("Tearing down partial setup")
                 self.TearDown(j)
                 return None
             j=j+1
 
         self.audit()
         return 1
 
     def TearDown(self, max=None):
 
         '''Tear Down the Scenario - in reverse order.'''
 
         if max == None:
             max = len(self.Components)-1
         j=max
         while j >= 0:
             self.Components[j].TearDown(self.ClusterManager)
             j=j-1
 
         self.audit()
 
     def incr(self, name):
         '''Increment (or initialize) the value associated with the given name'''
         if not self.Stats.has_key(name):
             self.Stats[name]=0
         self.Stats[name] = self.Stats[name]+1
 
     def run(self, Iterations):
         self.ClusterManager.oprofileStart()
         try:
             self.run_loop(Iterations)
             self.ClusterManager.oprofileStop()
         except:
             self.ClusterManager.oprofileStop()
             raise
 
     def run_loop(self, Iterations):
         raise ValueError("Abstract Class member (run_loop)")
 
     def run_test(self, test, testcount):
         nodechoice = self.ClusterManager.Env.RandomNode()
 
         ret = 1
         where = ""
         did_run = 0
 
         self.ClusterManager.Env.StatsMark(testcount)
         self.ClusterManager.instance_errorstoignore_clear()
         self.ClusterManager.log(("Running test %s" % test.name).ljust(35) + (" (%s) " % nodechoice).ljust(15) +"["+ ("%d" % testcount).rjust(3) +"]")
 
         starttime = test.set_timer()
         if not test.setup(nodechoice):
             self.ClusterManager.log("Setup failed")
             ret = 0
 
         elif not test.canrunnow(nodechoice):
             self.ClusterManager.log("Skipped")
             test.skipped()
 
         else:
             did_run = 1
             ret = test(nodechoice)
 
         if not test.teardown(nodechoice):
             self.ClusterManager.log("Teardown failed")
             answer = raw_input('Continue? [nY] ')
             if answer and answer == "n":
                 raise ValueError("Teardown of %s on %s failed" % (test.name, nodechoice))
             ret = 0
 
         stoptime=time.time()
         self.ClusterManager.oprofileSave(testcount)
 
         elapsed_time = stoptime - starttime
         test_time = stoptime - test.get_timer()
         if not test.has_key("min_time"):
             test["elapsed_time"] = elapsed_time
             test["min_time"] = test_time
             test["max_time"] = test_time
         else:
             test["elapsed_time"] = test["elapsed_time"] + elapsed_time
             if test_time < test["min_time"]:
                 test["min_time"] = test_time
             if test_time > test["max_time"]:
                 test["max_time"] = test_time
 
         if ret:
             self.incr("success")
             test.log_timer()
         else:
             self.incr("failure")
             self.ClusterManager.statall()
            did_run = 1  # Force the test count to be incremented anyway so test extraction works
 
         self.audit(test.errorstoignore())
         return did_run
 
     def summarize(self):
         self.ClusterManager.log("****************")
         self.ClusterManager.log("Overall Results:" + repr(self.Stats))
         self.ClusterManager.log("****************")
 
         stat_filter = {
             "calls":0,
             "failure":0,
             "skipped":0,
             "auditfail":0,
             }
         self.ClusterManager.log("Test Summary")
         for test in self.Tests:
             for key in stat_filter.keys():
                 stat_filter[key] = test.Stats[key]
             self.ClusterManager.log(("Test %s: "%test.name).ljust(25) + " %s"%repr(stat_filter))
 
         self.ClusterManager.debug("Detailed Results")
         for test in self.Tests:
             self.ClusterManager.debug(("Test %s: "%test.name).ljust(25) + " %s"%repr(test.Stats))
 
         self.ClusterManager.log("<<<<<<<<<<<<<<<< TESTS COMPLETED")
 
     def audit(self, LocalIgnore=[]):
         errcount=0
         ignorelist = []
         ignorelist.append("CTS:")
         ignorelist.extend(LocalIgnore)
         ignorelist.extend(self.ClusterManager.errorstoignore())
         ignorelist.extend(self.ClusterManager.instance_errorstoignore())
 
         # This makes sure everything is stabilized before starting...
         failed = 0
         for audit in self.Audits:
             if not audit():
                 self.ClusterManager.log("Audit " + audit.name() + " FAILED.")
                 failed += 1
             else:
                 self.ClusterManager.debug("Audit " + audit.name() + " passed.")
 
         while errcount < 1000:
             match = None
             if self.BadNews:
                 match=self.BadNews.look(0)
 
             if match:
                 add_err = 1
                 for ignore in ignorelist:
                     if add_err == 1 and re.search(ignore, match):
                         add_err = 0
                 if add_err == 1:
                     self.ClusterManager.log("BadNews: " + match)
                     self.incr("BadNews")
                     errcount=errcount+1
             else:
                 break
         else:
             answer = raw_input('Big problems.  Continue? [nY]')
             if answer and answer == "n":
                 self.ClusterManager.log("Shutting down.")
                 self.summarize()
                 self.TearDown()
                 raise ValueError("Looks like we hit a BadNews jackpot!")
 
         return failed
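 
 # A minimal usage sketch (cm, audits and tests are hypothetical objects of the
 # required types): a concrete subclass such as RandomTests below wires a
 # ClusterManager, its ScenarioComponents, audits and tests together, and
 # SetUp(), run() and TearDown() then drive a whole CTS run.
 #
 #     scenario = RandomTests(cm, [BootCluster(cm.Env)], audits, tests)
 #     if scenario.SetUp():
 #         scenario.run(500)
 #         scenario.summarize()
 #         scenario.TearDown()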
 
 class AllOnce(Scenario):
    '''Every Test Once''' # Accessible as __doc__
     def run_loop(self, Iterations):
         testcount=1
         for test in self.Tests:
             self.run_test(test, testcount)
             testcount += 1
 
 class RandomTests(Scenario):
     '''Random Test Execution'''
     def run_loop(self, Iterations):
         testcount=1
         while testcount <= Iterations:
             test = self.ClusterManager.Env.RandomGen.choice(self.Tests)
             self.run_test(test, testcount)
             testcount += 1
 
 class BasicSanity(Scenario):
     '''Basic Cluster Sanity'''
     def run_loop(self, Iterations):
         testcount=1
         while testcount <= Iterations:
             test = self.Environment.RandomGen.choice(self.Tests)
             self.run_test(test, testcount)
             testcount += 1
 
 class Sequence(Scenario):
     '''Named Tests in Sequence'''
     def run_loop(self, Iterations):
         testcount=1
         while testcount <= Iterations:
             for test in self.Tests:
                 self.run_test(test, testcount)
                 testcount += 1
 
 class Boot(Scenario):
     '''Start the Cluster'''
     def run_loop(self, Iterations):
         testcount=0
 
 class BootCluster(ScenarioComponent):
     (
 '''BootCluster is the most basic of ScenarioComponents.
 This ScenarioComponent simply starts the cluster manager on all the nodes.
 It is fairly robust as it waits for all nodes to come up before starting
 as they might have been rebooted or crashed for some reason beforehand.
 ''')
     def __init__(self, Env):
         pass
 
     def IsApplicable(self):
         '''BootCluster is so generic it is always Applicable'''
         return 1
 
     def SetUp(self, CM):
         '''Basic Cluster Manager startup.  Start everything'''
 
         CM.prepare()
 
         #        Clear out the cobwebs ;-)
-        self.TearDown(CM)
+        self.TearDown(CM, force=True)
 
         # Now start the Cluster Manager on all the nodes.
         CM.log("Starting Cluster Manager on all nodes.")
         return CM.startall(verbose=True, quick=True)
 
-    def TearDown(self, CM):
+    def TearDown(self, CM, force=False):
        '''Tear down the given ScenarioComponent'''
 
         # Stop the cluster manager everywhere
 
         CM.log("Stopping Cluster Manager on all nodes")
-        return CM.stopall(verbose=True)
+        return CM.stopall(verbose=True, force=force)
 
 class LeaveBooted(BootCluster):
-    def TearDown(self, CM):
+    def TearDown(self, CM, force=False):
        '''Tear down the given ScenarioComponent (leave the cluster running)'''
 
         # Stop the cluster manager everywhere
 
         CM.log("Leaving Cluster running on all nodes")
         return 1
 
 class PingFest(ScenarioComponent):
     (
 '''PingFest does a flood ping to each node in the cluster from the test machine.
 
 If the LabEnvironment Parameter PingSize is set, it will be used as the size
 of ping packet requested (via the -s option).  If it is not set, it defaults
 to 1024 bytes.
 
 According to the manual page for ping:
     Outputs packets as fast as they come back or one hundred times per
     second, whichever is more.  For every ECHO_REQUEST sent a period ``.''
     is printed, while for every ECHO_REPLY received a backspace is printed.
     This provides a rapid display of how many packets are being dropped.
     Only the super-user may use this option.  This can be very hard on a net-
     work and should be used with caution.
 ''' )
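     # For example, with PingSize left at its default of 1024, the child
     # process created below effectively runs, once per cluster node:
     #
     #     ping -qfn -s 1024 <node>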
 
     def __init__(self, Env):
         self.Env = Env
 
     def IsApplicable(self):
         '''PingFests are always applicable ;-)
         '''
 
         return 1
 
     def SetUp(self, CM):
         '''Start the PingFest!'''
 
         self.PingSize=1024
         if CM.Env.has_key("PingSize"):
             self.PingSize=CM.Env["PingSize"]
 
         CM.log("Starting %d byte flood pings" % self.PingSize)
 
         self.PingPids=[]
         for node in CM.Env["nodes"]:
             self.PingPids.append(self._pingchild(node))
 
         CM.log("Ping PIDs: " + repr(self.PingPids))
         return 1
 
     def TearDown(self, CM):
         '''Stop it right now!  My ears are pinging!!'''
 
         for pid in self.PingPids:
             if pid != None:
                 CM.log("Stopping ping process %d" % pid)
                 os.kill(pid, signal.SIGKILL)
 
     def _pingchild(self, node):
 
         Args = ["ping", "-qfn", "-s", str(self.PingSize), node]
 
 
         sys.stdin.flush()
         sys.stdout.flush()
         sys.stderr.flush()
         pid = os.fork()
 
         if pid < 0:
             self.Env.log("Cannot fork ping child")
             return None
         if pid > 0:
             return pid
 
 
         # Otherwise, we're the child process.
 
 
         os.execvp("ping", Args)
         self.Env.log("Cannot execvp ping: " + repr(Args))
         sys.exit(1)
 
 class PacketLoss(ScenarioComponent):
     (
 '''
 Run CTS with a modest amount of packet loss enabled, so we can verify that
 everything still runs as it should when some packets are being dropped.
 ''')
 
     def IsApplicable(self):
         '''always Applicable'''
         return 1
 
     def SetUp(self, CM):
         '''Reduce the reliability of communications'''
         if float(CM.Env["XmitLoss"]) == 0 and float(CM.Env["RecvLoss"]) == 0 :
             return 1
 
         for node in CM.Env["nodes"]:
             CM.reducecomm_node(node)
 
         CM.log("Reduce the reliability of communications")
 
         return 1
 
 
     def TearDown(self, CM):
         '''Fix the reliability of communications'''
 
         if float(CM.Env["XmitLoss"]) == 0 and float(CM.Env["RecvLoss"]) == 0 :
             return 1
 
         for node in CM.Env["nodes"]:
             CM.unisolate_node(node)
 
         CM.log("Fix the reliability of communications")
 
 
 class BasicSanityCheck(ScenarioComponent):
     (
 '''
 Start and stop the cluster manager on the node(s) used for the Basic Sanity Check (BSC).
 ''')
 
     def IsApplicable(self):
         return self.Env["DoBSC"]
 
     def SetUp(self, CM):
 
         CM.prepare()
 
         # Clear out the cobwebs
-        self.TearDown(CM)
+        self.TearDown(CM, force=True)
 
         # Now start the Cluster Manager on all the nodes.
         CM.log("Starting Cluster Manager on BSC node(s).")
         return CM.startall()
 
     def TearDown(self, CM):
         CM.log("Stopping Cluster Manager on BSC node(s).")
         return CM.stopall()
 
 class Benchmark(ScenarioComponent):
     (
 '''
 Start and stop the cluster manager on all nodes when benchmarking is enabled.
 ''')
 
     def IsApplicable(self):
         return self.Env["benchmark"]
 
     def SetUp(self, CM):
 
         CM.prepare()
 
         # Clear out the cobwebs
-        self.TearDown(CM)
+        self.TearDown(CM, force=True)
 
         # Now start the Cluster Manager on all the nodes.
         CM.log("Starting Cluster Manager on all node(s).")
         return CM.startall()
 
     def TearDown(self, CM):
         CM.log("Stopping Cluster Manager on all node(s).")
         return CM.stopall()
 
 class RollingUpgrade(ScenarioComponent):
     (
 '''
 Test a rolling upgrade between two versions of the stack
 ''')
 
     def __init__(self, Env):
         self.Env = Env
 
     def IsApplicable(self):
         if not self.Env["rpm-dir"]:
             return None
         if not self.Env["current-version"]:
             return None
         if not self.Env["previous-version"]:
             return None
 
         return 1
 
     def install(self, node, version):
 
         target_dir = "/tmp/rpm-%s" % version
         src_dir = "%s/%s" % (self.CM.Env["rpm-dir"], version)
 
         rc = self.CM.rsh(node, "mkdir -p %s" % target_dir)
         rc = self.CM.cp("%s/*.rpm %s:%s" % (src_dir, node, target_dir))
         rc = self.CM.rsh(node, "rpm -Uvh --force %s/*.rpm" % (target_dir))
 
         return self.success()
 
     def upgrade(self, node):
         return self.install(node, self.CM.Env["current-version"])
 
     def downgrade(self, node):
         return self.install(node, self.CM.Env["previous-version"])
 
     def SetUp(self, CM):
         CM.prepare()
 
         # Clear out the cobwebs
-        CM.stopall()
+        CM.stopall(force=True)
 
         CM.log("Downgrading all nodes to %s." % self.Env["previous-version"])
 
         for node in self.Env["nodes"]:
             if not self.downgrade(node):
                 CM.log("Couldn't downgrade %s" % node)
                 return None
 
         return 1
 
     def TearDown(self, CM):
         # Stop everything
         CM.log("Stopping Cluster Manager on Upgrade nodes.")
         CM.stopall()
 
         CM.log("Upgrading all nodes to %s." % self.Env["current-version"])
         for node in self.Env["nodes"]:
             if not self.upgrade(node):
                 CM.log("Couldn't upgrade %s" % node)
                 return None
 
         return 1
diff --git a/cts/CTStests.py b/cts/CTStests.py
index 310efb9222..eb8e704776 100644
--- a/cts/CTStests.py
+++ b/cts/CTStests.py
@@ -1,2802 +1,2859 @@
 '''CTS: Cluster Testing System: Tests module
 
 There are a few things we want to do here:
 
  '''
 
 __copyright__='''
 Copyright (C) 2000, 2001 Alan Robertson <alanr@unix.sh>
 Licensed under the GNU GPL.
 
 Add ResourceRecover testcase Zhao Kai <zhaokai@cn.ibm.com>
 '''
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
 #
 #        SPECIAL NOTE:
 #
 #        Tests may NOT implement any cluster-manager-specific code in them.
 #        EXTEND the ClusterManager object to provide the base capabilities
 #        the test needs if you need to do something that the current CM classes
 #        do not.  Otherwise you screw up the whole point of the object structure
 #        in CTS.
 #
 #                Thank you.
 #
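 #        A minimal sketch of that pattern (hypothetical names, shown only for
 #        illustration): add the capability to the cluster manager and have the
 #        test call it, e.g.
 #
 #            class MyClusterManager(ClusterManager):
 #                def flush_caches(self, node):
 #                    return self.rsh(node, "sync")
 #
 #        so the test itself only ever calls self.CM.flush_caches(node).
 #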
 
 import time, os, re, types, string, tempfile, sys
 from stat import *
 from cts import CTS
 from cts.CTSaudits import *
 from cts.CTSvars   import *
 
 AllTestClasses = [ ]
 
 class CTSTest:
     '''
     A Cluster test.
     We implement the basic set of properties and behaviors for a generic
     cluster test.
 
     Cluster tests track their own statistics.
     We keep each of the kinds of counts we track as separate {name,value}
     pairs.
     '''
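     # For example (illustrative values only): after incr("calls") and
     # success(), the counters read
     #     {"calls": 1, "success": 1, "failure": 0, "skipped": 0, "auditfail": 0}
     # and individual values can be fetched with test["success"] and friends.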
 
     def __init__(self, cm):
         #self.name="the unnamed test"
         self.Stats = {"calls":0
         ,        "success":0
         ,        "failure":0
         ,        "skipped":0
         ,        "auditfail":0}
 
 #        if not issubclass(cm.__class__, ClusterManager):
 #            raise ValueError("Must be a ClusterManager object")
         self.CM = cm
         self.Audits = []
         self.timeout=120
         self.passed = 1
         self.is_loop = 0
         self.is_unsafe = 0
         self.is_experimental = 0
         self.is_container = 0
         self.is_valgrind = 0
         self.benchmark = 0  # which tests to benchmark
         self.timer = {}  # timers
 
     def has_key(self, key):
         return self.Stats.has_key(key)
 
     def __setitem__(self, key, value):
         self.Stats[key] = value
 
     def __getitem__(self, key):
         return self.Stats[key]
 
     def log_mark(self, msg):
         self.CM.debug("MARK: test %s %s %d" % (self.name,msg,time.time()))
         return
 
     def get_timer(self,key = "test"):
         try: return self.timer[key]
         except KeyError: return 0
 
     def set_timer(self,key = "test"):
         self.timer[key] = time.time()
         return self.timer[key]
 
     def log_timer(self,key = "test"):
         elapsed = 0
         if key in self.timer:
             elapsed = time.time() - self.timer[key]
             s = key == "test" and self.name or "%s:%s" %(self.name,key)
             self.CM.debug("%s runtime: %.2f" % (s, elapsed))
             del self.timer[key]
         return elapsed
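
     # Typical usage inside a test (mirroring how the tests below use it):
     #
     #     self.set_timer("fence")
     #     watch.lookforall()
     #     self.log_timer("fence")    # debug-logs "<testname>:fence runtime: <secs>"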
 
     def incr(self, name):
         '''Increment (or initialize) the value associated with the given name'''
         if not self.Stats.has_key(name):
             self.Stats[name]=0
         self.Stats[name] = self.Stats[name]+1
 
         # Reset the test passed boolean
         if name == "calls":
             self.passed = 1
 
     def failure(self, reason="none"):
         '''Increment the failure count'''
         self.passed = 0
         self.incr("failure")
         self.CM.log(("Test %s" % self.name).ljust(35)  +" FAILED: %s" % reason)
         return None
 
     def success(self):
         '''Increment the success count'''
         self.incr("success")
         return 1
 
     def skipped(self):
         '''Increment the skipped count'''
         self.incr("skipped")
         return 1
 
     def __call__(self, node):
         '''Perform the given test'''
         raise ValueError("Abstract Class member (__call__)")
         self.incr("calls")
         return self.failure()
 
     def audit(self):
         passed = 1
         if len(self.Audits) > 0:
             for audit in self.Audits:
                 if not audit():
                     self.CM.log("Internal %s Audit %s FAILED." % (self.name, audit.name()))
                     self.incr("auditfail")
                     passed = 0
         return passed
 
     def setup(self, node):
         '''Setup the given test'''
         return self.success()
 
     def teardown(self, node):
         '''Tear down the given test'''
         return self.success()
 
     def create_watch(self, patterns, timeout, name=None):
         if not name:
             name = self.name
         return CTS.LogWatcher(self.CM.Env, self.CM["LogFileName"], patterns, name, timeout)
 
     def local_badnews(self, prefix, watch, local_ignore=[]):
         errcount = 0
         if not prefix:
             prefix = "LocalBadNews:"
 
         ignorelist = []
         ignorelist.append(" CTS: ")
         ignorelist.append(prefix)
         ignorelist.extend(local_ignore)
 
         while errcount < 100:
             match=watch.look(0)
             if match:
                add_err = 1
                for ignore in ignorelist:
                    if add_err == 1 and re.search(ignore, match):
                        add_err = 0
                if add_err == 1:
                    self.CM.log(prefix + " " + match)
                    errcount=errcount+1
             else:
               break
         else:
             self.CM.log("Too many errors!")
 
         return errcount
 
     def is_applicable(self):
         return self.is_applicable_common()
 
     def is_applicable_common(self):
         '''Return TRUE if we are applicable in the current test configuration'''
         #raise ValueError("Abstract Class member (is_applicable)")
 
         if self.is_loop and not self.CM.Env["loop-tests"]:
             return 0
         elif self.is_unsafe and not self.CM.Env["unsafe-tests"]:
             return 0
         elif self.is_valgrind and not self.CM.Env["valgrind-tests"]:
             return 0
         elif self.is_experimental and not self.CM.Env["experimental-tests"]:
             return 0
         elif self.is_container and not self.CM.Env["container-tests"]:
             return 0
         elif self.CM.Env["benchmark"] and self.benchmark == 0:
             return 0
 
         return 1
 
     def find_ocfs2_resources(self, node):
         self.r_o2cb = None
         self.r_ocfs2 = []
 
         (rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 r = AuditResource(self.CM, line)
                 if r.rtype == "o2cb" and r.parent != "NA":
                     self.CM.debug("Found o2cb: %s" % self.r_o2cb)
                     self.r_o2cb = r.parent
             if re.search("^Constraint", line):
                 c = AuditConstraint(self.CM, line)
                 if c.type == "rsc_colocation" and c.target == self.r_o2cb:
                     self.r_ocfs2.append(c.rsc)
 
         self.CM.debug("Found ocfs2 filesystems: %s" % repr(self.r_ocfs2))
         return len(self.r_ocfs2)
 
     def canrunnow(self, node):
         '''Return TRUE if we can meaningfully run right now'''
         return 1
 
     def errorstoignore(self):
         '''Return list of errors which are 'normal' and should be ignored'''
         return []
 
 ###################################################################
 class StopTest(CTSTest):
 ###################################################################
     '''Stop (deactivate) the cluster manager on a node'''
     def __init__(self, cm):
         CTSTest.__init__(self, cm)
         self.name="Stop"
 
     def __call__(self, node):
         '''Perform the 'stop' test. '''
         self.incr("calls")
         if self.CM.ShouldBeStatus[node] != "up":
             return self.skipped()
 
         patterns = []
         # Technically we should always be able to notice ourselves stopping
         patterns.append(self.CM["Pat:We_stopped"] % node)
 
         #if self.CM.Env["use_logd"]:
         #    patterns.append(self.CM["Pat:Logd_stopped"] % node)
 
         # Any active node needs to notice this one left
         # NOTE: This won't work if we have multiple partitions
         for other in self.CM.Env["nodes"]:
             if self.CM.ShouldBeStatus[other] == "up" and other != node:
                 patterns.append(self.CM["Pat:They_stopped"] %(other, self.CM.key_for_node(node)))
                 #self.debug("Checking %s will notice %s left"%(other, node))
 
         watch = self.create_watch(patterns, self.CM["DeadTime"])
         watch.setwatch()
 
         if node == self.CM.OurNode:
             self.incr("us")
         else:
             if self.CM.upcount() <= 1:
                 self.incr("all")
             else:
                 self.incr("them")
 
         self.CM.StopaCM(node)
         watch_result = watch.lookforall()
 
         failreason=None
         UnmatchedList = "||"
         if watch.unmatched:
             (rc, output) = self.CM.rsh(node, "/bin/ps axf", None)
             for line in output:
                 self.CM.debug(line)
 
             (rc, output) = self.CM.rsh(node, "/usr/sbin/dlm_tool dump", None)
             for line in output:
                 self.CM.debug(line)
 
             for regex in watch.unmatched:
                 self.CM.log ("ERROR: Shutdown pattern not found: %s" % (regex))
                 UnmatchedList += regex + "||"
                 failreason="Missing shutdown pattern"
 
         self.CM.cluster_stable(self.CM["DeadTime"])
 
         if not watch.unmatched or self.CM.upcount() == 0:
             return self.success()
 
         if len(watch.unmatched) >= self.CM.upcount():
             return self.failure("no match against (%s)" % UnmatchedList)
 
         if failreason == None:
             return self.success()
         else:
             return self.failure(failreason)
 #
 # We don't register StopTest because it's better when called by
 # another test...
 #
 
 ###################################################################
 class StartTest(CTSTest):
 ###################################################################
     '''Start (activate) the cluster manager on a node'''
     def __init__(self, cm, debug=None):
         CTSTest.__init__(self,cm)
         self.name="start"
         self.debug = debug
 
     def __call__(self, node):
         '''Perform the 'start' test. '''
         self.incr("calls")
 
         if self.CM.upcount() == 0:
             self.incr("us")
         else:
             self.incr("them")
 
         if self.CM.ShouldBeStatus[node] != "down":
             return self.skipped()
         elif self.CM.StartaCM(node):
             return self.success()
         else:
             return self.failure("Startup %s on node %s failed"
                                 %(self.CM["Name"], node))
 
 #
 # We don't register StartTest because it's better when called by
 # another test...
 #
 
 ###################################################################
 class FlipTest(CTSTest):
 ###################################################################
     '''If it's running, stop it.  If it's stopped, start it.
        Overthrow the status quo...
     '''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="Flip"
         self.start = StartTest(cm)
         self.stop = StopTest(cm)
 
     def __call__(self, node):
         '''Perform the 'Flip' test. '''
         self.incr("calls")
         if self.CM.ShouldBeStatus[node] == "up":
             self.incr("stopped")
             ret = self.stop(node)
             type="up->down"
             # Give the cluster time to recognize it's gone...
             time.sleep(self.CM["StableTime"])
         elif self.CM.ShouldBeStatus[node] == "down":
             self.incr("started")
             ret = self.start(node)
             type="down->up"
         else:
             return self.skipped()
 
         self.incr(type)
         if ret:
             return self.success()
         else:
             return self.failure("%s failure" % type)
 
 #        Register FlipTest as a good test to run
 AllTestClasses.append(FlipTest)
 
 ###################################################################
 class RestartTest(CTSTest):
 ###################################################################
     '''Stop and restart a node'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="Restart"
         self.start = StartTest(cm)
         self.stop = StopTest(cm)
         self.benchmark = 1
 
     def __call__(self, node):
         '''Perform the 'restart' test. '''
         self.incr("calls")
 
         self.incr("node:" + node)
 
         ret1 = 1
         if self.CM.StataCM(node):
             self.incr("WasStopped")
             if not self.start(node):
                 return self.failure("start (setup) failure: "+node)
 
         self.set_timer()
         if not self.stop(node):
             return self.failure("stop failure: "+node)
         if not self.start(node):
             return self.failure("start failure: "+node)
         return self.success()
 
 #        Register RestartTest as a good test to run
 AllTestClasses.append(RestartTest)
 
 ###################################################################
 class StonithdTest(CTSTest):
 ###################################################################
     def __init__(self, cm):
         CTSTest.__init__(self, cm)
         self.name="Stonithd"
         self.startall = SimulStartLite(cm)
         self.benchmark = 1
 
     def __call__(self, node):
         self.incr("calls")
         if len(self.CM.Env["nodes"]) < 2:
             return self.skipped()
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         is_dc = self.CM.is_node_dc(node)
 
         watchpats = []
         watchpats.append("log_operation: Operation .* for host '%s' with device .* returned: 0" % node)
         watchpats.append("tengine_stonith_notify: Peer %s was terminated .*: OK" % node)
 
         if self.CM.Env["at-boot"] == 0:
             self.CM.debug("Expecting %s to stay down" % node)
             self.CM.ShouldBeStatus[node]="down"
         else:
             self.CM.debug("Expecting %s to come up again %d" % (node, self.CM.Env["at-boot"]))
             watchpats.append("%s .*do_state_transition: .* S_STARTING -> S_PENDING" % node)
             watchpats.append("%s .*do_state_transition: .* S_PENDING -> S_NOT_DC" % node)
 
         watch = self.create_watch(watchpats, 30 + self.CM["DeadTime"] + self.CM["StableTime"] + self.CM["StartTime"])
         watch.setwatch()
 
         origin = self.CM.Env.RandomGen.choice(self.CM.Env["nodes"])
 
         rc = self.CM.rsh(origin, "stonith_admin --reboot %s -VVVVVV" % node)
 
         if rc == 194:
             # 194 - 256 = -62 = Timer expired
             #
             # Look for the patterns, usually this means the required
             # device was running on the node to be fenced - or that
             # the required devices were in the process of being loaded
             # and/or moved
             #
             # Effectively the node committed suicide so there will be
             # no confirmation, but pacemaker should be watching and
             # fence the node again
 
             self.CM.log("Fencing command on %s to fence %s timed out" % (origin, node))
 
         elif origin != node and rc != 0:
             self.CM.debug("Waiting for the cluster to recover")
             self.CM.cluster_stable()
 
             self.CM.debug("Waiting STONITHd node to come back up")
             self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"], 600)
 
             self.CM.log("Fencing command on %s failed to fence %s (rc=%d)" % (origin, node, rc))
 
         elif origin == node and rc != 255:
             # 255 == broken pipe, i.e. the node was fenced as expected
             self.CM.log("Locally originated fencing returned %d" % rc)
 
 
         self.set_timer("fence")
         matched = watch.lookforall()
         self.log_timer("fence")
         self.set_timer("reform")
         if watch.unmatched:
             self.CM.log("Patterns not found: " + repr(watch.unmatched))
 
         self.CM.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
 
         self.CM.debug("Waiting STONITHd node to come back up")
         self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"], 600)
 
         self.CM.debug("Waiting for the cluster to re-stabilize with all nodes")
         is_stable = self.CM.cluster_stable(self.CM["StartTime"])
 
         if not matched:
             return self.failure("Didn't find all expected patterns")
         elif not is_stable:
             return self.failure("Cluster did not become stable")
 
         self.log_timer("reform")
         return self.success()
 
     def errorstoignore(self):
         return [
             self.CM["Pat:Fencing_start"] % ".*",
             self.CM["Pat:Fencing_ok"] % ".*",
             "error: native_create_actions: Resource .*stonith::.* is active on 2 nodes attempting recovery",
             "error: remote_op_done: Operation reboot of .*by .* for stonith_admin.*: Timer expired",
             ]
 
     def is_applicable(self):
         if not self.is_applicable_common():
             return 0
 
         if self.CM.Env.has_key("DoFencing"):
             return self.CM.Env["DoFencing"]
 
         return 1
 
 AllTestClasses.append(StonithdTest)
 
 ###################################################################
 class StartOnebyOne(CTSTest):
 ###################################################################
     '''Start all the nodes ~ one by one'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="StartOnebyOne"
         self.stopall = SimulStopLite(cm)
         self.start = StartTest(cm)
         self.ns=CTS.NodeStatus(cm.Env)
 
     def __call__(self, dummy):
         '''Perform the 'StartOnebyOne' test. '''
         self.incr("calls")
 
         #        We ignore the "node" parameter...
 
         #        Shut down all the nodes...
         ret = self.stopall(None)
         if not ret:
             return self.failure("Test setup failed")
 
         failed=[]
         self.set_timer()
         for node in self.CM.Env["nodes"]:
             if not self.start(node):
                 failed.append(node)
 
         if len(failed) > 0:
             return self.failure("Some node failed to start: " + repr(failed))
 
         return self.success()
 
 #        Register StartOnebyOne as a good test to run
 AllTestClasses.append(StartOnebyOne)
 
 ###################################################################
 class SimulStart(CTSTest):
 ###################################################################
     '''Start all the nodes ~ simultaneously'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="SimulStart"
         self.stopall = SimulStopLite(cm)
         self.startall = SimulStartLite(cm)
 
     def __call__(self, dummy):
         '''Perform the 'SimulStart' test. '''
         self.incr("calls")
 
         #        We ignore the "node" parameter...
 
         #        Shut down all the nodes...
         ret = self.stopall(None)
         if not ret:
             return self.failure("Setup failed")
 
         self.CM.clear_all_caches()
 
         if not self.startall(None):
             return self.failure("Startall failed")
 
         return self.success()
 
 #        Register SimulStart as a good test to run
 AllTestClasses.append(SimulStart)
 
 ###################################################################
 class SimulStop(CTSTest):
 ###################################################################
     '''Stop all the nodes ~ simultaneously'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="SimulStop"
         self.startall = SimulStartLite(cm)
         self.stopall = SimulStopLite(cm)
 
     def __call__(self, dummy):
         '''Perform the 'SimulStop' test. '''
         self.incr("calls")
 
         #     We ignore the "node" parameter...
 
         #     Start up all the nodes...
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         if not self.stopall(None):
             return self.failure("Stopall failed")
 
         return self.success()
 
 #     Register SimulStop as a good test to run
 AllTestClasses.append(SimulStop)
 
 ###################################################################
 class StopOnebyOne(CTSTest):
 ###################################################################
     '''Stop all the nodes in order'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="StopOnebyOne"
         self.startall = SimulStartLite(cm)
         self.stop = StopTest(cm)
 
     def __call__(self, dummy):
         '''Perform the 'StopOnebyOne' test. '''
         self.incr("calls")
 
         #     We ignore the "node" parameter...
 
         #     Start up all the nodes...
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         failed=[]
         self.set_timer()
         for node in self.CM.Env["nodes"]:
             if not self.stop(node):
                 failed.append(node)
 
         if len(failed) > 0:
             return self.failure("Some node failed to stop: " + repr(failed))
 
         self.CM.clear_all_caches()
         return self.success()
 
 #     Register StopOnebyOne as a good test to run
 AllTestClasses.append(StopOnebyOne)
 
 ###################################################################
 class RestartOnebyOne(CTSTest):
 ###################################################################
     '''Restart all the nodes in order'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="RestartOnebyOne"
         self.startall = SimulStartLite(cm)
 
     def __call__(self, dummy):
         '''Perform the 'RestartOnebyOne' test. '''
         self.incr("calls")
 
         #     We ignore the "node" parameter...
 
         #     Start up all the nodes...
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         did_fail=[]
         self.set_timer()
         self.restart = RestartTest(self.CM)
         for node in self.CM.Env["nodes"]:
             if not self.restart(node):
                 did_fail.append(node)
 
         if did_fail:
             return self.failure("Could not restart %d nodes: %s"
                                 %(len(did_fail), repr(did_fail)))
         return self.success()
 
 #     Register RestartOnebyOne as a good test to run
 AllTestClasses.append(RestartOnebyOne)
 
 ###################################################################
 class PartialStart(CTSTest):
 ###################################################################
     '''Start a node - but tell it to stop before it finishes starting up'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="PartialStart"
         self.startall = SimulStartLite(cm)
         self.stopall = SimulStopLite(cm)
         self.stop = StopTest(cm)
         #self.is_unsafe = 1
 
     def __call__(self, node):
         '''Perform the 'PartialStart' test. '''
         self.incr("calls")
 
         ret = self.stopall(None)
         if not ret:
             return self.failure("Setup failed")
 
 #   FIXME!  This should use the CM class to get the pattern
 #       then it would be applicable in general
         watchpats = []
         watchpats.append("crmd.*Connecting to cluster infrastructure")
         watch = self.create_watch(watchpats, self.CM["DeadTime"]+10)
         watch.setwatch()
 
         self.CM.StartaCMnoBlock(node)
         ret = watch.lookforall()
         if not ret:
             self.CM.log("Patterns not found: " + repr(watch.unmatched))
             return self.failure("Setup of %s failed" % node)
 
         ret = self.stop(node)
         if not ret:
             return self.failure("%s did not stop in time" % node)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
 
         # We might do some fencing in the 2-node case if we make it up far enough
         return [ """Executing reboot fencing operation""" ]
 
 #     Register PartialStart as a good test to run
 AllTestClasses.append(PartialStart)
 
 #######################################################################
 class StandbyTest(CTSTest):
 #######################################################################
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="Standby"
         self.benchmark = 1
 
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
 
     # make sure the node is active
     # set the node to standby mode
     # check resources, no resources should be running on the node
     # set the node to active mode
     # check resources, resources should have been migrated back (SHOULD THEY?)
 
     def __call__(self, node):
 
         self.incr("calls")
         ret=self.startall(None)
         if not ret:
             return self.failure("Start all nodes failed")
 
         self.CM.debug("Make sure node %s is active" % node)
         if self.CM.StandbyStatus(node) != "off":
             if not self.CM.SetStandbyMode(node, "off"):
                 return self.failure("can't set node %s to active mode" % node)
 
         self.CM.cluster_stable()
 
         status = self.CM.StandbyStatus(node)
         if status != "off":
             return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
 
         self.CM.debug("Getting resources running on node %s" % node)
         rsc_on_node = self.CM.active_resources(node)
 
         watchpats = []
         watchpats.append("do_state_transition:.*-> S_POLICY_ENGINE")
         watch = self.create_watch(watchpats, self.CM["DeadTime"]+10)
         watch.setwatch()
 
         self.CM.debug("Setting node %s to standby mode" % node)
         if not self.CM.SetStandbyMode(node, "on"):
             return self.failure("can't set node %s to standby mode" % node)
 
         self.set_timer("on")
 
         ret = watch.lookforall()
         if not ret:
             self.CM.log("Patterns not found: " + repr(watch.unmatched))
             self.CM.SetStandbyMode(node, "off")
             return self.failure("cluster didn't react to standby change on %s" % node)
 
         self.CM.cluster_stable()
 
         status = self.CM.StandbyStatus(node)
         if status != "on":
             return self.failure("standby status of %s is [%s] but we expect [on]" % (node, status))
         self.log_timer("on")
 
         self.CM.debug("Checking resources")
         bad_run = self.CM.active_resources(node)
         if len(bad_run) > 0:
             rc = self.failure("%s set to standby, %s is still running on it" % (node, repr(bad_run)))
             self.CM.debug("Setting node %s to active mode" % node)
             self.CM.SetStandbyMode(node, "off")
             return rc
 
         self.CM.debug("Setting node %s to active mode" % node)
         if not self.CM.SetStandbyMode(node, "off"):
             return self.failure("can't set node %s to active mode" % node)
 
         self.set_timer("off")
         self.CM.cluster_stable()
 
         status = self.CM.StandbyStatus(node)
         if status != "off":
             return self.failure("standby status of %s is [%s] but we expect [off]" % (node, status))
         self.log_timer("off")
 
         return self.success()
 
 AllTestClasses.append(StandbyTest)
 
 #######################################################################
 class ValgrindTest(CTSTest):
 #######################################################################
     '''Check for memory leaks'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="Valgrind"
         self.stopall = SimulStopLite(cm)
         self.startall = SimulStartLite(cm)
         self.is_valgrind = 1
         self.is_loop = 1
 
     def setup(self, node):
         self.incr("calls")
 
         ret=self.stopall(None)
         if not ret:
             return self.failure("Stop all nodes failed")
 
         # Enable valgrind
         self.logPat = "/tmp/%s-*.valgrind" % self.name
 
         self.CM.Env["valgrind-prefix"] = self.name
 
         self.CM.rsh(node, "rm -f %s" % self.logPat, None)
 
         ret=self.startall(None)
         if not ret:
             return self.failure("Start all nodes failed")
 
         for node in self.CM.Env["nodes"]:
             (rc, output) = self.CM.rsh(node, "ps u --ppid `pidofproc aisexec`", None)
             for line in output:
                 self.CM.debug(line)
 
         return self.success()
 
     def teardown(self, node):
         # Disable valgrind
         self.CM.Env["valgrind-prefix"] = None
 
         # Return all nodes to normal
         ret=self.stopall(None)
         if not ret:
             return self.failure("Stop all nodes failed")
 
         return self.success()
 
     def find_leaks(self):
         # Check for leaks
         leaked = []
         self.stop = StopTest(self.CM)
 
         for node in self.CM.Env["nodes"]:
             (rc, ps_out) = self.CM.rsh(node, "ps u --ppid `pidofproc aisexec`", None)
             rc = self.stop(node)
             if not rc:
                 self.failure("Couldn't shut down %s" % node)
 
             rc = self.CM.rsh(node, "grep -e indirectly.*lost:.*[1-9] -e definitely.*lost:.*[1-9] -e (ERROR|error).*SUMMARY:.*[1-9].*errors %s" % self.logPat, 0)
             if rc != 1:
                 leaked.append(node)
                 self.failure("Valgrind errors detected on %s" % node)
                 for line in ps_out:
                     self.CM.log(line)
                 (rc, output) = self.CM.rsh(node, "grep -e lost: -e SUMMARY: %s" % self.logPat, None)
                 for line in output:
                     self.CM.log(line)
                 (rc, output) = self.CM.rsh(node, "cat %s" % self.logPat, None)
                 for line in output:
                     self.CM.debug(line)
 
         self.CM.rsh(node, "rm -f %s" % self.logPat, None)
         return leaked
 
     def __call__(self, node):
         leaked = self.find_leaks()
         if len(leaked) > 0:
             return self.failure("Nodes %s leaked" % repr(leaked))
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [ """cib:.*readCibXmlFile:""", """HA_VALGRIND_ENABLED""" ]
 
 #######################################################################
 class StandbyLoopTest(ValgrindTest):
 #######################################################################
     '''Check for memory leaks by toggling a node in and out of standby for the configured loop-minutes'''
     def __init__(self, cm):
         ValgrindTest.__init__(self,cm)
         self.name="StandbyLoop"
 
     def __call__(self, node):
 
         lpc = 0
         delay = 2
         failed = 0
         done=time.time() + self.CM.Env["loop-minutes"]*60
         while time.time() <= done and not failed:
             lpc = lpc + 1
 
             time.sleep(delay)
             if not self.CM.SetStandbyMode(node, "on"):
                 self.failure("can't set node %s to standby mode" % node)
                 failed = lpc
 
             time.sleep(delay)
             if not self.CM.SetStandbyMode(node, "off"):
                 self.failure("can't set node %s to active mode" % node)
                 failed = lpc
 
         leaked = self.find_leaks()
         if failed:
             return self.failure("Iteration %d failed" % failed)
         elif len(leaked) > 0:
             return self.failure("Nodes %s leaked" % repr(leaked))
 
         return self.success()
 
 AllTestClasses.append(StandbyLoopTest)
 
 ##############################################################################
 class BandwidthTest(CTSTest):
 ##############################################################################
 #        Tests should not be cluster-manager-specific
 #        If you need to find out cluster manager configuration to do this, then
 #        it should be added to the generic cluster manager API.
     '''Test the bandwidth which heartbeat uses'''
     def __init__(self, cm):
         CTSTest.__init__(self, cm)
         self.name = "Bandwidth"
         self.start = StartTest(cm)
         self.__setitem__("min",0)
         self.__setitem__("max",0)
         self.__setitem__("totalbandwidth",0)
         self.tempfile = tempfile.mktemp(".cts")
         self.startall = SimulStartLite(cm)
 
     def __call__(self, node):
         '''Perform the Bandwidth test'''
         self.incr("calls")
 
         if self.CM.upcount()<1:
             return self.skipped()
 
         Path = self.CM.InternalCommConfig()
         if "ip" not in Path["mediatype"]:
              return self.skipped()
 
         port = Path["port"][0]
         port = int(port)
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Test setup failed")
         time.sleep(5)  # We get extra messages right after startup.
 
 
         fstmpfile = "/var/run/band_estimate"
         dumpcmd = "tcpdump -p -n -c 102 -i any udp port %d > %s 2>&1" \
         %                (port, fstmpfile)
 
         rc = self.CM.rsh(node, dumpcmd)
         if rc == 0:
             farfile = "root@%s:%s" % (node, fstmpfile)
             self.CM.rsh.cp(farfile, self.tempfile)
             Bandwidth = self.countbandwidth(self.tempfile)
             if not Bandwidth:
                 self.CM.log("Could not compute bandwidth.")
                 return self.success()
             intband = int(Bandwidth + 0.5)
             self.CM.log("...bandwidth: %d bits/sec" % intband)
             self.Stats["totalbandwidth"] = self.Stats["totalbandwidth"] + Bandwidth
             if self.Stats["min"] == 0:
                 self.Stats["min"] = Bandwidth
             if Bandwidth > self.Stats["max"]:
                 self.Stats["max"] = Bandwidth
             if Bandwidth < self.Stats["min"]:
                 self.Stats["min"] = Bandwidth
             self.CM.rsh(node, "rm -f %s" % fstmpfile)
             os.unlink(self.tempfile)
             return self.success()
         else:
             return self.failure("no response from tcpdump command [%d]!" % rc)
 
     def countbandwidth(self, file):
         fp = open(file, "r")
         fp.seek(0)
         count = 0
         sum = 0
         while 1:
             line = fp.readline()
             if not line:
                 return None
             if re.search("udp",line) or re.search("UDP,", line):
                 count=count+1
                 linesplit = string.split(line," ")
                 for j in range(len(linesplit)-1):
                     if linesplit[j]=="udp": break
                     if linesplit[j]=="length:": break
 
                 try:
                     sum = sum + int(linesplit[j+1])
                 except ValueError:
                     self.CM.log("Invalid tcpdump line: %s" % line)
                     return None
                 T1 = linesplit[0]
                 timesplit = string.split(T1,":")
                 time2split = string.split(timesplit[2],".")
                 time1 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001
                 break
 
         while count < 100:
             line = fp.readline()
             if not line:
                 return None
             if re.search("udp",line) or re.search("UDP,", line):
                 count = count+1
                 linessplit = string.split(line," ")
                 for j in range(len(linessplit)-1):
                     if linessplit[j] =="udp": break
                     if linesplit[j]=="length:": break
                 try:
                     sum=int(linessplit[j+1])+sum
                 except ValueError:
                     self.CM.log("Invalid tcpdump line: %s" % line)
                     return None
 
         T2 = linessplit[0]
         timesplit = string.split(T2,":")
         time2split = string.split(timesplit[2],".")
         time2 = (long(timesplit[0])*60+long(timesplit[1]))*60+long(time2split[0])+long(time2split[1])*0.000001
         time = time2-time1
         if (time <= 0):
             return 0
         return (sum*8)/time
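
         # Worked example with illustrative figures: if the sampled packets
         # carry 50000 bytes of payload in total over a 2.5 second capture
         # window, the estimate is (50000 * 8) / 2.5 = 160000 bits/sec.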
 
     def is_applicable(self):
         '''BandwidthTest is never applicable'''
         return 0
 
 AllTestClasses.append(BandwidthTest)
 
 
 ###################################################################
 class MaintenanceMode(CTSTest):
 ###################################################################
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="MaintenanceMode"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.max=30
         #self.is_unsafe = 1
         self.benchmark = 1
         self.action = "asyncmon"
         self.interval = 0
         self.rid="maintenanceDummy"
 
     def toggleMaintenanceMode(self, node, action):
         pats = []
         pats.append(self.CM["Pat:DC_IDLE"])
 
         # fail the resource right after turning Maintenance mode on
         # verify it is not recovered until maintenance mode is turned off
         if action == "On":
             pats.append("Updating failcount for %s on .* after .* %s" % (self.rid, self.action))
         else:
             pats.append(self.CM["Pat:RscOpOK"] % (self.rid, "stop_0"))
             pats.append(self.CM["Pat:RscOpOK"] % (self.rid, "start_0"))
 
         watch = self.create_watch(pats, 60)
         watch.setwatch()
 
         self.CM.debug("Turning maintenance mode %s" % action)
         self.CM.rsh(node, self.CM["MaintenanceMode%s" % (action)])
         if (action == "On"):
             self.CM.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node))
 
         self.set_timer("recover%s" % (action))
         watch.lookforall()
         self.log_timer("recover%s" % (action))
         if watch.unmatched:
             self.CM.debug("Failed to find patterns when turning maintenance mode %s" % action)
             return repr(watch.unmatched)
 
         return ""
 
     def insertMaintenanceDummy(self, node):
         pats = []
         pats.append(("%s.*" % node) + (self.CM["Pat:RscOpOK"] % (self.rid, "start_0")))
 
         watch = self.create_watch(pats, 60)
         watch.setwatch()
 
         self.CM.AddDummyRsc(node, self.rid)
 
         self.set_timer("addDummy")
         watch.lookforall()
         self.log_timer("addDummy")
 
         if watch.unmatched:
             self.CM.debug("Failed to find patterns when adding maintenance dummy resource")
             return repr(watch.unmatched)
         return ""
 
     def removeMaintenanceDummy(self, node):
         pats = []
         pats.append(self.CM["Pat:RscOpOK"] % (self.rid, "stop_0"))
 
         watch = self.create_watch(pats, 60)
         watch.setwatch()
         self.CM.RemoveDummyRsc(node, self.rid)
 
         self.set_timer("removeDummy")
         watch.lookforall()
         self.log_timer("removeDummy")
 
         if watch.unmatched:
             self.CM.debug("Failed to find patterns when removing maintenance dummy resource")
             return repr(watch.unmatched)
         return ""
 
     def managedRscList(self, node):
         rscList = []
         (rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 tmp = AuditResource(self.CM, line)
                 if tmp.managed():
                     rscList.append(tmp.id)
 
         return rscList
 
     def verifyResources(self, node, rscList, managed):
         managedList = list(rscList)
         managed_str = "managed"
         if not managed:
             managed_str = "unmanaged"
 
         (rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 tmp = AuditResource(self.CM, line)
                 if managed and not tmp.managed():
                     continue
                 elif not managed and tmp.managed():
                     continue
                 elif managedList.count(tmp.id):
                     managedList.remove(tmp.id)
 
         if len(managedList) == 0:
             self.CM.debug("Found all %s resources on %s" % (managed_str, node))
             return True
 
         self.CM.log("Could not find all %s resources on %s. %s" % (managed_str, node, managedList))
         return False
 
     def __call__(self, node):
         '''Perform the 'MaintenanceMode' test. '''
         self.incr("calls")
         verify_managed = False
         verify_unmanaged = False
         failPat = ""
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         # get a list of all the managed resources. We use this list
         # after enabling maintenance mode to verify all managed resources
         # become un-managed.  After maintenance mode is turned off, we use
         # this list to verify all the resources become managed again.
         managedResources = self.managedRscList(node)
         if len(managedResources) == 0:
             self.CM.log("No managed resources on %s" % node)
             return self.skipped()
 
         # insert a fake resource we can fail during maintenance mode
         # so we can verify recovery does not take place until after maintenance
         # mode is disabled.
         failPat = failPat + self.insertMaintenanceDummy(node)
 
         # toggle maintenance mode ON, then fail dummy resource.
         failPat = failPat + self.toggleMaintenanceMode(node, "On")
 
         # verify all the resources are now unmanaged
         if self.verifyResources(node, managedResources, False):
             verify_unmanaged = True
 
         # Toggle maintenance mode  OFF, verify dummy is recovered.
         failPat = failPat + self.toggleMaintenanceMode(node, "Off")
 
         # verify all the resources are now managed again
         if self.verifyResources(node, managedResources, True):
             verify_managed = True
 
         # Remove our maintenance dummy resource.
         failPat = failPat + self.removeMaintenanceDummy(node)
 
         self.CM.cluster_stable()
 
         if failPat != "":
             return self.failure("Unmatched patterns: %s" % (failPat))
         elif verify_unmanaged is False:
             return self.failure("Failed to verify resources became unmanaged during maintenance mode")
         elif verify_managed is False:
             return self.failure("Failed to verify resources switched back to managed after disabling maintenance mode")
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [ """Updating failcount for %s""" % self.rid,
                  """LogActions: Recover %s""" % self.rid,
                  """Unknown operation: fail""",
                  """(ERROR|error): sending stonithRA op to stonithd failed.""",
                  self.CM["Pat:RscOpOK"] % (self.rid, ("%s_%d" % (self.action, self.interval))),
                  """(ERROR|error): process_graph_event: Action %s_%s_%d .* initiated outside of a transition""" % (self.rid, self.action, self.interval),
                 ]
 
 AllTestClasses.append(MaintenanceMode)
 
 ###################################################################
 class ResourceRecover(CTSTest):
 ###################################################################
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="ResourceRecover"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.max=30
         self.rid=None
         self.rid_alt=None
         #self.is_unsafe = 1
         self.benchmark = 1
 
         # these are the values used for the new LRM API call
         self.action = "asyncmon"
         self.interval = 0
 
     def __call__(self, node):
         '''Perform the 'ResourceRecover' test. '''
         self.incr("calls")
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         resourcelist = self.CM.active_resources(node)
         # if there are no active resources, skip the test
         if len(resourcelist)==0:
             self.CM.log("No active resources on %s" % node)
             return self.skipped()
 
         self.rid = self.CM.Env.RandomGen.choice(resourcelist)
         self.rid_alt = self.rid
 
         rsc = None
         (rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 tmp = AuditResource(self.CM, line)
                 if tmp.id == self.rid:
                     rsc = tmp
                     # Handle anonymous clones that get renamed
                     self.rid = rsc.clone_id
                     break
 
         if not rsc:
             return self.failure("Could not find %s in the resource list" % self.rid)
 
         self.CM.debug("Shooting %s aka. %s" % (rsc.clone_id, rsc.id))
 
         pats = []
         pats.append("Updating failcount for %s on .* after .* %s"
                     % (self.rid, self.action))
 
         if rsc.managed():
             pats.append(self.CM["Pat:RscOpOK"] % (self.rid, "stop_0"))
             if rsc.unique():
                 pats.append(self.CM["Pat:RscOpOK"] % (self.rid, "start_0"))
             else:
                 # Anonymous clones may get restarted with a different clone number
                 pats.append(self.CM["Pat:RscOpOK"] % (".*", "start_0"))
 
         watch = self.create_watch(pats, 60)
         watch.setwatch()
 
         self.CM.rsh(node, "crm_resource -V -F -r %s -H %s &>/dev/null" % (self.rid, node))
 
         self.set_timer("recover")
         watch.lookforall()
         self.log_timer("recover")
 
         self.CM.cluster_stable()
         recovered=self.CM.ResourceLocation(self.rid)
 
         if watch.unmatched:
             return self.failure("Patterns not found: %s" % repr(watch.unmatched))
 
         elif rsc.unique() and len(recovered) > 1:
             return self.failure("%s is now active on more than one node: %s"%(self.rid, repr(recovered)))
 
         elif len(recovered) > 0:
             self.CM.debug("%s is running on: %s" %(self.rid, repr(recovered)))
 
         elif rsc.managed():
             return self.failure("%s was not recovered and is inactive" % self.rid)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [ """Updating failcount for %s""" % self.rid,
                  """LogActions: Recover %s""" % self.rid,
                  """LogActions: Recover %s""" % self.rid_alt,
                  """Unknown operation: fail""",
                  """(ERROR|error): sending stonithRA op to stonithd failed.""",
                  self.CM["Pat:RscOpOK"] % (self.rid, ("%s_%d" % (self.action, self.interval))),
                  """(ERROR|error): process_graph_event: Action %s_%s_%d .* initiated outside of a transition""" % (self.rid, self.action, self.interval),
                  ]
 
 AllTestClasses.append(ResourceRecover)
 
 ###################################################################
 class ComponentFail(CTSTest):
 ###################################################################
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="ComponentFail"
         self.startall = SimulStartLite(cm)
         self.complist = cm.Components()
         self.patterns = []
         self.okerrpatterns = []
         self.is_unsafe = 1
 
     def __call__(self, node):
         '''Perform the 'ComponentFail' test. '''
         self.incr("calls")
         self.patterns = []
         self.okerrpatterns = []
 
         # start all nodes
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         if not self.CM.cluster_stable(self.CM["StableTime"]):
             return self.failure("Setup failed - unstable")
 
         node_is_dc = self.CM.is_node_dc(node, None)
 
         # select a component to kill
         chosen = self.CM.Env.RandomGen.choice(self.complist)
         while chosen.dc_only == 1 and node_is_dc == 0:
             chosen = self.CM.Env.RandomGen.choice(self.complist)
 
         self.CM.debug("...component %s (dc=%d,boot=%d)" % (chosen.name, node_is_dc,chosen.triggersreboot))
         self.incr(chosen.name)
 
         if chosen.name != "aisexec" and chosen.name != "corosync":
             if self.CM["Name"] != "crm-lha" or chosen.name != "pengine":
                 self.patterns.append(self.CM["Pat:ChildKilled"] %(node, chosen.name))
                 self.patterns.append(self.CM["Pat:ChildRespawn"] %(node, chosen.name))
 
         self.patterns.extend(chosen.pats)
         if node_is_dc:
           self.patterns.extend(chosen.dc_pats)
 
         # In an ideal world, this next stuff should be in the "chosen" object as a member function
         if self.CM["Name"] == "crm-lha" and chosen.triggersreboot:
             # Make sure the node goes down and then comes back up if it should reboot...
             for other in self.CM.Env["nodes"]:
                 if other != node:
                     self.patterns.append(self.CM["Pat:They_stopped"] %(other, self.CM.key_for_node(node)))
             self.patterns.append(self.CM["Pat:Slave_started"] % node)
             self.patterns.append(self.CM["Pat:Local_started"] % node)
 
             if chosen.dc_only:
                 # Sometimes these will be in the log, and sometimes they won't...
                 self.okerrpatterns.append("%s .*Process %s:.* exited" %(node, chosen.name))
                 self.okerrpatterns.append("%s .*I_ERROR.*crmdManagedChildDied" %node)
                 self.okerrpatterns.append("%s .*The %s subsystem terminated unexpectedly" %(node, chosen.name))
                 self.okerrpatterns.append("(ERROR|error): Client .* exited with return code")
             else:
                 # Sometimes this won't be in the log...
                 self.okerrpatterns.append(self.CM["Pat:ChildKilled"] %(node, chosen.name))
                 self.okerrpatterns.append(self.CM["Pat:ChildRespawn"] %(node, chosen.name))
                 self.okerrpatterns.append(self.CM["Pat:ChildExit"])
 
         # supply a copy so self.patterns doesn't end up empty
         tmpPats = []
         tmpPats.extend(self.patterns)
         self.patterns.extend(chosen.badnews_ignore)
 
         # Look for STONITH ops; depending on Env["at-boot"] we might need to change the node's status
         stonithPats = []
         stonithPats.append(self.CM["Pat:Fencing_ok"] % node)
         stonith = self.create_watch(stonithPats, 0)
         stonith.setwatch()
 
         # set the watch for stable
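         # Allow enough time for failure detection, recovery and a possible restart of the killed component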
         watch = self.create_watch(
             tmpPats, self.CM["DeadTime"] + self.CM["StableTime"] + self.CM["StartTime"])
         watch.setwatch()
 
         # kill the component
         chosen.kill(node)
 
         self.CM.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
 
         self.CM.debug("Waiting for any STONITHd node to come back up")
         self.CM.ns.WaitForAllNodesToComeUp(self.CM.Env["nodes"], 600)
 
         self.CM.debug("Waiting for the cluster to re-stabilize with all nodes")
         self.CM.cluster_stable(self.CM["StartTime"])
 
         self.CM.debug("Checking if %s was shot" % node)
         shot = stonith.look(60)
         if shot:
             self.CM.debug("Found: "+ repr(shot))
             self.okerrpatterns.append(self.CM["Pat:Fencing_start"] % node)
 
             if self.CM.Env["at-boot"] == 0:
                 self.CM.ShouldBeStatus[node]="down"
 
             # If fencing occurred, chances are many (if not all) the expected logs
             # will not be sent - or will be lost when the node reboots
             return self.success()
 
         # check for logs indicating a graceful recovery
         matched = watch.lookforall(allow_multiple_matches=1)
         if watch.unmatched:
             self.CM.log("Patterns not found: " + repr(watch.unmatched))
 
         self.CM.debug("Waiting for the cluster to re-stabilize with all nodes")
         is_stable = self.CM.cluster_stable(self.CM["StartTime"])
 
         if not matched:
             return self.failure("Didn't find all expected %s patterns" % chosen.name)
         elif not is_stable:
             return self.failure("Cluster did not become stable after killing %s" % chosen.name)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         # Note that okerrpatterns refers to the last time we ran this test
         # The good news is that this works fine for us...
         self.okerrpatterns.extend(self.patterns)
         return self.okerrpatterns
 
 AllTestClasses.append(ComponentFail)
 
 ####################################################################
 class SplitBrainTest(CTSTest):
 ####################################################################
     '''Test split-brain: when the path between the two nodes breaks,
        check whether both nodes take over the resources'''
     def __init__(self,cm):
         CTSTest.__init__(self,cm)
         self.name = "SplitBrain"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.is_experimental = 1
 
     def isolate_partition(self, partition):
         other_nodes = []
         other_nodes.extend(self.CM.Env["nodes"])
 
         for node in partition:
             try:
                 other_nodes.remove(node)
             except ValueError:
                 self.CM.log("Node "+node+" not in " + repr(self.CM.Env["nodes"]) + " from " +repr(partition))
 
         if len(other_nodes) == 0:
             return 1
 
         self.CM.debug("Creating partition: " + repr(partition))
         self.CM.debug("Everyone else: " + repr(other_nodes))
 
         for node in partition:
             if not self.CM.isolate_node(node, other_nodes):
                 self.CM.log("Could not isolate %s" % node)
                 return 0
 
         return 1
 
     def heal_partition(self, partition):
         other_nodes = []
         other_nodes.extend(self.CM.Env["nodes"])
 
         for node in partition:
             try:
                 other_nodes.remove(node)
             except ValueError:
                 self.CM.log("Node "+node+" not in " + repr(self.CM.Env["nodes"]))
 
         if len(other_nodes) == 0:
             return 1
 
         self.CM.debug("Healing partition: " + repr(partition))
         self.CM.debug("Everyone else: " + repr(other_nodes))
 
         for node in partition:
             self.CM.unisolate_node(node, other_nodes)
 
     def __call__(self, node):
         '''Perform split-brain test'''
         self.incr("calls")
         self.passed = 1
         partitions = {}
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed")
 
         while 1:
             # Retry until we get multiple partitions
             partitions = {}
             p_max = len(self.CM.Env["nodes"])
             for node in self.CM.Env["nodes"]:
                 p = self.CM.Env.RandomGen.randint(1, p_max)
                 if not partitions.has_key(p):
                     partitions[p]= []
                 partitions[p].append(node)
             p_max = len(partitions.keys())
             if p_max > 1:
                 break
             # else, try again
 
         self.CM.debug("Created %d partitions" % p_max)
         for key in partitions.keys():
             self.CM.debug("Partition["+str(key)+"]:\t"+repr(partitions[key]))
 
         # Disabling STONITH to reduce test complexity for now
         self.CM.rsh(node, "crm_attribute -V -n stonith-enabled -v false")
 
         for key in partitions.keys():
             self.isolate_partition(partitions[key])
 
         count = 30
         while count > 0:
             if len(self.CM.find_partitions()) != p_max:
                 time.sleep(10)
                 count -= 1
             else:
                 break
         else:
             self.failure("Expected partitions were not created")
 
         # Target number of partitions formed - wait for stability
         if not self.CM.cluster_stable():
             self.failure("Partitioned cluster not stable")
 
         # Now audit the cluster state
         self.CM.partitions_expected = p_max
         if not self.audit():
             self.failure("Audits failed")
         self.CM.partitions_expected = 1
 
         # And heal them again
         for key in partitions.keys():
             self.heal_partition(partitions[key])
 
         # Wait for a single partition to form
         count = 30
         while count > 0:
             if len(self.CM.find_partitions()) != 1:
                 time.sleep(10)
                 count -= 1
             else:
                 break
         else:
             self.failure("Cluster did not reform")
 
         # Wait for it to have the right number of members
         count = 30
         while count > 0:
             members = []
 
             partitions = self.CM.find_partitions()
             if len(partitions) > 0:
                 members = partitions[0].split()
 
             if len(members) != len(self.CM.Env["nodes"]):
                 time.sleep(10)
                 count -= 1
             else:
                 break
         else:
             self.failure("Cluster did not completely reform")
 
         # Wait up to 20 minutes - the delay is preferable to
         # trying to continue in a messed-up state
         if not self.CM.cluster_stable(1200):
             self.failure("Reformed cluster not stable")
             answer = raw_input('Continue? [nY]')
             if answer and answer == "n":
                 raise ValueError("Reformed cluster not stable")
 
         # Turn fencing back on
         if self.CM.Env["DoFencing"]:
             self.CM.rsh(node, "crm_attribute -V -D -n stonith-enabled")
 
         self.CM.cluster_stable()
 
         if self.passed:
             return self.success()
         return self.failure("See previous errors")
 
     def errorstoignore(self):
         '''Return list of errors which are 'normal' and should be ignored'''
         return [
             "Another DC detected:",
             "(ERROR|error): attrd_cib_callback: .*Application of an update diff failed",
             "crmd_ha_msg_callback:.*not in our membership list",
             "CRIT:.*node.*returning after partition",
             ]
 
     def is_applicable(self):
         if not self.is_applicable_common():
             return 0
         return len(self.CM.Env["nodes"]) > 2
 
 AllTestClasses.append(SplitBrainTest)
 
 ####################################################################
 class Reattach(CTSTest):
 ####################################################################
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="Reattach"
         self.startall = SimulStartLite(cm)
         self.restart1 = RestartTest(cm)
         self.stopall = SimulStopLite(cm)
         self.is_unsafe = 0 # Handled by canrunnow()
 
     def setup(self, node):
         attempt=0
         if not self.startall(None):
             return None
 
         # Make sure we are really _really_ stable and that all
         # resources, including those that depend on transient node
         # attributes, are started
         while not self.CM.cluster_stable(double_check=True):
             if attempt < 5:
                 attempt += 1
                 self.CM.debug("Not stable yet, re-testing")
             else:
                 self.CM.log("Cluster is not stable")
                 return None
 
         return 1
 
     def teardown(self, node):
 
         # Make sure 'node' is up
         start = StartTest(self.CM)
         start(node)
 
         is_managed = self.CM.rsh(node, "crm_attribute -Q -G -t crm_config -n is-managed-default -d true", 1)
         is_managed = is_managed[:-1] # Strip off the newline
         if is_managed != "true":
             self.CM.log("Attempting to re-enable resource management on %s (%s)" % (node, is_managed))
             managed = self.create_watch(["is-managed-default"], 60)
             managed.setwatch()
 
             self.CM.rsh(node, "crm_attribute -V -D -n is-managed-default")
 
             if not managed.lookforall():
                 self.CM.log("Patterns not found: " + repr(managed.unmatched))
                 self.CM.log("Could not re-enable resource management")
                 return 0
 
         return 1
 
     def canrunnow(self, node):
         '''Return TRUE if we can meaningfully run right now'''
         if self.find_ocfs2_resources(node):
             self.CM.log("Detach/Reattach scenarios are not possible with OCFS2 services present")
             return 0
         return 1
 
     def __call__(self, node):
         self.incr("calls")
 
         pats = []
         managed = self.create_watch(["is-managed-default"], 60)
         managed.setwatch()
 
         self.CM.debug("Disable resource management")
         self.CM.rsh(node, "crm_attribute -V -n is-managed-default -v false")
 
         if not managed.lookforall():
             self.CM.log("Patterns not found: " + repr(managed.unmatched))
             return self.failure("Resource management not disabled")
 
         pats = []
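         # Any of these operations appearing in the logs indicates resource activity,
         # which should not happen while resource management is disabled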
         pats.append(self.CM["Pat:RscOpOK"] % (".*", "start"))
         pats.append(self.CM["Pat:RscOpOK"] % (".*", "stop"))
         pats.append(self.CM["Pat:RscOpOK"] % (".*", "promote"))
         pats.append(self.CM["Pat:RscOpOK"] % (".*", "demote"))
         pats.append(self.CM["Pat:RscOpOK"] % (".*", "migrate"))
 
         watch = self.create_watch(pats, 60, "ShutdownActivity")
         watch.setwatch()
 
         self.CM.debug("Shutting down the cluster")
         ret = self.stopall(None)
         if not ret:
             self.CM.debug("Re-enable resource management")
             self.CM.rsh(node, "crm_attribute -V -D -n is-managed-default")
             return self.failure("Couldn't shut down the cluster")
 
         self.CM.debug("Bringing the cluster back up")
         ret = self.startall(None)
         time.sleep(5) # allow ping to update the CIB
         if not ret:
             self.CM.debug("Re-enable resource management")
             self.CM.rsh(node, "crm_attribute -V -D -n is-managed-default")
             return self.failure("Couldn't restart the cluster")
 
         if self.local_badnews("ResourceActivity:", watch):
             self.CM.debug("Re-enable resource management")
             self.CM.rsh(node, "crm_attribute -V -D -n is-managed-default")
             return self.failure("Resources stopped or started during cluster restart")
 
         watch = self.create_watch(pats, 60, "StartupActivity")
         watch.setwatch()
 
         managed = self.create_watch(["is-managed-default"], 60)
         managed.setwatch()
 
         self.CM.debug("Re-enable resource management")
         self.CM.rsh(node, "crm_attribute -V -D -n is-managed-default")
 
         if not managed.lookforall():
             self.CM.log("Patterns not found: " + repr(managed.unmatched))
             return self.failure("Resource management not enabled")
 
         self.CM.cluster_stable()
 
         # Ignore actions for STONITH resources
         ignore = []
         (rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 r = AuditResource(self.CM, line)
                 if r.rclass == "stonith":
 
                     self.CM.debug("Ignoring start actions for %s" % r.id)
                     ignore.append(self.CM["Pat:RscOpOK"] % (r.id, "start_0"))
 
         if self.local_badnews("ResourceActivity:", watch, ignore):
             return self.failure("Resources stopped or started after resource management was re-enabled")
 
         return ret
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [
             "resources were active at shutdown",
             "pingd: .*(ERROR|error): send_ipc_message:",
             "pingd: .*(ERROR|error): send_update:",
             "lrmd: .*(ERROR|error): notify_client:",
             ]
 
     def is_applicable(self):
         if self.CM["Name"] == "crm-lha":
             return None
         return 1
 
 AllTestClasses.append(Reattach)
 
 ####################################################################
 class SpecialTest1(CTSTest):
 ####################################################################
     '''Set up a custom test to cause quorum failure issues for Andrew'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="SpecialTest1"
         self.startall = SimulStartLite(cm)
         self.restart1 = RestartTest(cm)
         self.stopall = SimulStopLite(cm)
 
     def __call__(self, node):
         '''Perform the 'SpecialTest1' test for Andrew. '''
         self.incr("calls")
 
         #        Shut down all the nodes...
         ret = self.stopall(None)
         if not ret:
             return self.failure("Could not stop all nodes")
 
         # Test config recovery when the other nodes come up
         self.CM.rsh(node, "rm -f "+CTSvars.CRM_CONFIG_DIR+"/cib*")
 
         #        Start the selected node
         ret = self.restart1(node)
         if not ret:
             return self.failure("Could not start "+node)
 
         #        Start all remaining nodes
         ret = self.startall(None)
         if not ret:
             return self.failure("Could not start the remaining nodes")
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         # Errors that occur as a result of the CIB being wiped
         return [
             """warning: retrieveCib: Cluster configuration not found:""",
             """error: cib_perform_op: v1 patchset error, patch failed to apply: Application of an update diff failed""",
             """error: unpack_resources: Resource start-up disabled since no STONITH resources have been defined""",
             """error: unpack_resources: Either configure some or disable STONITH with the stonith-enabled option""",
             """error: unpack_resources: NOTE: Clusters with shared data need STONITH to ensure data integrity""",
         ]
 
 AllTestClasses.append(SpecialTest1)
 
 ####################################################################
 class HAETest(CTSTest):
 ####################################################################
     '''Base class for tests of the HA extension (DLM/O2CB/OCFS2) resources'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="HAETest"
         self.stopall = SimulStopLite(cm)
         self.startall = SimulStartLite(cm)
         self.is_loop = 1
 
     def setup(self, node):
         #  Start all remaining nodes
         ret = self.startall(None)
         if not ret:
             return self.failure("Couldn't start all nodes")
         return self.success()
 
     def teardown(self, node):
         # Stop everything
         ret = self.stopall(None)
         if not ret:
             return self.failure("Couldn't stop all nodes")
         return self.success()
 
     def wait_on_state(self, node, resource, expected_clones, attempts=240):
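         # Poll crm_resource roughly once a second until the resource is active
         # the expected number of times, or until an error condition is hit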
         while attempts > 0:
             active=0
             (rc, lines) = self.CM.rsh(node, "crm_resource -r %s -W -Q" % resource, stdout=None)
 
             # Hack until crm_resource does the right thing
             if rc == 0 and lines:
                 active = len(lines)

             if active == expected_clones:
                 return 1
 
             elif rc == 1:
                 self.CM.debug("Resource %s is still inactive" % resource)
 
             elif rc == 234:
                 self.CM.log("Unknown resource %s" % resource)
                 return 0
 
             elif rc == 246:
                 self.CM.log("Cluster is inactive")
                 return 0
 
             elif rc != 0:
                 self.CM.log("Call to crm_resource failed, rc=%d" % rc)
                 return 0
 
             else:
                 self.CM.debug("Resource %s is active on %d times instead of %d" % (resource, active, expected_clones))
 
             attempts -= 1
             time.sleep(1)
 
         return 0
 
     def find_dlm(self, node):
         self.r_dlm = None
 
         (rc, lines) = self.CM.rsh(node, "crm_resource -c", None)
         for line in lines:
             if re.search("^Resource", line):
                 r = AuditResource(self.CM, line)
                 if r.rtype == "controld" and r.parent != "NA":
                     self.CM.debug("Found dlm: %s" % self.r_dlm)
                     self.r_dlm = r.parent
                     return 1
         return 0
 
     def find_hae_resources(self, node):
         self.r_dlm = None
         self.r_o2cb = None
         self.r_ocfs2 = []
 
         if self.find_dlm(node):
             self.find_ocfs2_resources(node)
 
     def is_applicable(self):
         if not self.is_applicable_common():
             return 0
         if self.CM.Env["Schema"] == "hae":
             return 1
         return None
 
 ####################################################################
 class HAERoleTest(HAETest):
 ####################################################################
     def __init__(self, cm):
         '''Lars' mount/unmount test for the HA extension. '''
         HAETest.__init__(self,cm)
         self.name="HAERoleTest"
 
     def change_state(self, node, resource, target):
         rc = self.CM.rsh(node, "crm_resource -V -r %s -p target-role -v %s  --meta" % (resource, target))
         return rc
 
     def __call__(self, node):
         self.incr("calls")
         lpc = 0
         failed = 0
         delay = 2
         done=time.time() + self.CM.Env["loop-minutes"]*60
         self.find_hae_resources(node)
 
         clone_max = len(self.CM.Env["nodes"])
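         # Each clone is expected to run one instance on every cluster node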
         while time.time() <= done and not failed:
             lpc = lpc + 1
 
             self.change_state(node, self.r_dlm, "Stopped")
             if not self.wait_on_state(node, self.r_dlm, 0):
                 self.failure("%s did not go down correctly" % self.r_dlm)
                 failed = lpc
 
             self.change_state(node, self.r_dlm, "Started")
             if not self.wait_on_state(node, self.r_dlm, clone_max):
                 self.failure("%s did not come up correctly" % self.r_dlm)
                 failed = lpc
 
             if not self.wait_on_state(node, self.r_o2cb, clone_max):
                 self.failure("%s did not come up correctly" % self.r_o2cb)
                 failed = lpc
 
             for fs in self.r_ocfs2:
                 if not self.wait_on_state(node, fs, clone_max):
                     self.failure("%s did not come up correctly" % fs)
                     failed = lpc
 
         if failed:
             return self.failure("iteration %d failed" % failed)
         return self.success()
 
 AllTestClasses.append(HAERoleTest)
 
 ####################################################################
 class HAEStandbyTest(HAETest):
 ####################################################################
     '''Standby/unstandby test for the HA extension resources'''
     def __init__(self, cm):
         HAETest.__init__(self,cm)
         self.name="HAEStandbyTest"
 
     def change_state(self, node, resource, target):
         rc = self.CM.rsh(node, "crm_standby -V -l reboot -v %s" % (target))
         return rc
 
     def __call__(self, node):
         self.incr("calls")
 
         lpc = 0
         failed = 0
         done=time.time() + self.CM.Env["loop-minutes"]*60
         self.find_hae_resources(node)
 
         clone_max = len(self.CM.Env["nodes"])
         while time.time() <= done and not failed:
             lpc = lpc + 1
 
             self.change_state(node, self.r_dlm, "true")
             if not self.wait_on_state(node, self.r_dlm, clone_max-1):
                 self.failure("%s did not go down correctly" % self.r_dlm)
                 failed = lpc
 
             self.change_state(node, self.r_dlm, "false")
             if not self.wait_on_state(node, self.r_dlm, clone_max):
                 self.failure("%s did not come up correctly" % self.r_dlm)
                 failed = lpc
 
             if not self.wait_on_state(node, self.r_o2cb, clone_max):
                 self.failure("%s did not come up correctly" % self.r_o2cb)
                 failed = lpc
 
             for fs in self.r_ocfs2:
                 if not self.wait_on_state(node, fs, clone_max):
                     self.failure("%s did not come up correctly" % fs)
                     failed = lpc
 
         if failed:
             return self.failure("iteration %d failed" % failed)
         return self.success()
 
 AllTestClasses.append(HAEStandbyTest)
 
 ###################################################################
 class NearQuorumPointTest(CTSTest):
 ###################################################################
     '''
     This test brings larger clusters near the quorum point (50%).
     In addition, it will test doing starts and stops at the same time.
 
     Here is how I think it should work:
     - loop over the nodes and decide randomly which will be up and which
       will be down.  Use a 50% probability for each of up/down.
     - figure out what to do to get into that state from the current state
     - in parallel, bring up those going up and bring down those going down.
     '''
 
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="NearQuorumPoint"
 
     def __call__(self, dummy):
         '''Perform the 'NearQuorumPoint' test. '''
         self.incr("calls")
         startset = []
         stopset = []
 
         stonith = self.CM.prepare_fencing_watcher("NearQuorumPoint")
         #decide what to do with each node
         for node in self.CM.Env["nodes"]:
             action = self.CM.Env.RandomGen.choice(["start","stop"])
             #action = self.CM.Env.RandomGen.choice(["start","stop","no change"])
             if action == "start" :
                 startset.append(node)
             elif action == "stop" :
                 stopset.append(node)
 
         self.CM.debug("start nodes:" + repr(startset))
         self.CM.debug("stop nodes:" + repr(stopset))
 
         #add search patterns
         watchpats = [ ]
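         # A node being stopped should log its own shutdown; a node being started
         # should log its startup, and a node that is already up should observe
         # the stopping nodes go away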
         for node in stopset:
             if self.CM.ShouldBeStatus[node] == "up":
                 watchpats.append(self.CM["Pat:We_stopped"] % node)
 
         for node in startset:
             if self.CM.ShouldBeStatus[node] == "down":
                 #watchpats.append(self.CM["Pat:Slave_started"] % node)
                 watchpats.append(self.CM["Pat:Local_started"] % node)
             else:
                 for stopping in stopset:
                     if self.CM.ShouldBeStatus[stopping] == "up":
                         watchpats.append(self.CM["Pat:They_stopped"] % (node, self.CM.key_for_node(stopping)))
 
         if len(watchpats) == 0:
             return self.skipped()
 
         if len(startset) != 0:
             watchpats.append(self.CM["Pat:DC_IDLE"])
 
         watch = self.create_watch(watchpats, self.CM["DeadTime"]+10)
 
         watch.setwatch()
 
         #begin actions
         for node in stopset:
             if self.CM.ShouldBeStatus[node] == "up":
                 self.CM.StopaCMnoBlock(node)
 
         for node in startset:
             if self.CM.ShouldBeStatus[node] == "down":
                 self.CM.StartaCMnoBlock(node)
 
         #get the result
         if watch.lookforall():
             self.CM.cluster_stable()
             self.CM.fencing_cleanup("NearQuorumPoint", stonith)
             return self.success()
 
         self.CM.log("Warn: Patterns not found: " + repr(watch.unmatched))
 
         #get the "bad" nodes
         upnodes = []
         for node in stopset:
             if self.CM.StataCM(node) == 1:
                 upnodes.append(node)
 
         downnodes = []
         for node in startset:
             if self.CM.StataCM(node) == 0:
                 downnodes.append(node)
 
         self.CM.fencing_cleanup("NearQuorumPoint", stonith)
         if upnodes == [] and downnodes == []:
             self.CM.cluster_stable()
 
             # Make sure they're completely down with no residue
             for node in stopset:
                 self.CM.rsh(node, self.CM["StopCmd"])
 
             return self.success()
 
         if len(upnodes) > 0:
             self.CM.log("Warn: Unstoppable nodes: " + repr(upnodes))
 
         if len(downnodes) > 0:
             self.CM.log("Warn: Unstartable nodes: " + repr(downnodes))
 
         return self.failure()
 
     def is_applicable(self):
         if self.CM["Name"] == "crm-cman":
             return None
         return 1
 
 AllTestClasses.append(NearQuorumPointTest)
 
 ###################################################################
 class RollingUpgradeTest(CTSTest):
 ###################################################################
     '''Perform a rolling upgrade of the cluster'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="RollingUpgrade"
         self.start = StartTest(cm)
         self.stop = StopTest(cm)
         self.stopall = SimulStopLite(cm)
         self.startall = SimulStartLite(cm)
 
     def setup(self, node):
         #  Start all remaining nodes
         ret = self.stopall(None)
         if not ret:
             return self.failure("Couldn't stop all nodes")
 
         for node in self.CM.Env["nodes"]:
             if not self.downgrade(node, None):
                 return self.failure("Couldn't downgrade %s" % node)
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Couldn't start all nodes")
         return self.success()
 
     def teardown(self, node):
         # Stop everything
         ret = self.stopall(None)
         if not ret:
             return self.failure("Couldn't stop all nodes")
 
         for node in self.CM.Env["nodes"]:
             if not self.upgrade(node, None):
                 return self.failure("Couldn't upgrade %s" % node)
 
         return self.success()
 
     def install(self, node, version, start=1, flags="--force"):
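         # Stop the cluster on the node, copy over the RPMs for 'version',
         # install them with rpm -Uvh and (optionally) start the cluster again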
 
         target_dir = "/tmp/rpm-%s" % version
         src_dir = "%s/%s" % (self.CM.Env["rpm-dir"], version)
 
         self.CM.log("Installing %s on %s with %s" % (version, node, flags))
         if not self.stop(node):
             return self.failure("stop failure: "+node)
 
         rc = self.CM.rsh(node, "mkdir -p %s" % target_dir)
         rc = self.CM.rsh(node, "rm -f %s/*.rpm" % target_dir)
         (rc, lines) = self.CM.rsh(node, "ls -1 %s/*.rpm" % src_dir, None)
         for line in lines:
             line = line[:-1]
             rc = self.CM.rsh.cp("%s" % (line), "%s:%s/" % (node, target_dir))
         rc = self.CM.rsh(node, "rpm -Uvh %s %s/*.rpm" % (flags, target_dir))
 
         if start and not self.start(node):
             return self.failure("start failure: "+node)
 
         return self.success()
 
     def upgrade(self, node, start=1):
         return self.install(node, self.CM.Env["current-version"], start)
 
     def downgrade(self, node, start=1):
         return self.install(node, self.CM.Env["previous-version"], start, "--force --nodeps")
 
     def __call__(self, node):
         '''Perform the 'Rolling Upgrade' test. '''
         self.incr("calls")
 
         for node in self.CM.Env["nodes"]:
             if not self.upgrade(node):
                 return self.failure("Couldn't upgrade %s" % node)
 
             self.CM.cluster_stable()
 
         return self.success()
 
     def is_applicable(self):
         if not self.is_applicable_common():
             return None
 
         if not self.CM.Env.has_key("rpm-dir"):
             return None
         if not self.CM.Env.has_key("current-version"):
             return None
         if not self.CM.Env.has_key("previous-version"):
             return None
 
         return 1
 
 #        Register RollingUpgradeTest as a good test to run
 AllTestClasses.append(RollingUpgradeTest)
 
 ###################################################################
 class BSC_AddResource(CTSTest):
 ###################################################################
     '''Add a resource to the cluster'''
     def __init__(self, cm):
         CTSTest.__init__(self, cm)
         self.name="AddResource"
         self.resource_offset = 0
         self.cib_cmd="""cibadmin -C -o %s -X '%s' """
 
     def __call__(self, node):
         self.incr("calls")
         self.resource_offset = self.resource_offset + 1
 
         r_id = "bsc-rsc-%s-%d" % (node, self.resource_offset)
         start_pat = "crmd.*%s_start_0.*confirmed.*ok"
 
         patterns = []
         patterns.append(start_pat % r_id)
 
         watch = self.create_watch(patterns, self.CM["DeadTime"])
         watch.setwatch()
 
         ip = self.NextIP()
         if not self.make_ip_resource(node, r_id, "ocf", "IPaddr", ip):
             return self.failure("Make resource %s failed" % r_id)
 
         failed = 0
         watch_result = watch.lookforall()
         if watch.unmatched:
             for regex in watch.unmatched:
                 self.CM.log ("Warn: Pattern not found: %s" % (regex))
                 failed = 1
 
         if failed:
             return self.failure("Resource pattern(s) not found")
 
         if not self.CM.cluster_stable(self.CM["DeadTime"]):
             return self.failure("Unstable cluster")
 
         return self.success()
 
     def NextIP(self):
         ip = self.CM.Env["IPBase"]
         if ":" in ip:
             # IPv6 address: increment the last (hexadecimal) group
             fields = list(ip.rpartition(":"))
             fields[2] = str(hex(int(fields[2], 16)+1))
         else:
             # IPv4 address: increment the last octet
             fields = list(ip.rpartition('.'))
             fields[2] = str(int(fields[2])+1)

         ip = fields[0] + fields[1] + fields[2]
         self.CM.Env["IPBase"] = ip
         return ip.strip()
 
     def make_ip_resource(self, node, id, rclass, type, ip):
         self.CM.log("Creating %s::%s:%s (%s) on %s" % (rclass,type,id,ip,node))
         rsc_xml="""
 <primitive id="%s" class="%s" type="%s"  provider="heartbeat">
     <instance_attributes id="%s"><attributes>
         <nvpair id="%s" name="ip" value="%s"/>
     </attributes></instance_attributes>
 </primitive>""" % (id, rclass, type, id, id, ip)
 
         node_constraint="""
       <rsc_location id="run_%s" rsc="%s">
         <rule id="pref_run_%s" score="100">
           <expression id="%s_loc_expr" attribute="#uname" operation="eq" value="%s"/>
         </rule>
       </rsc_location>""" % (id, id, id, id, node)
 
         rc = 0
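         # Create the location constraint first so the resource starts on the intended node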
         (rc, lines) = self.CM.rsh(node, self.cib_cmd % ("constraints", node_constraint), None)
         if rc != 0:
             self.CM.log("Constraint creation failed: %d" % rc)
             return None
 
         (rc, lines) = self.CM.rsh(node, self.cib_cmd % ("resources", rsc_xml), None)
         if rc != 0:
             self.CM.log("Resource creation failed: %d" % rc)
             return None
 
         return 1
 
     def is_applicable(self):
         if self.CM.Env["DoBSC"]:
             return 1
         return None
 
 AllTestClasses.append(BSC_AddResource)
 
 ###################################################################
 class SimulStopLite(CTSTest):
 ###################################################################
     '''Stop any active nodes ~ simultaneously'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="SimulStopLite"
 
     def __call__(self, dummy):
         '''Perform the 'SimulStopLite' setup work. '''
         self.incr("calls")
 
         self.CM.debug("Setup: " + self.name)
 
         #     We ignore the "node" parameter...
         watchpats = [ ]
 
         for node in self.CM.Env["nodes"]:
             if self.CM.ShouldBeStatus[node] == "up":
                 self.incr("WasStarted")
                 watchpats.append(self.CM["Pat:We_stopped"] % node)
                 #if self.CM.Env["use_logd"]:
                 #    watchpats.append(self.CM["Pat:Logd_stopped"] % node)
 
         if len(watchpats) == 0:
             self.CM.clear_all_caches()
             return self.success()
 
         #     Stop all the nodes - at about the same time...
         watch = self.create_watch(watchpats, self.CM["DeadTime"]+10)
 
         watch.setwatch()
         self.set_timer()
         for node in self.CM.Env["nodes"]:
             if self.CM.ShouldBeStatus[node] == "up":
                 self.CM.StopaCMnoBlock(node)
         if watch.lookforall():
             self.CM.clear_all_caches()
 
             # Make sure they're completely down with no residue
             for node in self.CM.Env["nodes"]:
                 self.CM.rsh(node, self.CM["StopCmd"])
 
             return self.success()
 
         did_fail=0
         up_nodes = []
         for node in self.CM.Env["nodes"]:
             if self.CM.StataCM(node) == 1:
                 did_fail=1
                 up_nodes.append(node)
 
         if did_fail:
             return self.failure("Active nodes exist: " + repr(up_nodes))
 
         self.CM.log("Warn: All nodes stopped but CTS didnt detect: "
                     + repr(watch.unmatched))
 
         self.CM.clear_all_caches()
         return self.failure("Missing log message: "+repr(watch.unmatched))
 
     def is_applicable(self):
         '''SimulStopLite is a setup test and never applicable'''
         return 0
 
 ###################################################################
 class SimulStartLite(CTSTest):
 ###################################################################
     '''Start any stopped nodes ~ simultaneously'''
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="SimulStartLite"
 
     def __call__(self, dummy):
         '''Perform the 'SimulStartLite' setup work. '''
         self.incr("calls")
         self.CM.debug("Setup: " + self.name)
 
         #        We ignore the "node" parameter...
         node_list = []
         for node in self.CM.Env["nodes"]:
             if self.CM.ShouldBeStatus[node] == "down":
                 self.incr("WasStopped")
                 node_list.append(node)
 
         self.set_timer()
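         # Nodes that get fenced during startup are returned by fencing_cleanup()
         # and retried on the next pass through this loop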
         while len(node_list) > 0:
             watchpats = [ ]
 
             uppat = self.CM["Pat:Slave_started"]
             if self.CM.upcount() == 0:
                 uppat = self.CM["Pat:Local_started"]
 
             watchpats.append(self.CM["Pat:DC_IDLE"])
             for node in node_list:
                 watchpats.append(uppat % node)
                 watchpats.append(self.CM["Pat:InfraUp"] % node)
                 watchpats.append(self.CM["Pat:PacemakerUp"] % node)
 
             #   Start all the nodes - at about the same time...
             watch = self.create_watch(watchpats, self.CM["DeadTime"]+10)
             watch.setwatch()
 
             stonith = self.CM.prepare_fencing_watcher(self.name)
 
             for node in node_list:
                 self.CM.StartaCMnoBlock(node)
 
             watch.lookforall()
             node_list = self.CM.fencing_cleanup(self.name, stonith)
 
             # Remove node_list messages from watch.unmatched
             for node in node_list:
                 if watch.unmatched:
                     watch.unmatched.remove(uppat % node)
 
             if watch.unmatched:
                 for regex in watch.unmatched:
                     self.CM.log ("Warn: Startup pattern not found: %s" %(regex))
 
             if not self.CM.cluster_stable():
                 return self.failure("Cluster did not stabilize")
 
         did_fail=0
         unstable = []
         for node in self.CM.Env["nodes"]:
             if self.CM.StataCM(node) == 0:
                 did_fail=1
                 unstable.append(node)
 
         if did_fail:
             return self.failure("Unstarted nodes exist: " + repr(unstable))
 
         unstable = []
         for node in self.CM.Env["nodes"]:
             if not self.CM.node_stable(node):
                 did_fail=1
                 unstable.append(node)
 
         if did_fail:
             return self.failure("Unstable cluster nodes exist: " + repr(unstable))
 
         return self.success()
 
 
     def is_applicable(self):
         '''SimulStartLite is a setup test and never applicable'''
         return 0
 
 def TestList(cm, audits):
     result = []
     for testclass in AllTestClasses:
         bound_test = testclass(cm)
         if bound_test.is_applicable():
             bound_test.Audits = audits
             result.append(bound_test)
     return result
 
 ###################################################################
 class RemoteLXC(CTSTest):
 ###################################################################
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="RemoteLXC"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.num_containers = 2
         self.is_container = 1
         self.failed = 0
         self.fail_string = ""
 
     def start_lxc_simple(self, node):
 
         rc = self.CM.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -v &>/dev/null")
         if rc == 1:
             return self.skipped()
 
         # restore any artifacts lying around from a previous test.
         self.CM.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -R &>/dev/null")
 
         # generate the containers, put them in the config, add some resources to them
         pats = [ ]
         watch = self.create_watch(pats, 120)
         watch.setwatch()
         pats.append(self.CM["Pat:RscOpOK"] % ("lxc1", "start_0"))
         pats.append(self.CM["Pat:RscOpOK"] % ("lxc2", "start_0"))
         pats.append(self.CM["Pat:RscOpOK"] % ("lxc-ms", "start_0"))
         pats.append(self.CM["Pat:RscOpOK"] % ("lxc-ms", "promote_0"))
 
         self.CM.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -g -a -m -s -c %d &>/dev/null" % self.num_containers)
         self.set_timer("remoteSimpleInit")
         watch.lookforall()
         self.log_timer("remoteSimpleInit")
         if watch.unmatched:
             self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
             self.failed = 1
 
     def cleanup_lxc_simple(self, node):
 
         pats = [ ]
         # if the test failed, attempt to clean up the cib and libvirt environment
         # as best as possible 
         if self.failed == 1:
             # restore libvirt and cib
             self.CM.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -R &>/dev/null")
             self.CM.rsh(node, "crm_resource -C -r container1 &>/dev/null")
             self.CM.rsh(node, "crm_resource -C -r container2 &>/dev/null")
             self.CM.rsh(node, "crm_resource -C -r lxc1 &>/dev/null")
             self.CM.rsh(node, "crm_resource -C -r lxc2 &>/dev/null")
             self.CM.rsh(node, "crm_resource -C -r lxc-ms &>/dev/null")
             time.sleep(20)
             return
 
         watch = self.create_watch(pats, 120)
         watch.setwatch()
 
         pats.append(self.CM["Pat:RscOpOK"] % ("container1", "stop_0"))
         pats.append(self.CM["Pat:RscOpOK"] % ("container2", "stop_0"))
 
         self.CM.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -p &>/dev/null")
         self.set_timer("remoteSimpleCleanup")
         watch.lookforall()
         self.log_timer("remoteSimpleCleanup")
 
         if watch.unmatched:
             self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
             self.failed = 1
 
         # cleanup libvirt
         self.CM.rsh(node, "/usr/share/pacemaker/tests/cts/lxc_autogen.sh -R &>/dev/null")
 
     def __call__(self, node):
         '''Perform the 'RemoteLXC' test. '''
         self.incr("calls")
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed, start all nodes failed.")
 
         self.start_lxc_simple(node)
         self.cleanup_lxc_simple(node)
 
         self.CM.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
 
         if self.failed == 1:
             return self.failure(self.fail_string)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [ """Updating failcount for ping""",
                  """LogActions: Recover ping""",
                  """LogActions: Recover lxc-ms""",
                  """LogActions: Recover container""",
                  # The orphaned lxc-ms resource causes an expected transition error
                  # that is a result of the pengine not having knowledge that the 
                  # ms resource used to be a clone.  As a result it looks like that 
                  # resource is running in multiple locations when it shouldn't... But in
                  # this instance we know why this error is occurring and that it is expected.
                  """Calculated Transition .* /var/lib/pacemaker/pengine/pe-error""",
                  """Resource lxc-ms .* is active on 2 nodes attempting recovery""",
                  """Unknown operation: fail""",
                  """notice: operation_finished: ping-""",
                  """notice: operation_finished: container""",
                  """notice: operation_finished: .*_monitor_0:.*:stderr""",
                  """(ERROR|error): sending stonithRA op to stonithd failed.""",
                 ]
 
 AllTestClasses.append(RemoteLXC)
 
 
 ###################################################################
 class RemoteBaremetal(CTSTest):
 ###################################################################
     def __init__(self, cm):
         CTSTest.__init__(self,cm)
         self.name="RemoteBaremetal"
         self.start = StartTest(cm)
         self.startall = SimulStartLite(cm)
         self.stop = StopTest(cm)
         self.pcmk_started=0
-        self.rsc_added=0
         self.failed = 0
         self.fail_string = ""
+        self.remote_node_added = 0
+        self.remote_node="remote1"
+        self.remote_rsc_added = 0
+        self.remote_rsc="remote1-rsc"
         self.cib_cmd="""cibadmin -C -o %s -X '%s' """
 
-    def del_connection_rsc(self, node):
-
-        if self.rsc_added == 0:
-            return
+    def del_rsc(self, node, rsc):
 
         for othernode in self.CM.Env["nodes"]:
             if othernode == node:
                 # we don't want to try to use the cib that we just shut down.
                 # find a cluster node that is not our soon-to-be remote-node.
                 continue
 
-            rc = self.CM.rsh(othernode, "crm_resource -D -r remote1 -t primitive")
+            rc = self.CM.rsh(othernode, "crm_resource -D -r %s -t primitive" % (rsc))
             if rc != 0:
-                self.fail_string = ("Connection resource removal failed")
+                self.fail_string = ("Removal of resource '%s' failed" % (rsc))
                 self.failed = 1
             else:
                 self.fail_string = ""
                 self.failed = 0
                 break
 
+    def add_rsc(self, node, rsc_xml):
+        failed=0
+        fail_string=""
+        for othernode in self.CM.Env["nodes"]:
+            if othernode == node:
+                # we don't want to try to use the cib that we just shut down.
+                # find a cluster node that is not our soon-to-be remote-node.
+                continue
+
+            rc = self.CM.rsh(othernode, self.cib_cmd % ("resources", rsc_xml))
+            if rc != 0:
+                fail_string = "resource creation failed"
+                failed = 1
+            else:
+                fail_string = ""
+                failed = 0
+                break
+
+        if failed == 1:
+            self.failed=failed
+            self.fail_string=fail_string
+
+    def add_primitive_rsc(self, node):
+        rsc_xml="""
+<primitive class="ocf" id="%s" provider="pacemaker" type="Dummy">
+    <operations>
+      <op id="remote1-rsc-monitor-interval-10s" interval="10s" name="monitor"/>
+    </operations>
+</primitive>""" % (self.remote_rsc)
+        self.add_rsc(node, rsc_xml)
+        if self.failed == 0:
+            self.remote_rsc_added=1
+
     def add_connection_rsc(self, node):
         rsc_xml="""
-<primitive class="ocf" id="remote1" provider="pacemaker" type="remote">
+<primitive class="ocf" id="%s" provider="pacemaker" type="remote">
     <instance_attributes id="remote1-instance_attributes"/>
         <instance_attributes id="remote1-instance_attributes">
           <nvpair id="remote1-instance_attributes-server" name="server" value="%s"/>
         </instance_attributes>
     <operations>
       <op id="remote1-monitor-interval-60s" interval="60s" name="monitor"/>
           <op id="remote1-name-start-interval-0-timeout-60" interval="0" name="start" timeout="60"/>
     </operations>
     <meta_attributes id="remote1-meta_attributes"/>
-</primitive>""" % node
-
-        for othernode in self.CM.Env["nodes"]:
-            if othernode == node:
-                # we don't want to try and use the cib that we just shutdown.
-                # find a cluster node that is not our soon to be remote-node.
-                continue
+</primitive>""" % (self.remote_node, node)
+        self.add_rsc(node, rsc_xml)
+        if self.failed == 0:
+            self.remote_node_added=1
 
-            rc = self.CM.rsh(othernode, self.cib_cmd % ("resources", rsc_xml))
-            if rc != 0:
-                self.fail_string = "Connection resource creation failed"
-                self.failed = 1
-            else:
-                self.fail_string = ""
-                self.failed = 0
-                self.rsc_added = 1
-                break
-
-    def start_metal(self, node):
+    def step1_start_metal(self, node):
         pcmk_started=0
 
         # make sure the resource doesn't already exist for some reason
-        self.CM.rsh(node, "crm_resource -D -r remote1 -t primitive")
+        self.CM.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_rsc))
+        self.CM.rsh(node, "crm_resource -D -r %s -t primitive" % (self.remote_node))
 
         if not self.stop(node):
             self.failed = 1
             self.fail_string = "Failed to shutdown cluster node %s" % (node)
             return
 
         for i in range(10):
             rc = self.CM.rsh(node, "service pacemaker_remote start")
             if rc != 0:
                 time.sleep(6)
             else:
                 self.pcmk_started = 1
                 break
 
         if self.pcmk_started == 0:
             self.failed = 1
             self.fail_string = "Failed to start pacemaker_remote on node %s" % (node)
             return
 
         # convert node to a baremetal node now that it has shut down the cluster stack
         pats = [ ]
         watch = self.create_watch(pats, 120)
         watch.setwatch()
-        pats.append(self.CM["Pat:RscOpOK"] % ("remote1", "start_0"))
+        pats.append("process_lrm_event: LRM operation %s_start_0.*confirmed.*ok" % (self.remote_node))
 
         self.add_connection_rsc(node)
 
         self.set_timer("remoteMetalInit")
         watch.lookforall()
         self.log_timer("remoteMetalInit")
         if watch.unmatched:
             self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
             self.failed = 1
 
+    def step2_add_rsc(self, node):
+        if self.failed == 1:
+            return
+
+
+        # verify we can put a resource on the remote node
+        pats = [ ]
+        watch = self.create_watch(pats, 120)
+        watch.setwatch()
+        pats.append("process_lrm_event: LRM operation %s_start_0.*node=%s, .*confirmed.*ok" % (self.remote_rsc, self.remote_node))
+
+        # Add a resource that must live on remote-node
+        self.add_primitive_rsc(node)
+        # this crm_resource command actually occurs on the remote node
+        # which verifies that the ipc proxy works
+        rc = self.CM.rsh(node, "crm_resource -M -r remote1-rsc -N %s" % (self.remote_node))
+        if rc != 0:
+            self.fail_string = "Failed to place primitive on remote-node"
+            self.failed = 1
+            return
+
+        self.set_timer("remoteMetalRsc")
+        watch.lookforall()
+        self.log_timer("remoteMetalRsc")
+        if watch.unmatched:
+            self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
+            self.failed = 1
 
     def cleanup_metal(self, node):
         if self.pcmk_started == 0:
             return
 
         pats = [ ]
 
         watch = self.create_watch(pats, 120)
         watch.setwatch()
-        pats.append(self.CM["Pat:RscOpOK"] % ("remote1", "stop_0"))
 
-        self.del_connection_rsc(node)
+        if self.remote_rsc_added == 1:
+            pats.append("process_lrm_event: LRM operation %s_stop_0.*confirmed.*ok" % (self.remote_rsc))
+        if self.remote_node_added == 1:
+            pats.append("process_lrm_event: LRM operation %s_stop_0.*confirmed.*ok" % (self.remote_node))
+
         self.set_timer("remoteMetalCleanup")
+        if self.remote_rsc_added == 1:
+            self.CM.rsh(node, "crm_resource -U -r remote1-rsc -N %s" % (self.remote_node))
+            self.del_rsc(node, self.remote_rsc)
+        if self.remote_node_added == 1:
+            self.del_rsc(node, self.remote_node)
         watch.lookforall()
         self.log_timer("remoteMetalCleanup")
 
         if watch.unmatched:
             self.fail_string = "Unmatched patterns: %s" % (repr(watch.unmatched))
             self.failed = 1
 
         # disable pcmk remote
         for i in range(10):
             rc = self.CM.rsh(node, "service pacemaker_remote stop")
             if rc != 0:
                 time.sleep(6)
             else:
                 break
 
     def setup_env(self):
         sync_key = 0
 
         # we are assuming if all nodes have a key, that it is
         # the right key... If any node doesn't have a remote
         # key, we regenerate it everywhere.
         for node in self.CM.Env["nodes"]:
             rc = self.CM.rsh(node, "ls /etc/pacemaker/authkey")
             if rc != 0:
                 sync_key = 1
                 break
 
         if sync_key == 0:
             return
 
         # create key locally
         os.system("/usr/share/pacemaker/tests/cts/lxc_autogen.sh -k &> /dev/null")
 
         # sync key throughout the cluster
         for node in self.CM.Env["nodes"]:
             rc = self.CM.rsh(node, "mkdir /etc/pacemaker")
             self.CM.rsh.cp("/etc/pacemaker/authkey", "%s:/etc/pacemaker/authkey" % (node))
 
     def __call__(self, node):
         '''Perform the 'RemoteBaremetal' test. '''
         self.incr("calls")
 
         ret = self.startall(None)
         if not ret:
             return self.failure("Setup failed, start all nodes failed.")
 
         self.setup_env()
-        self.start_metal(node)
+        self.step1_start_metal(node)
+        self.step2_add_rsc(node)
         self.cleanup_metal(node)
 
         self.CM.debug("Waiting for the cluster to recover")
         self.CM.cluster_stable()
         if self.failed == 1:
             return self.failure(self.fail_string)
 
         return self.success()
 
     def errorstoignore(self):
         '''Return list of errors which should be ignored'''
         return [ """is running on remote1 which isn't allowed""",
                  """Connection terminated""",
                  """Failed to send remote""",
                 ]
 
 AllTestClasses.append(RemoteBaremetal)
 
 # vim:ts=4:sw=4:et:
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index 15b512c0a2..7816d7eb73 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -1,768 +1,785 @@
 /*
  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
  * This library is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 
 void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length);
 
 resource_object_functions_t resource_class_functions[] = {
     {
      native_unpack,
      native_find_rsc,
      native_parameter,
      native_print,
      native_active,
      native_resource_state,
      native_location,
      native_free},
     {
      group_unpack,
      native_find_rsc,
      native_parameter,
      group_print,
      group_active,
      group_resource_state,
      native_location,
      group_free},
     {
      clone_unpack,
      native_find_rsc,
      native_parameter,
      clone_print,
      clone_active,
      clone_resource_state,
      native_location,
      clone_free},
     {
      master_unpack,
      native_find_rsc,
      native_parameter,
      clone_print,
      clone_active,
      clone_resource_state,
      native_location,
      clone_free}
 };
 
 enum pe_obj_types
 get_resource_type(const char *name)
 {
     if (safe_str_eq(name, XML_CIB_TAG_RESOURCE)) {
         return pe_native;
 
     } else if (safe_str_eq(name, XML_CIB_TAG_GROUP)) {
         return pe_group;
 
     } else if (safe_str_eq(name, XML_CIB_TAG_INCARNATION)) {
         return pe_clone;
 
     } else if (safe_str_eq(name, XML_CIB_TAG_MASTER)) {
         return pe_master;
     }
 
     return pe_unknown;
 }
 
 const char *
 get_resource_typename(enum pe_obj_types type)
 {
     switch (type) {
         case pe_native:
             return XML_CIB_TAG_RESOURCE;
         case pe_group:
             return XML_CIB_TAG_GROUP;
         case pe_clone:
             return XML_CIB_TAG_INCARNATION;
         case pe_master:
             return XML_CIB_TAG_MASTER;
         case pe_unknown:
             return "unknown";
     }
     return "<unknown>";
 }
 
 static void
 dup_attr(gpointer key, gpointer value, gpointer user_data)
 {
     add_hash_param(user_data, key, value);
 }
 
 void
 get_meta_attributes(GHashTable * meta_hash, resource_t * rsc,
                     node_t * node, pe_working_set_t * data_set)
 {
     GHashTable *node_hash = NULL;
 
     if (node) {
         node_hash = node->details->attrs;
     }
 
     if (rsc->xml) {
         xmlAttrPtr xIter = NULL;
 
         for (xIter = rsc->xml->properties; xIter; xIter = xIter->next) {
             const char *prop_name = (const char *)xIter->name;
             const char *prop_value = crm_element_value(rsc->xml, prop_name);
 
             add_hash_param(meta_hash, prop_name, prop_value);
         }
     }
 
     unpack_instance_attributes(data_set->input, rsc->xml, XML_TAG_META_SETS, node_hash,
                                meta_hash, NULL, FALSE, data_set->now);
 
     /* populate from the regular attributes until the GUI can create
      * meta attributes
      */
     unpack_instance_attributes(data_set->input, rsc->xml, XML_TAG_ATTR_SETS, node_hash,
                                meta_hash, NULL, FALSE, data_set->now);
 
     /* set anything else based on the parent */
     if (rsc->parent != NULL) {
         g_hash_table_foreach(rsc->parent->meta, dup_attr, meta_hash);
     }
 
     /* and finally check the defaults */
     unpack_instance_attributes(data_set->input, data_set->rsc_defaults, XML_TAG_META_SETS,
                                node_hash, meta_hash, NULL, FALSE, data_set->now);
 }
 
 void
 get_rsc_attributes(GHashTable * meta_hash, resource_t * rsc,
                    node_t * node, pe_working_set_t * data_set)
 {
     GHashTable *node_hash = NULL;
 
     if (node) {
         node_hash = node->details->attrs;
     }
 
     unpack_instance_attributes(data_set->input, rsc->xml, XML_TAG_ATTR_SETS, node_hash,
                                meta_hash, NULL, FALSE, data_set->now);
 
     if (rsc->container) {
         g_hash_table_replace(meta_hash, strdup(CRM_META"_"XML_RSC_ATTR_CONTAINER), strdup(rsc->container->id));
     }
 
     /* set anything else based on the parent */
     if (rsc->parent != NULL) {
         get_rsc_attributes(meta_hash, rsc->parent, node, data_set);
 
     } else {
         /* and finally check the defaults */
         unpack_instance_attributes(data_set->input, data_set->rsc_defaults, XML_TAG_ATTR_SETS,
                                    node_hash, meta_hash, NULL, FALSE, data_set->now);
     }
 }
 
 static char *
 template_op_key(xmlNode * op)
 {
     const char *name = crm_element_value(op, "name");
     const char *role = crm_element_value(op, "role");
     char *key = NULL;
 
     if (role == NULL || crm_str_eq(role, RSC_ROLE_STARTED_S, TRUE)
         || crm_str_eq(role, RSC_ROLE_SLAVE_S, TRUE)) {
         role = RSC_ROLE_UNKNOWN_S;
     }
 
     key = crm_concat(name, role, '-');
     return key;
 }
 
 static gboolean
 unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
 {
     xmlNode *cib_resources = NULL;
     xmlNode *template = NULL;
     xmlNode *new_xml = NULL;
     xmlNode *child_xml = NULL;
     xmlNode *rsc_ops = NULL;
     xmlNode *template_ops = NULL;
     const char *template_ref = NULL;
     const char *clone = NULL;
     const char *id = NULL;
 
     if (xml_obj == NULL) {
         pe_err("No resource object for template unpacking");
         return FALSE;
     }
 
     template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
     if (template_ref == NULL) {
         return TRUE;
     }
 
     id = ID(xml_obj);
     if (id == NULL) {
         pe_err("'%s' object must have a id", crm_element_name(xml_obj));
         return FALSE;
     }
 
     if (crm_str_eq(template_ref, id, TRUE)) {
         pe_err("The resource object '%s' should not reference itself", id);
         return FALSE;
     }
 
     cib_resources = get_xpath_object("//"XML_CIB_TAG_RESOURCES, data_set->input, LOG_TRACE);
     if (cib_resources == NULL) {
         pe_err("No resources configured");
         return FALSE;
     }
 
     template = find_entity(cib_resources, XML_CIB_TAG_RSC_TEMPLATE, template_ref);
     if (template == NULL) {
         pe_err("No template named '%s'", template_ref);
         return FALSE;
     }
 
     new_xml = copy_xml(template);
     xmlNodeSetName(new_xml, xml_obj->name);
     crm_xml_replace(new_xml, XML_ATTR_ID, id);
 
     clone = crm_element_value(xml_obj, XML_RSC_ATTR_INCARNATION);
     if(clone) {
         crm_xml_add(new_xml, XML_RSC_ATTR_INCARNATION, clone);
     }
 
     template_ops = find_xml_node(new_xml, "operations", FALSE);
 
     for (child_xml = __xml_first_child(xml_obj); child_xml != NULL;
          child_xml = __xml_next(child_xml)) {
         xmlNode *new_child = NULL;
 
         new_child = add_node_copy(new_xml, child_xml);
 
         if (crm_str_eq((const char *)new_child->name, "operations", TRUE)) {
             rsc_ops = new_child;
         }
     }
 
     if (template_ops && rsc_ops) {
         xmlNode *op = NULL;
         GHashTable *rsc_ops_hash =
             g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, NULL);
 
         for (op = __xml_first_child(rsc_ops); op != NULL; op = __xml_next(op)) {
             char *key = template_op_key(op);
 
             g_hash_table_insert(rsc_ops_hash, key, op);
         }
 
         for (op = __xml_first_child(template_ops); op != NULL; op = __xml_next(op)) {
             char *key = template_op_key(op);
 
             if (g_hash_table_lookup(rsc_ops_hash, key) == NULL) {
                 add_node_copy(rsc_ops, op);
             }
 
             free(key);
         }
 
         if (rsc_ops_hash) {
             g_hash_table_destroy(rsc_ops_hash);
         }
 
         free_xml(template_ops);
     }
 
     /*free_xml(*expanded_xml); */
     *expanded_xml = new_xml;
 
     /* Disable multi-level templates for now */
     /*if(unpack_template(new_xml, expanded_xml, data_set) == FALSE) {
        free_xml(*expanded_xml);
        *expanded_xml = NULL;
 
        return FALSE;
        } */
 
     return TRUE;
 }
 
 static gboolean
 add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
 {
     const char *template_ref = NULL;
     const char *id = NULL;
     xmlNode *rsc_set = NULL;
     xmlNode *rsc_ref = NULL;
 
     if (xml_obj == NULL) {
         pe_err("No resource object for processing resource list of template");
         return FALSE;
     }
 
     template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
     if (template_ref == NULL) {
         return TRUE;
     }
 
     id = ID(xml_obj);
     if (id == NULL) {
         pe_err("'%s' object must have a id", crm_element_name(xml_obj));
         return FALSE;
     }
 
     if (crm_str_eq(template_ref, id, TRUE)) {
         pe_err("The resource object '%s' should not reference itself", id);
         return FALSE;
     }
 
     rsc_set = g_hash_table_lookup(data_set->template_rsc_sets, template_ref);
     if (rsc_set == NULL) {
         rsc_set = create_xml_node(NULL, XML_CONS_TAG_RSC_SET);
         crm_xml_add(rsc_set, XML_ATTR_ID, template_ref);
 
         g_hash_table_insert(data_set->template_rsc_sets, strdup(template_ref), rsc_set);
     }
 
     rsc_ref = create_xml_node(rsc_set, XML_TAG_RESOURCE_REF);
     crm_xml_add(rsc_ref, XML_ATTR_ID, id);
 
     return TRUE;
 }
 
 gboolean
 common_unpack(xmlNode * xml_obj, resource_t ** rsc,
               resource_t * parent, pe_working_set_t * data_set)
 {
     xmlNode *expanded_xml = NULL;
     xmlNode *ops = NULL;
     resource_t *top = NULL;
     const char *value = NULL;
     const char *class = NULL; /* Look for this after any templates have been expanded */
     const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
+    int container_remote_node = 0;
+    int baremetal_remote_node = 0;
 
     crm_log_xml_trace(xml_obj, "Processing resource input...");
 
     if (id == NULL) {
         pe_err("Must specify id tag in <resource>");
         return FALSE;
 
     } else if (rsc == NULL) {
         pe_err("Nowhere to unpack resource into");
         return FALSE;
 
     }
 
     if (unpack_template(xml_obj, &expanded_xml, data_set) == FALSE) {
         return FALSE;
     }
 
     *rsc = calloc(1, sizeof(resource_t));
 
     if (expanded_xml) {
         crm_log_xml_trace(expanded_xml, "Expanded resource...");
         (*rsc)->xml = expanded_xml;
         (*rsc)->orig_xml = xml_obj;
 
     } else {
         (*rsc)->xml = xml_obj;
         (*rsc)->orig_xml = NULL;
     }
 
     /* Do not use xml_obj from here on, use (*rsc)->xml in case templates are involved */
     class = crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS);
     (*rsc)->parent = parent;
 
     ops = find_xml_node((*rsc)->xml, "operations", FALSE);
     (*rsc)->ops_xml = expand_idref(ops, data_set->input);
 
     (*rsc)->variant = get_resource_type(crm_element_name((*rsc)->xml));
     if ((*rsc)->variant == pe_unknown) {
         pe_err("Unknown resource type: %s", crm_element_name((*rsc)->xml));
         free(*rsc);
         return FALSE;
     }
 
     (*rsc)->parameters =
         g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str);
 
     (*rsc)->meta =
         g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str);
 
     (*rsc)->allowed_nodes =
         g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str);
 
     (*rsc)->known_on = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, g_hash_destroy_str);
 
     value = crm_element_value((*rsc)->xml, XML_RSC_ATTR_INCARNATION);
     if (value) {
         (*rsc)->id = crm_concat(id, value, ':');
         add_hash_param((*rsc)->meta, XML_RSC_ATTR_INCARNATION, value);
 
     } else {
         (*rsc)->id = strdup(id);
     }
 
     (*rsc)->fns = &resource_class_functions[(*rsc)->variant];
     pe_rsc_trace((*rsc), "Unpacking resource...");
 
     get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set);
 
     (*rsc)->flags = 0;
     set_bit((*rsc)->flags, pe_rsc_runnable);
     set_bit((*rsc)->flags, pe_rsc_provisional);
 
     if (is_set(data_set->flags, pe_flag_is_managed_default)) {
         set_bit((*rsc)->flags, pe_rsc_managed);
     }
 
     (*rsc)->rsc_cons = NULL;
     (*rsc)->rsc_tickets = NULL;
     (*rsc)->actions = NULL;
     (*rsc)->role = RSC_ROLE_STOPPED;
     (*rsc)->next_role = RSC_ROLE_UNKNOWN;
 
     (*rsc)->recovery_type = recovery_stop_start;
     (*rsc)->stickiness = data_set->default_resource_stickiness;
     (*rsc)->migration_threshold = INFINITY;
     (*rsc)->failure_timeout = 0;
 
     value = g_hash_table_lookup((*rsc)->meta, XML_CIB_ATTR_PRIORITY);
     (*rsc)->priority = crm_parse_int(value, "0");
     (*rsc)->effective_priority = (*rsc)->priority;
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY);
     if (crm_is_true(value)) {
         set_bit((*rsc)->flags, pe_rsc_notify);
     }
 
+    if (xml_contains_remote_node((*rsc)->xml)) {
+        if (g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CONTAINER)) {
+            container_remote_node = 1;
+        } else {
+            baremetal_remote_node = 1;
+        }
+    }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_OP_ATTR_ALLOW_MIGRATE);
     if (crm_is_true(value)) {
         set_bit((*rsc)->flags, pe_rsc_allow_migrate);
+    } else if (value == NULL && baremetal_remote_node) {
+        /* By default, baremetal remote-nodes are allowed to float around
+         * the cluster without first having to stop all of the resources
+         * hosted on the remote-node. Enabling migration support is what
+         * makes this possible. If it ever causes problems, migration
+         * support can be explicitly disabled by setting
+         * allow-migrate=false. */
+        set_bit((*rsc)->flags, pe_rsc_allow_migrate);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MANAGED);
     if (value != NULL && safe_str_neq("default", value)) {
         gboolean bool_value = TRUE;
 
         crm_str_to_boolean(value, &bool_value);
         if (bool_value == FALSE) {
             clear_bit((*rsc)->flags, pe_rsc_managed);
         } else {
             set_bit((*rsc)->flags, pe_rsc_managed);
         }
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MAINTENANCE);
     if (value != NULL && safe_str_neq("default", value)) {
         gboolean bool_value = FALSE;
 
         crm_str_to_boolean(value, &bool_value);
         if (bool_value == TRUE) {
             clear_bit((*rsc)->flags, pe_rsc_managed);
             set_bit((*rsc)->flags, pe_rsc_maintenance);
         }
 
     } else if (is_set(data_set->flags, pe_flag_maintenance_mode)) {
         clear_bit((*rsc)->flags, pe_rsc_managed);
         set_bit((*rsc)->flags, pe_rsc_maintenance);
     }
 
     pe_rsc_trace((*rsc), "Options for %s", (*rsc)->id);
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_UNIQUE);
 
     top = uber_parent(*rsc);
     if (crm_is_true(value) || top->variant < pe_clone) {
         set_bit((*rsc)->flags, pe_rsc_unique);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_RESTART);
     if (safe_str_eq(value, "restart")) {
         (*rsc)->restart_type = pe_restart_restart;
         pe_rsc_trace((*rsc), "\tDependency restart handling: restart");
 
     } else {
         (*rsc)->restart_type = pe_restart_ignore;
         pe_rsc_trace((*rsc), "\tDependency restart handling: ignore");
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MULTIPLE);
     if (safe_str_eq(value, "stop_only")) {
         (*rsc)->recovery_type = recovery_stop_only;
         pe_rsc_trace((*rsc), "\tMultiple running resource recovery: stop only");
 
     } else if (safe_str_eq(value, "block")) {
         (*rsc)->recovery_type = recovery_block;
         pe_rsc_trace((*rsc), "\tMultiple running resource recovery: block");
 
     } else {
         (*rsc)->recovery_type = recovery_stop_start;
         pe_rsc_trace((*rsc), "\tMultiple running resource recovery: stop/start");
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_STICKINESS);
     if (value != NULL && safe_str_neq("default", value)) {
         (*rsc)->stickiness = char2score(value);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_STICKINESS);
     if (value != NULL && safe_str_neq("default", value)) {
         (*rsc)->migration_threshold = char2score(value);
 
     } else if (value == NULL) {
         /* Make a best-effort guess at a migration threshold for people with 0.6 configs:
          * try with underscores and hyphens, from both the resource and global defaults sections
          */
 
         value = g_hash_table_lookup((*rsc)->meta, "resource-failure-stickiness");
         if (value == NULL) {
             value = g_hash_table_lookup((*rsc)->meta, "resource_failure_stickiness");
         }
         if (value == NULL) {
             value =
                 g_hash_table_lookup(data_set->config_hash, "default-resource-failure-stickiness");
         }
         if (value == NULL) {
             value =
                 g_hash_table_lookup(data_set->config_hash, "default_resource_failure_stickiness");
         }
 
         if (value) {
             int fail_sticky = char2score(value);
 
             if (fail_sticky == -INFINITY) {
                 (*rsc)->migration_threshold = 1;
                 pe_rsc_info((*rsc),
                             "Set a migration threshold of %d for %s based on a failure-stickiness of %s",
                             (*rsc)->migration_threshold, (*rsc)->id, value);
 
             } else if ((*rsc)->stickiness != 0 && fail_sticky != 0) {
                 (*rsc)->migration_threshold = (*rsc)->stickiness / fail_sticky;
                 if ((*rsc)->migration_threshold < 0) {
                     /* Make sure it's positive */
                     (*rsc)->migration_threshold = 0 - (*rsc)->migration_threshold;
                 }
                 (*rsc)->migration_threshold += 1;
                 pe_rsc_info((*rsc),
                             "Calculated a migration threshold for %s of %d based on a stickiness of %d/%s",
                             (*rsc)->id, (*rsc)->migration_threshold, (*rsc)->stickiness, value);
             }
         }
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_REQUIRES);
     if (safe_str_eq(value, "nothing")) {
 
     } else if (safe_str_eq(value, "quorum")) {
         set_bit((*rsc)->flags, pe_rsc_needs_quorum);
 
     } else if (safe_str_eq(value, "unfencing")) {
         set_bit((*rsc)->flags, pe_rsc_needs_fencing);
         set_bit((*rsc)->flags, pe_rsc_needs_unfencing);
         if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
             crm_notice("%s requires (un)fencing but fencing is disabled", (*rsc)->id);
         }
 
     } else if (safe_str_eq(value, "fencing")) {
         set_bit((*rsc)->flags, pe_rsc_needs_fencing);
         if (is_set(data_set->flags, pe_flag_stonith_enabled) == FALSE) {
             crm_notice("%s requires fencing but fencing is disabled", (*rsc)->id);
         }
 
     } else {
         if (value) {
             crm_config_err("Invalid value for %s->requires: %s%s",
                            (*rsc)->id, value,
                            is_set(data_set->flags,
                                   pe_flag_stonith_enabled) ? "" : " (stonith-enabled=false)");
         }
 
         if (is_set(data_set->flags, pe_flag_stonith_enabled)) {
             set_bit((*rsc)->flags, pe_rsc_needs_fencing);
             value = "fencing (default)";
 
         } else if (data_set->no_quorum_policy == no_quorum_ignore) {
             value = "nothing (default)";
 
         } else {
             set_bit((*rsc)->flags, pe_rsc_needs_quorum);
             value = "quorum (default)";
         }
     }
 
     pe_rsc_trace((*rsc), "\tRequired to start: %s", value);
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_TIMEOUT);
     if (value != NULL) {
         /* call crm_get_msec() and convert back to seconds */
         (*rsc)->failure_timeout = (crm_get_msec(value) / 1000);
     }
 
     get_target_role(*rsc, &((*rsc)->next_role));
     pe_rsc_trace((*rsc), "\tDesired next state: %s",
                  (*rsc)->next_role != RSC_ROLE_UNKNOWN ? role2text((*rsc)->next_role) : "default");
 
     if ((*rsc)->fns->unpack(*rsc, data_set) == FALSE) {
         return FALSE;
     }
 
     if (is_set(data_set->flags, pe_flag_symmetric_cluster)) {
         resource_location(*rsc, NULL, 0, "symmetric_default", data_set);
-    } else if (xml_contains_remote_node((*rsc)->xml) && g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CONTAINER)) {
+    } else if (container_remote_node) {
         /* remote resources tied to a container resource must always be allowed
          * to opt-in to the cluster. Whether the connection resource is actually
          * allowed to be placed on a node is dependent on the container resource */
         resource_location(*rsc, NULL, 0, "remote_connection_default", data_set);
     }
 
     pe_rsc_trace((*rsc), "\tAction notification: %s",
                  is_set((*rsc)->flags, pe_rsc_notify) ? "required" : "not required");
 
     if (safe_str_eq(class, "stonith")) {
         set_bit(data_set->flags, pe_flag_have_stonith_resource);
     }
 
     (*rsc)->utilization =
         g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str);
 
     unpack_instance_attributes(data_set->input, (*rsc)->xml, XML_TAG_UTILIZATION, NULL,
                                (*rsc)->utilization, NULL, FALSE, data_set->now);
 
 /* 	data_set->resources = g_list_append(data_set->resources, (*rsc)); */
 
     if (expanded_xml) {
         if (add_template_rsc(xml_obj, data_set) == FALSE) {
             return FALSE;
         }
     }
     return TRUE;
 }
 
 void
 common_update_score(resource_t * rsc, const char *id, int score)
 {
     node_t *node = NULL;
 
     node = pe_hash_table_lookup(rsc->allowed_nodes, id);
     if (node != NULL) {
         pe_rsc_trace(rsc, "Updating score for %s on %s: %d + %d", rsc->id, id, node->weight, score);
         node->weight = merge_weights(node->weight, score);
     }
 
     if (rsc->children) {
         GListPtr gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             resource_t *child_rsc = (resource_t *) gIter->data;
 
             common_update_score(child_rsc, id, score);
         }
     }
 }
 
 gboolean
 is_parent(resource_t *child, resource_t *rsc)
 {
     resource_t *parent = child;
 
     if (parent == NULL || rsc == NULL) {
         return FALSE;
     }
     while (parent->parent != NULL) {
         if (parent->parent == rsc) {
             return TRUE;
         }
         parent = parent->parent;
     }
     return FALSE;
 }
 
 resource_t *
 uber_parent(resource_t * rsc)
 {
     resource_t *parent = rsc;
 
     if (parent == NULL) {
         return NULL;
     }
     while (parent->parent != NULL) {
         parent = parent->parent;
     }
     return parent;
 }
 
 void
 common_free(resource_t * rsc)
 {
     if (rsc == NULL) {
         return;
     }
 
     pe_rsc_trace(rsc, "Freeing %s %d", rsc->id, rsc->variant);
 
     g_list_free(rsc->rsc_cons);
     g_list_free(rsc->rsc_cons_lhs);
     g_list_free(rsc->rsc_tickets);
     g_list_free(rsc->dangling_migrations);
 
     if (rsc->parameters != NULL) {
         g_hash_table_destroy(rsc->parameters);
     }
     if (rsc->meta != NULL) {
         g_hash_table_destroy(rsc->meta);
     }
     if (rsc->utilization != NULL) {
         g_hash_table_destroy(rsc->utilization);
     }
 
     if (rsc->parent == NULL && is_set(rsc->flags, pe_rsc_orphan)) {
         free_xml(rsc->xml);
         rsc->xml = NULL;
         free_xml(rsc->orig_xml);
         rsc->orig_xml = NULL;
 
         /* if rsc->orig_xml, then rsc->xml is an expanded xml from a template */
     } else if (rsc->orig_xml) {
         free_xml(rsc->xml);
         rsc->xml = NULL;
     }
     if (rsc->running_on) {
         g_list_free(rsc->running_on);
         rsc->running_on = NULL;
     }
     if (rsc->known_on) {
         g_hash_table_destroy(rsc->known_on);
         rsc->known_on = NULL;
     }
     if (rsc->actions) {
         g_list_free(rsc->actions);
         rsc->actions = NULL;
     }
     if (rsc->allowed_nodes) {
         g_hash_table_destroy(rsc->allowed_nodes);
         rsc->allowed_nodes = NULL;
     }
     g_list_free(rsc->fillers);
     g_list_free(rsc->rsc_location);
     pe_rsc_trace(rsc, "Resource freed");
     free(rsc->id);
     free(rsc->clone_name);
     free(rsc->allocated_to);
     free(rsc->variant_opaque);
     free(rsc->pending_task);
     free(rsc);
 }
diff --git a/pengine/test10/remote-move.dot b/pengine/test10/remote-move.dot
index 8cef9d8bf4..e5d62e7338 100644
--- a/pengine/test10/remote-move.dot
+++ b/pengine/test10/remote-move.dot
@@ -1,24 +1,20 @@
  digraph "g" {
-"FAKE2_monitor_60000 remote1" [ style=bold color="green" fontcolor="black"]
-"FAKE2_start_0 remote1" -> "FAKE2_monitor_60000 remote1" [ style = bold]
-"FAKE2_start_0 remote1" [ style=bold color="green" fontcolor="black"]
-"FAKE2_stop_0 remote1" -> "FAKE2_start_0 remote1" [ style = bold]
-"FAKE2_stop_0 remote1" -> "all_stopped" [ style = bold]
-"FAKE2_stop_0 remote1" -> "remote1_stop_0 18builder" [ style = bold]
-"FAKE2_stop_0 remote1" [ style=bold color="green" fontcolor="black"]
 "all_stopped" [ style=bold color="green" fontcolor="orange"]
+"remote1_migrate_from_0 18node1" -> "remote1_start_0 18node1" [ style = bold]
+"remote1_migrate_from_0 18node1" -> "remote1_stop_0 18builder" [ style = bold]
+"remote1_migrate_from_0 18node1" [ style=bold color="green" fontcolor="black"]
+"remote1_migrate_to_0 18builder" -> "remote1_migrate_from_0 18node1" [ style = bold]
+"remote1_migrate_to_0 18builder" [ style=bold color="green" fontcolor="black"]
 "remote1_monitor_60000 18node1" [ style=bold color="green" fontcolor="black"]
-"remote1_start_0 18node1" -> "FAKE2_monitor_60000 remote1" [ style = bold]
-"remote1_start_0 18node1" -> "FAKE2_start_0 remote1" [ style = bold]
 "remote1_start_0 18node1" -> "remote1_monitor_60000 18node1" [ style = bold]
-"remote1_start_0 18node1" [ style=bold color="green" fontcolor="black"]
+"remote1_start_0 18node1" [ style=bold color="green" fontcolor="orange"]
 "remote1_stop_0 18builder" -> "all_stopped" [ style = bold]
 "remote1_stop_0 18builder" -> "remote1_start_0 18node1" [ style = bold]
 "remote1_stop_0 18builder" [ style=bold color="green" fontcolor="black"]
 "shooter_monitor_60000 18builder" [ style=bold color="green" fontcolor="black"]
 "shooter_start_0 18builder" -> "shooter_monitor_60000 18builder" [ style = bold]
 "shooter_start_0 18builder" [ style=bold color="green" fontcolor="black"]
 "shooter_stop_0 18node1" -> "all_stopped" [ style = bold]
 "shooter_stop_0 18node1" -> "shooter_start_0 18builder" [ style = bold]
 "shooter_stop_0 18node1" [ style=bold color="green" fontcolor="black"]
 }
diff --git a/pengine/test10/remote-move.exp b/pengine/test10/remote-move.exp
index 4458362507..6724679e93 100644
--- a/pengine/test10/remote-move.exp
+++ b/pengine/test10/remote-move.exp
@@ -1,135 +1,115 @@
 <transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY"  transition_id="0">
   <synapse id="0">
     <action_set>
       <rsc_op id="16" operation="monitor" operation_key="shooter_monitor_60000" on_node="18builder" on_node_uuid="5">
         <primitive id="shooter" class="stonith" type="fence_xvm"/>
         <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="15" operation="start" operation_key="shooter_start_0" on_node="18builder" on_node_uuid="5"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="1">
     <action_set>
       <rsc_op id="15" operation="start" operation_key="shooter_start_0" on_node="18builder" on_node_uuid="5">
         <primitive id="shooter" class="stonith" type="fence_xvm"/>
         <attributes CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="14" operation="stop" operation_key="shooter_stop_0" on_node="18node1" on_node_uuid="1"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="2">
     <action_set>
       <rsc_op id="14" operation="stop" operation_key="shooter_stop_0" on_node="18node1" on_node_uuid="1">
         <primitive id="shooter" class="stonith" type="fence_xvm"/>
         <attributes CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
     <inputs/>
   </synapse>
   <synapse id="3">
     <action_set>
-      <rsc_op id="19" operation="monitor" operation_key="remote1_monitor_60000" on_node="18node1" on_node_uuid="1">
+      <rsc_op id="21" operation="migrate_from" operation_key="remote1_migrate_from_0" on_node="18node1" on_node_uuid="1">
         <primitive id="remote1" class="ocf" provider="pacemaker" type="remote"/>
-        <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_timeout="20000" />
+        <attributes CRM_meta_migrate_source="18builder" CRM_meta_migrate_target="18node1" CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
-        <rsc_op id="18" operation="start" operation_key="remote1_start_0" on_node="18node1" on_node_uuid="1"/>
+        <rsc_op id="20" operation="migrate_to" operation_key="remote1_migrate_to_0" on_node="18builder" on_node_uuid="5"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="4">
     <action_set>
-      <rsc_op id="18" operation="start" operation_key="remote1_start_0" on_node="18node1" on_node_uuid="1">
+      <rsc_op id="20" operation="migrate_to" operation_key="remote1_migrate_to_0" on_node="18builder" on_node_uuid="5">
         <primitive id="remote1" class="ocf" provider="pacemaker" type="remote"/>
-        <attributes CRM_meta_timeout="20000" />
+        <attributes CRM_meta_migrate_source="18builder" CRM_meta_migrate_target="18node1" CRM_meta_record_pending="true" CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
-    <inputs>
-      <trigger>
-        <rsc_op id="17" operation="stop" operation_key="remote1_stop_0" on_node="18builder" on_node_uuid="5"/>
-      </trigger>
-    </inputs>
+    <inputs/>
   </synapse>
   <synapse id="5">
     <action_set>
-      <rsc_op id="17" operation="stop" operation_key="remote1_stop_0" on_node="18builder" on_node_uuid="5">
+      <rsc_op id="19" operation="monitor" operation_key="remote1_monitor_60000" on_node="18node1" on_node_uuid="1">
         <primitive id="remote1" class="ocf" provider="pacemaker" type="remote"/>
-        <attributes CRM_meta_timeout="20000" />
+        <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
-        <rsc_op id="22" operation="stop" operation_key="FAKE2_stop_0" on_node="remote1" on_node_uuid="remote1" router_node="18builder"/>
+        <pseudo_event id="18" operation="start" operation_key="remote1_start_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="6">
     <action_set>
-      <rsc_op id="23" operation="start" operation_key="FAKE2_start_0" on_node="remote1" on_node_uuid="remote1" router_node="18node1">
-        <primitive id="FAKE2" class="ocf" provider="heartbeat" type="Dummy"/>
+      <pseudo_event id="18" operation="start" operation_key="remote1_start_0">
         <attributes CRM_meta_timeout="20000" />
-      </rsc_op>
+      </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
-        <rsc_op id="18" operation="start" operation_key="remote1_start_0" on_node="18node1" on_node_uuid="1"/>
+        <rsc_op id="17" operation="stop" operation_key="remote1_stop_0" on_node="18builder" on_node_uuid="5"/>
       </trigger>
       <trigger>
-        <rsc_op id="22" operation="stop" operation_key="FAKE2_stop_0" on_node="remote1" on_node_uuid="remote1" router_node="18builder"/>
+        <rsc_op id="21" operation="migrate_from" operation_key="remote1_migrate_from_0" on_node="18node1" on_node_uuid="1"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="7">
     <action_set>
-      <rsc_op id="22" operation="stop" operation_key="FAKE2_stop_0" on_node="remote1" on_node_uuid="remote1" router_node="18builder">
-        <primitive id="FAKE2" class="ocf" provider="heartbeat" type="Dummy"/>
+      <rsc_op id="17" operation="stop" operation_key="remote1_stop_0" on_node="18builder" on_node_uuid="5">
+        <primitive id="remote1" class="ocf" provider="pacemaker" type="remote"/>
         <attributes CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
-    <inputs/>
-  </synapse>
-  <synapse id="8">
-    <action_set>
-      <rsc_op id="6" operation="monitor" operation_key="FAKE2_monitor_60000" on_node="remote1" on_node_uuid="remote1" router_node="18node1">
-        <primitive id="FAKE2" class="ocf" provider="heartbeat" type="Dummy"/>
-        <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_timeout="20000" />
-      </rsc_op>
-    </action_set>
     <inputs>
       <trigger>
-        <rsc_op id="18" operation="start" operation_key="remote1_start_0" on_node="18node1" on_node_uuid="1"/>
-      </trigger>
-      <trigger>
-        <rsc_op id="23" operation="start" operation_key="FAKE2_start_0" on_node="remote1" on_node_uuid="remote1" router_node="18node1"/>
+        <rsc_op id="21" operation="migrate_from" operation_key="remote1_migrate_from_0" on_node="18node1" on_node_uuid="1"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="9">
+  <synapse id="8">
     <action_set>
       <pseudo_event id="7" operation="all_stopped" operation_key="all_stopped">
         <attributes />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="14" operation="stop" operation_key="shooter_stop_0" on_node="18node1" on_node_uuid="1"/>
       </trigger>
       <trigger>
         <rsc_op id="17" operation="stop" operation_key="remote1_stop_0" on_node="18builder" on_node_uuid="5"/>
       </trigger>
-      <trigger>
-        <rsc_op id="22" operation="stop" operation_key="FAKE2_stop_0" on_node="remote1" on_node_uuid="remote1" router_node="18builder"/>
-      </trigger>
     </inputs>
   </synapse>
 </transition_graph>
diff --git a/pengine/test10/remote-move.summary b/pengine/test10/remote-move.summary
index 4f8b5c2578..de223fd0be 100644
--- a/pengine/test10/remote-move.summary
+++ b/pengine/test10/remote-move.summary
@@ -1,40 +1,38 @@
 
 Current cluster status:
 Online: [ 18builder 18node1 18node2 ]
 RemoteOnline: [ remote1 ]
 
  shooter	(stonith:fence_xvm):	Started 18node1 
  remote1	(ocf::pacemaker:remote):	Started 18builder 
  FAKE1	(ocf::heartbeat:Dummy):	Started 18node2 
  FAKE2	(ocf::heartbeat:Dummy):	Started remote1 
  FAKE3	(ocf::heartbeat:Dummy):	Started 18builder 
  FAKE4	(ocf::heartbeat:Dummy):	Started 18node1 
 
 Transition Summary:
  * Move    shooter	(Started 18node1 -> 18builder)
- * Move    remote1	(Started 18builder -> 18node1)
- * Restart FAKE2	(Started remote1)
+ * Migrate remote1	(Started 18builder -> 18node1)
 
 Executing cluster transition:
  * Resource action: shooter         stop on 18node1
- * Resource action: FAKE2           stop on remote1
+ * Resource action: remote1         migrate_to on 18builder
  * Resource action: shooter         start on 18builder
+ * Resource action: remote1         migrate_from on 18node1
  * Resource action: remote1         stop on 18builder
  * Pseudo action:   all_stopped
  * Resource action: shooter         monitor=60000 on 18builder
- * Resource action: remote1         start on 18node1
- * Resource action: FAKE2           start on remote1
- * Resource action: FAKE2           monitor=60000 on remote1
+ * Pseudo action:   remote1_start_0
  * Resource action: remote1         monitor=60000 on 18node1
 
 Revised cluster status:
 Online: [ 18builder 18node1 18node2 ]
 RemoteOnline: [ remote1 ]
 
  shooter	(stonith:fence_xvm):	Started 18builder 
  remote1	(ocf::pacemaker:remote):	Started 18node1 
  FAKE1	(ocf::heartbeat:Dummy):	Started 18node2 
  FAKE2	(ocf::heartbeat:Dummy):	Started remote1 
  FAKE3	(ocf::heartbeat:Dummy):	Started 18builder 
  FAKE4	(ocf::heartbeat:Dummy):	Started 18node1