diff --git a/include/crm/common/internal.h b/include/crm/common/internal.h
index c587c53823..e5f34ec903 100644
--- a/include/crm/common/internal.h
+++ b/include/crm/common/internal.h
@@ -1,125 +1,126 @@
 /*
  * Copyright (C) 2015
  *     Andrew Beekhof <andrew@beekhof.net>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU Lesser General Public License as published by
  * the Free Software Foundation; either version 2 of the License, or
  * (at your option) any later version.
  *
  * This program is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #ifndef CRM_COMMON_INTERNAL__H
 #define CRM_COMMON_INTERNAL__H
 
 #include <glib.h>       /* for gboolean */
 #include <dirent.h>     /* for struct dirent */
 #include <unistd.h>     /* for getpid() */
 #include <sys/types.h>  /* for uid_t and gid_t */
 
 #include <crm/common/logging.h>
 
 /* internal I/O utilities (from io.c) */
 
 char *generate_series_filename(const char *directory, const char *series, int sequence,
                                gboolean bzip);
 int get_last_sequence(const char *directory, const char *series);
 void write_last_sequence(const char *directory, const char *series, int sequence, int max);
 int crm_chown_last_sequence(const char *directory, const char *series, uid_t uid, gid_t gid);
 
 gboolean crm_is_writable(const char *dir, const char *file, const char *user, const char *group,
                          gboolean need_both);
 
 void crm_sync_directory(const char *name);
 
 char *crm_read_contents(const char *filename);
 int crm_write_sync(int fd, const char *contents);
 
 
 /* internal procfs utilities (from procfs.c) */
 
 int crm_procfs_process_info(struct dirent *entry, char *name, int *pid);
 int crm_procfs_pid_of(const char *name);
 unsigned int crm_procfs_num_cores(void);
 
 
 /* internal XML schema functions (from xml.c) */
 
 void crm_schema_init(void);
 void crm_schema_cleanup(void);
 
 
 /* internal generic string functions (from strings.c) */
 
 char *crm_concat(const char *prefix, const char *suffix, char join);
 void g_hash_destroy_str(gpointer data);
 long long crm_int_helper(const char *text, char **end_text);
 gboolean crm_ends_with(const char *s, const char *match);
 gboolean crm_ends_with_ext(const char *s, const char *match);
 char *add_list_element(char *list, const char *value);
 bool crm_compress_string(const char *data, int length, int max, char **result,
                          unsigned int *result_len);
+gint crm_alpha_sort(gconstpointer a, gconstpointer b);
 
 static inline int
 crm_strlen_zero(const char *s)
 {
     return !s || *s == '\0';
 }
 
 static inline char *
 crm_getpid_s()
 {
     return crm_strdup_printf("%lu", (unsigned long) getpid());
 }
 
 /* convenience functions for failure-related node attributes */
 
 #define CRM_FAIL_COUNT_PREFIX   "fail-count"
 #define CRM_LAST_FAILURE_PREFIX "last-failure"
 
 /*!
  * \internal
  * \brief Generate a failure-related node attribute name for a resource
  *
  * \param[in] prefix    Start of attribute name
  * \param[in] rsc_id    Resource name
  * \param[in] op        Operation name
  * \param[in] interval  Operation interval
  *
  * \return Newly allocated string with attribute name
  *
  * \note Failure attributes are named like PREFIX-RSC#OP_INTERVAL (for example,
  *       "fail-count-myrsc#monitor_30000"). The '#' is used because it is not
  *       a valid character in a resource ID, to reliably distinguish where the
  *       operation name begins. The '_' is used simply to be more comparable to
  *       action labels like "myrsc_monitor_30000".
  */
 static inline char *
 crm_fail_attr_name(const char *prefix, const char *rsc_id, const char *op,
                    int interval)
 {
     CRM_CHECK(prefix && rsc_id && op, return NULL);
     return crm_strdup_printf("%s-%s#%s_%d", prefix, rsc_id, op, interval);
 }
 
 static inline char *
 crm_failcount_name(const char *rsc_id, const char *op, int interval)
 {
     return crm_fail_attr_name(CRM_FAIL_COUNT_PREFIX, rsc_id, op, interval);
 }
 
 static inline char *
 crm_lastfailure_name(const char *rsc_id, const char *op, int interval)
 {
     return crm_fail_attr_name(CRM_LAST_FAILURE_PREFIX, rsc_id, op, interval);
 }
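
 /* For example (illustrative, with a hypothetical resource "myrsc"):
  *
  *     char *attr = crm_failcount_name("myrsc", "monitor", 30000);
  *
  *     // attr is "fail-count-myrsc#monitor_30000" and must be freed
  *     free(attr);
  */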
 
 #endif /* CRM_COMMON_INTERNAL__H */
diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c
index acefd2d305..d09ab92a9f 100644
--- a/lib/cluster/membership.c
+++ b/lib/cluster/membership.c
@@ -1,1114 +1,1125 @@
 /*
  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
  * This library is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #include <crm_internal.h>
 
 #ifndef _GNU_SOURCE
 #  define _GNU_SOURCE
 #endif
 
 #include <sys/param.h>
 #include <sys/types.h>
 #include <stdio.h>
 #include <unistd.h>
 #include <string.h>
 #include <glib.h>
 #include <crm/common/ipc.h>
 #include <crm/cluster/internal.h>
 #include <crm/msg_xml.h>
 #include <crm/stonith-ng.h>
 
 #define s_if_plural(i) (((i) == 1)? "" : "s")
 
 /* The peer cache remembers cluster nodes that have been seen.
  * This is managed mostly automatically by libcluster, based on
  * cluster membership events.
  *
  * Because cluster nodes can have conflicting names or UUIDs,
  * the hash table key is a uniquely generated ID.
  */
 GHashTable *crm_peer_cache = NULL;
 
 /*
  * The remote peer cache tracks pacemaker_remote nodes. While the
  * value has the same type as the peer cache's, it is tracked separately for
  * three reasons: pacemaker_remote nodes can't have conflicting names or UUIDs,
  * so the name (which is also the UUID) is used as the hash table key; there
  * is no equivalent of membership events, so management is not automatic; and
  * most users of the peer cache need to exclude pacemaker_remote nodes.
  *
  * That said, using a single cache would be more logical and less error-prone,
  * so it would be a good idea to merge them one day.
  *
  * libcluster provides two avenues for managing the cache:
  * crm_remote_peer_get(), crm_remote_peer_cache_add(), and
  * crm_remote_peer_cache_remove() manipulate it directly, while
  * crm_remote_peer_cache_refresh() repopulates it from the CIB.
  */
 GHashTable *crm_remote_peer_cache = NULL;
 
 unsigned long long crm_peer_seq = 0;
 gboolean crm_have_quorum = FALSE;
 static gboolean crm_autoreap  = TRUE;
 
 int
 crm_remote_peer_cache_size(void)
 {
     if (crm_remote_peer_cache == NULL) {
         return 0;
     }
     return g_hash_table_size(crm_remote_peer_cache);
 }
 
 /*!
  * \brief Get a remote node peer cache entry, creating it if necessary
  *
  * \param[in] node_name  Name of remote node
  *
  * \return Cache entry for node on success, otherwise NULL with errno set
  *
  * \note When creating a new entry, this will leave the node state undetermined,
  *       so the caller should also call crm_update_peer_state() if the state is
  *       known.
  */
 crm_node_t *
 crm_remote_peer_get(const char *node_name)
 {
     crm_node_t *node;
 
     if (node_name == NULL) {
         errno = EINVAL;
         return NULL;
     }
 
     /* Return existing cache entry if one exists */
     node = g_hash_table_lookup(crm_remote_peer_cache, node_name);
     if (node) {
         return node;
     }
 
     /* Allocate a new entry */
     node = calloc(1, sizeof(crm_node_t));
     if (node == NULL) {
         return NULL;
     }
 
     /* Populate the essential information */
     node->flags = crm_remote_node;
     node->uuid = strdup(node_name);
     if (node->uuid == NULL) {
         free(node);
         errno = ENOMEM;
         return NULL;
     }
 
     /* Add the new entry to the cache */
     g_hash_table_replace(crm_remote_peer_cache, node->uuid, node);
     crm_trace("added %s to remote cache", node_name);
 
     /* Update the entry's uname, ensuring peer status callbacks are called */
     crm_update_peer_uname(node, node_name);
     return node;
 }
 
 /*!
  * \brief Add a node to the remote peer cache
  *
  * \param[in] node_name  Name of remote node
  *
  * \note This is a legacy convenience wrapper for crm_remote_peer_get()
  *       for callers that don't need the cache entry returned.
  */
 void
 crm_remote_peer_cache_add(const char *node_name)
 {
     CRM_ASSERT(crm_remote_peer_get(node_name) != NULL);
 }
 
 void
 crm_remote_peer_cache_remove(const char *node_name)
 {
     if (g_hash_table_remove(crm_remote_peer_cache, node_name)) {
         crm_trace("removed %s from remote peer cache", node_name);
     }
 }
 
 /*!
  * \internal
  * \brief Return node status based on a CIB status entry
  *
  * \param[in] node_state  XML of node state
  *
  * \return CRM_NODE_LOST if XML_NODE_IN_CLUSTER is false in node_state,
  *         CRM_NODE_MEMBER otherwise
  * \note Unlike most boolean XML attributes, this one defaults to true, for
  *       backward compatibility with older crmd versions that don't set it.
  */
 static const char *
 remote_state_from_cib(xmlNode *node_state)
 {
     const char *status;
 
     status = crm_element_value(node_state, XML_NODE_IN_CLUSTER);
     if (status && !crm_is_true(status)) {
         status = CRM_NODE_LOST;
     } else {
         status = CRM_NODE_MEMBER;
     }
     return status;
 }
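
 /* For example (illustrative): a node_state entry whose XML_NODE_IN_CLUSTER
  * attribute is "false" yields CRM_NODE_LOST, while "true" or an absent
  * attribute yields CRM_NODE_MEMBER.
  */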
 
 /* user data for looping through remote node xpath searches */
 struct refresh_data {
     const char *field;  /* XML attribute to check for node name */
     gboolean has_state; /* whether to update node state based on XML */
 };
 
 /*!
  * \internal
  * \brief Process one pacemaker_remote node xpath search result
  *
  * \param[in] result     XML search result
  * \param[in] user_data  what to look for in the XML
  */
 static void
 remote_cache_refresh_helper(xmlNode *result, void *user_data)
 {
     struct refresh_data *data = user_data;
     const char *remote = crm_element_value(result, data->field);
     const char *state = NULL;
     crm_node_t *node;
 
     CRM_CHECK(remote != NULL, return);
 
     /* Determine node's state, if the result has it */
     if (data->has_state) {
         state = remote_state_from_cib(result);
     }
 
     /* Check whether cache already has entry for node */
     node = g_hash_table_lookup(crm_remote_peer_cache, remote);
 
     if (node == NULL) {
         /* Node is not in cache, so add a new entry for it */
         node = crm_remote_peer_get(remote);
         CRM_ASSERT(node);
         if (state) {
             crm_update_peer_state(__FUNCTION__, node, state, 0);
         }
 
     } else if (is_set(node->flags, crm_node_dirty)) {
         /* Node is in cache and hasn't been updated already, so mark it clean */
         clear_bit(node->flags, crm_node_dirty);
         if (state) {
             crm_update_peer_state(__FUNCTION__, node, state, 0);
         }
     }
 }
 
 static void
 mark_dirty(gpointer key, gpointer value, gpointer user_data)
 {
     set_bit(((crm_node_t*)value)->flags, crm_node_dirty);
 }
 
 static gboolean
 is_dirty(gpointer key, gpointer value, gpointer user_data)
 {
     return is_set(((crm_node_t*)value)->flags, crm_node_dirty);
 }
 
 /* search string to find CIB resources entries for guest nodes */
 #define XPATH_GUEST_NODE_CONFIG \
     "//" XML_TAG_CIB "//" XML_CIB_TAG_CONFIGURATION "//" XML_CIB_TAG_RESOURCE \
     "//" XML_TAG_META_SETS "//" XML_CIB_TAG_NVPAIR \
     "[@name='" XML_RSC_ATTR_REMOTE_NODE "']"
 
 /* search string to find CIB resources entries for remote nodes */
 #define XPATH_REMOTE_NODE_CONFIG \
     "//" XML_TAG_CIB "//" XML_CIB_TAG_CONFIGURATION "//" XML_CIB_TAG_RESOURCE \
     "[@type='remote'][@provider='pacemaker']"
 
 /* search string to find CIB node status entries for pacemaker_remote nodes */
 #define XPATH_REMOTE_NODE_STATUS \
     "//" XML_TAG_CIB "//" XML_CIB_TAG_STATUS "//" XML_CIB_TAG_STATE \
     "[@" XML_NODE_IS_REMOTE "='true']"
 
 /*!
  * \brief Repopulate the remote peer cache based on CIB XML
  *
  * \param[in] cib  CIB XML to parse
  */
 void
 crm_remote_peer_cache_refresh(xmlNode *cib)
 {
     struct refresh_data data;
 
     crm_peer_init();
 
     /* First, we mark all existing cache entries as dirty,
      * so that later we can remove any that weren't in the CIB.
      * We don't empty the cache, because we need to detect changes in state.
      */
     g_hash_table_foreach(crm_remote_peer_cache, mark_dirty, NULL);
 
     /* Look for guest nodes and remote nodes in the status section */
     data.field = "id";
     data.has_state = TRUE;
     crm_foreach_xpath_result(cib, XPATH_REMOTE_NODE_STATUS,
                              remote_cache_refresh_helper, &data);
 
     /* Look for guest nodes and remote nodes in the configuration section,
      * because they may have just been added and not have a status entry yet.
      * In that case, the cached node state will be left NULL, so that the
      * peer status callback isn't called until we're sure the node started
      * successfully.
      */
     data.field = "value";
     data.has_state = FALSE;
     crm_foreach_xpath_result(cib, XPATH_GUEST_NODE_CONFIG,
                              remote_cache_refresh_helper, &data);
     data.field = "id";
     data.has_state = FALSE;
     crm_foreach_xpath_result(cib, XPATH_REMOTE_NODE_CONFIG,
                              remote_cache_refresh_helper, &data);
 
     /* Remove all old cache entries that weren't seen in the CIB */
     g_hash_table_foreach_remove(crm_remote_peer_cache, is_dirty, NULL);
 }
 
 gboolean
 crm_is_peer_active(const crm_node_t * node)
 {
     if(node == NULL) {
         return FALSE;
     }
 
     if (is_set(node->flags, crm_remote_node)) {
         /* Remote nodes are never considered active members. This
          * guarantees they will never be considered for DC membership. */
         return FALSE;
     }
 #if SUPPORT_COROSYNC
     if (is_openais_cluster()) {
         return crm_is_corosync_peer_active(node);
     }
 #endif
 #if SUPPORT_HEARTBEAT
     if (is_heartbeat_cluster()) {
         return crm_is_heartbeat_peer_active(node);
     }
 #endif
     crm_err("Unhandled cluster type: %s", name_for_cluster_type(get_cluster_type()));
     return FALSE;
 }
 
 static gboolean
 crm_reap_dead_member(gpointer key, gpointer value, gpointer user_data)
 {
     crm_node_t *node = value;
     crm_node_t *search = user_data;
 
     if (search == NULL) {
         return FALSE;
 
     } else if (search->id && node->id != search->id) {
         return FALSE;
 
     } else if (search->id == 0 && safe_str_neq(node->uname, search->uname)) {
         return FALSE;
 
     } else if (crm_is_peer_active(value) == FALSE) {
         crm_info("Removing node with name %s and id %u from membership cache",
                  (node->uname? node->uname : "unknown"), node->id);
         return TRUE;
     }
     return FALSE;
 }
 
 /*!
  * \brief Remove all peer cache entries matching a node ID and/or uname
  *
  * \param[in] id    ID of node to remove (or 0 to ignore)
  * \param[in] name  Uname of node to remove (or NULL to ignore)
  *
  * \return Number of cache entries removed
  */
 guint
 reap_crm_member(uint32_t id, const char *name)
 {
     int matches = 0;
     crm_node_t search;
 
     if (crm_peer_cache == NULL) {
         crm_trace("Membership cache not initialized, ignoring purge request");
         return 0;
     }
 
     search.id = id;
     search.uname = name ? strdup(name) : NULL;
     matches = g_hash_table_foreach_remove(crm_peer_cache, crm_reap_dead_member, &search);
     if(matches) {
         crm_notice("Purged %d peer%s with id=%u%s%s from the membership cache",
                    matches, s_if_plural(matches), search.id,
                    (search.uname? " and/or uname=" : ""),
                    (search.uname? search.uname : ""));
 
     } else {
         crm_info("No peers with id=%u%s%s to purge from the membership cache",
                  search.id, (search.uname? " and/or uname=" : ""),
                  (search.uname? search.uname : ""));
     }
 
     free(search.uname);
     return matches;
 }
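
 /* For example (illustrative, with a hypothetical node "node1"):
  * reap_crm_member(0, "node1") removes every inactive cache entry whose
  * uname is "node1" regardless of ID, while reap_crm_member(3, NULL)
  * removes the entry with ID 3 if that node is inactive.
  */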
 
 static void
 crm_count_peer(gpointer key, gpointer value, gpointer user_data)
 {
     guint *count = user_data;
     crm_node_t *node = value;
 
     if (crm_is_peer_active(node)) {
         *count = *count + 1;
     }
 }
 
 guint
 crm_active_peers(void)
 {
     guint count = 0;
 
     if (crm_peer_cache) {
         g_hash_table_foreach(crm_peer_cache, crm_count_peer, &count);
     }
     return count;
 }
 
 static void
 destroy_crm_node(gpointer data)
 {
     crm_node_t *node = data;
 
     crm_trace("Destroying entry for node %u: %s", node->id, node->uname);
 
     free(node->addr);
     free(node->uname);
     free(node->state);
     free(node->uuid);
     free(node->expected);
     free(node);
 }
 
 void
 crm_peer_init(void)
 {
     if (crm_peer_cache == NULL) {
         crm_peer_cache = g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, free, destroy_crm_node);
     }
 
     if (crm_remote_peer_cache == NULL) {
         crm_remote_peer_cache = g_hash_table_new_full(crm_strcase_hash, crm_strcase_equal, NULL, destroy_crm_node);
     }
 }
 
 void
 crm_peer_destroy(void)
 {
     if (crm_peer_cache != NULL) {
         crm_trace("Destroying peer cache with %d members", g_hash_table_size(crm_peer_cache));
         g_hash_table_destroy(crm_peer_cache);
         crm_peer_cache = NULL;
     }
 
     if (crm_remote_peer_cache != NULL) {
         crm_trace("Destroying remote peer cache with %d members", g_hash_table_size(crm_remote_peer_cache));
         g_hash_table_destroy(crm_remote_peer_cache);
         crm_remote_peer_cache = NULL;
     }
 }
 
 void (*crm_status_callback) (enum crm_status_type, crm_node_t *, const void *) = NULL;
 
 /*!
  * \brief Set a client function that will be called after peer status changes
  *
  * \param[in] dispatch  Pointer to function to use as callback
  *
  * \note Previously, client callbacks were responsible for peer cache
  *       management. This is no longer the case, and client callbacks should do
  *       only client-specific handling. Callbacks MUST NOT add or remove entries
  *       in the peer caches.
  */
 void
 crm_set_status_callback(void (*dispatch) (enum crm_status_type, crm_node_t *, const void *))
 {
     crm_status_callback = dispatch;
 }
 
 /*!
  * \brief Tell the library whether to automatically reap lost nodes
  *
  * If TRUE (the default), calling crm_update_peer_proc() will also update the
  * peer state to CRM_NODE_MEMBER or CRM_NODE_LOST, and crm_update_peer_state()
  * will reap peers whose state changes to anything other than CRM_NODE_MEMBER.
  * Callers should leave this enabled unless they plan to manage the cache
  * separately on their own.
  *
  * \param[in] autoreap  TRUE to enable automatic reaping, FALSE to disable
  */
 void
 crm_set_autoreap(gboolean autoreap)
 {
     crm_autoreap = autoreap;
 }
 
 static void crm_dump_peer_hash(int level, const char *caller)
 {
     GHashTableIter iter;
     const char *id = NULL;
     crm_node_t *node = NULL;
 
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, (gpointer *) &id, (gpointer *) &node)) {
         do_crm_log(level, "%s: Node %u/%s = %p - %s", caller, node->id, node->uname, node, id);
     }
 }
 
 static gboolean crm_hash_find_by_data(gpointer key, gpointer value, gpointer user_data)
 {
     if(value == user_data) {
         return TRUE;
     }
     return FALSE;
 }
 
 crm_node_t *
 crm_find_peer_full(unsigned int id, const char *uname, int flags)
 {
     crm_node_t *node = NULL;
 
     CRM_ASSERT(id > 0 || uname != NULL);
 
     crm_peer_init();
 
     if (flags & CRM_GET_PEER_REMOTE) {
         node = g_hash_table_lookup(crm_remote_peer_cache, uname);
     }
 
     if (node == NULL && (flags & CRM_GET_PEER_CLUSTER)) {
         node = crm_find_peer(id, uname);
     }
     return node;
 }
 
 crm_node_t *
 crm_get_peer_full(unsigned int id, const char *uname, int flags)
 {
     crm_node_t *node = NULL;
 
     CRM_ASSERT(id > 0 || uname != NULL);
 
     crm_peer_init();
 
     if (flags & CRM_GET_PEER_REMOTE) {
         node = g_hash_table_lookup(crm_remote_peer_cache, uname);
     }
 
     if (node == NULL && (flags & CRM_GET_PEER_CLUSTER)) {
         node = crm_get_peer(id, uname);
     }
     return node;
 }
 
 crm_node_t *
 crm_find_peer(unsigned int id, const char *uname)
 {
     GHashTableIter iter;
     crm_node_t *node = NULL;
     crm_node_t *by_id = NULL;
     crm_node_t *by_name = NULL;
 
     CRM_ASSERT(id > 0 || uname != NULL);
 
     crm_peer_init();
 
     if (uname != NULL) {
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if(node->uname && strcasecmp(node->uname, uname) == 0) {
                 crm_trace("Name match: %s = %p", node->uname, node);
                 by_name = node;
                 break;
             }
         }
     }
 
     if (id > 0) {
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if(node->id == id) {
                 crm_trace("ID match: %u = %p", node->id, node);
                 by_id = node;
                 break;
             }
         }
     }
 
     node = by_id; /* Good default */
     if(by_id == by_name) {
         /* Nothing to do if they match (both NULL counts) */
         crm_trace("Consistent: %p for %u/%s", by_id, id, uname);
 
     } else if(by_id == NULL && by_name) {
         crm_trace("Only one: %p for %u/%s", by_name, id, uname);
 
         if(id && by_name->id) {
             crm_dump_peer_hash(LOG_WARNING, __FUNCTION__);
             crm_crit("Node %u and %u share the same name '%s'",
                      id, by_name->id, uname);
             node = NULL; /* Create a new one */
 
         } else {
             node = by_name;
         }
 
     } else if(by_name == NULL && by_id) {
         crm_trace("Only one: %p for %u/%s", by_id, id, uname);
 
         if(uname && by_id->uname) {
             crm_dump_peer_hash(LOG_WARNING, __FUNCTION__);
             crm_crit("Node '%s' and '%s' share the same cluster nodeid %u: assuming '%s' is correct",
                      uname, by_id->uname, id, uname);
         }
 
     } else if(uname && by_id->uname) {
         if(safe_str_eq(uname, by_id->uname)) {
             crm_notice("Node '%s' has changed its ID from %u to %u", by_id->uname, by_name->id, by_id->id);
             g_hash_table_foreach_remove(crm_peer_cache, crm_hash_find_by_data, by_name);
 
         } else {
             crm_warn("Node '%s' and '%s' share the same cluster nodeid: %u %s", by_id->uname, by_name->uname, id, uname);
             crm_dump_peer_hash(LOG_INFO, __FUNCTION__);
             crm_abort(__FILE__, __FUNCTION__, __LINE__, "member weirdness", TRUE, TRUE);
         }
 
     } else if(id && by_name->id) {
         crm_warn("Node %u and %u share the same name: '%s'", by_id->id, by_name->id, uname);
 
     } else {
         /* Simple merge */
 
         /* Only corosync-based clusters use node IDs.
          *
          * The functions that call crm_update_peer_state() only know the
          * nodeid, so 'by_id' is authoritative when merging.
          *
          * The same applies to crm_update_peer_proc().
          */
         crm_dump_peer_hash(LOG_DEBUG, __FUNCTION__);
 
         crm_info("Merging %p into %p", by_name, by_id);
         g_hash_table_foreach_remove(crm_peer_cache, crm_hash_find_by_data, by_name);
     }
 
     return node;
 }
 
 #if SUPPORT_COROSYNC
 static guint
 crm_remove_conflicting_peer(crm_node_t *node)
 {
     int matches = 0;
     GHashTableIter iter;
     crm_node_t *existing_node = NULL;
 
     if (node->id == 0 || node->uname == NULL) {
         return 0;
     }
 
 #  if !SUPPORT_PLUGIN
     if (corosync_cmap_has_config("nodelist") != 0) {
         return 0;
     }
 #  endif
 
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &existing_node)) {
         if (existing_node->id > 0
             && existing_node->id != node->id
             && existing_node->uname != NULL
             && strcasecmp(existing_node->uname, node->uname) == 0) {
 
             if (crm_is_peer_active(existing_node)) {
                 continue;
             }
 
             crm_warn("Removing cached offline node %u/%s which has conflicting uname with %u",
                      existing_node->id, existing_node->uname, node->id);
 
             g_hash_table_iter_remove(&iter);
             matches++;
         }
     }
 
     return matches;
 }
 #endif
 
 /* coverity[-alloc] Memory is referenced in one or both hashtables */
 crm_node_t *
 crm_get_peer(unsigned int id, const char *uname)
 {
     crm_node_t *node = NULL;
     char *uname_lookup = NULL;
 
     CRM_ASSERT(id > 0 || uname != NULL);
 
     crm_peer_init();
 
     node = crm_find_peer(id, uname);
 
     /* If uname wasn't provided, and crm_find_peer() did not turn up a uname
      * based on the id, look up the node name using the id in the cluster
      * membership.
      */
     if ((node == NULL || node->uname == NULL) && (uname == NULL)) {
         uname_lookup = get_node_name(id);
     }
 
     if (uname_lookup) {
         uname = uname_lookup;
         crm_trace("Inferred a name of '%s' for node %u", uname, id);
 
         /* Try once more to find the node, now that we know the uname. */
         if (node == NULL) {
             node = crm_find_peer(id, uname);
         }
     }
 
 
     if (node == NULL) {
         char *uniqueid = crm_generate_uuid();
 
         node = calloc(1, sizeof(crm_node_t));
         CRM_ASSERT(node);
 
         crm_info("Created entry %s/%p for node %s/%u (%d total)",
                  uniqueid, node, uname, id, 1 + g_hash_table_size(crm_peer_cache));
         g_hash_table_replace(crm_peer_cache, uniqueid, node);
     }
 
     if(id > 0 && uname && (node->id == 0 || node->uname == NULL)) {
         crm_info("Node %u is now known as %s", id, uname);
     }
 
     if(id > 0 && node->id == 0) {
         node->id = id;
     }
 
     if (uname && (node->uname == NULL)) {
         crm_update_peer_uname(node, uname);
     }
 
     if(node->uuid == NULL) {
         const char *uuid = crm_peer_uuid(node);
 
         if (uuid) {
             crm_info("Node %u has uuid %s", id, uuid);
 
         } else {
             crm_info("Cannot obtain a UUID for node %u/%s", id, node->uname);
         }
     }
 
     free(uname_lookup);
 
     return node;
 }
 
 /*!
  * \internal
  * \brief Update all of a node's information (process list, state, etc.)
  *
  * \param[in] source      Caller's function name (for log messages)
  * \param[in] id          Node ID to update
  * \param[in] born        Membership in which node was born (heartbeat/plugin)
  * \param[in] seen        Membership in which node was last seen
  * \param[in] votes       Number of the node's votes (plugin clusters only)
  * \param[in] children    Bitmask of the node's process list
  * \param[in] uuid        Node UUID (overridden for corosync-based clusters)
  * \param[in] uname       Node name
  * \param[in] addr        Node address (plugin clusters only)
  * \param[in] state       Node state, such as CRM_NODE_MEMBER
  *
  * \return NULL if node was reaped from peer caches, pointer to node otherwise
  *
  * \note This function should not be called within a peer cache iteration,
  *       otherwise reaping could invalidate the iterator.
  */
 crm_node_t *
 crm_update_peer(const char *source, unsigned int id, uint64_t born, uint64_t seen, int32_t votes,
                 uint32_t children, const char *uuid, const char *uname, const char *addr,
                 const char *state)
 {
 #if SUPPORT_PLUGIN
     gboolean addr_changed = FALSE;
     gboolean votes_changed = FALSE;
 #endif
     crm_node_t *node = NULL;
 
     id = get_corosync_id(id, uuid);
     node = crm_get_peer(id, uname);
 
     CRM_ASSERT(node != NULL);
 
     if (node->uuid == NULL) {
         if (is_openais_cluster()) {
             /* Yes, overrule whatever was passed in */
             crm_peer_uuid(node);
 
         } else if (uuid != NULL) {
             node->uuid = strdup(uuid);
         }
     }
 
     if (children > 0) {
         if (crm_update_peer_proc(source, node, children, state) == NULL) {
             return NULL;
         }
     }
 
     if (state != NULL) {
         if (crm_update_peer_state(source, node, state, seen) == NULL) {
             return NULL;
         }
     }
 #if SUPPORT_HEARTBEAT
     if (born != 0) {
         node->born = born;
     }
 #endif
 
 #if SUPPORT_PLUGIN
     /* These were only used by the plugin */
     if (born != 0) {
         node->born = born;
     }
 
     if (votes > 0 && node->votes != votes) {
         votes_changed = TRUE;
         node->votes = votes;
     }
 
     if (addr != NULL) {
         if (node->addr == NULL || crm_str_eq(node->addr, addr, FALSE) == FALSE) {
             addr_changed = TRUE;
             free(node->addr);
             node->addr = strdup(addr);
         }
     }
     if (addr_changed || votes_changed) {
         crm_info("%s: Node %s: id=%u state=%s addr=%s%s votes=%d%s born=" U64T " seen=" U64T
                  " proc=%.32x", source, node->uname, node->id, node->state,
                  node->addr, addr_changed ? " (new)" : "", node->votes,
                  votes_changed ? " (new)" : "", node->born, node->last_seen, node->processes);
     }
 #endif
 
     return node;
 }
 
 /*!
  * \internal
  * \brief Update a node's uname
  *
  * \param[in] node        Node object to update
  * \param[in] uname       New name to set
  *
  * \note This function should not be called within a peer cache iteration,
  *       because in some cases it can remove conflicting cache entries,
  *       which would invalidate the iterator.
  */
 void
 crm_update_peer_uname(crm_node_t *node, const char *uname)
 {
-    int i, len = strlen(uname);
+    CRM_CHECK(uname != NULL,
+              crm_err("Bug: can't update node name without name"); return);
+    CRM_CHECK(node != NULL,
+              crm_err("Bug: can't update node name to %s without node", uname);
+              return);
+
+    if (safe_str_eq(uname, node->uname)) {
+        crm_debug("Node uname '%s' did not change", uname);
+        return;
+    }
 
-    for (i = 0; i < len; i++) {
-        if (uname[i] >= 'A' && uname[i] <= 'Z') {
+    for (const char *c = uname; *c; ++c) {
+        if ((*c >= 'A') && (*c <= 'Z')) {
             crm_warn("Node names with capitals are discouraged, consider changing '%s'",
                      uname);
             break;
         }
     }
 
     free(node->uname);
     node->uname = strdup(uname);
+    CRM_ASSERT(node->uname != NULL);
+
     if (crm_status_callback) {
         crm_status_callback(crm_status_uname, node, NULL);
     }
 
 #if SUPPORT_COROSYNC
     if (is_openais_cluster() && !is_set(node->flags, crm_remote_node)) {
         crm_remove_conflicting_peer(node);
     }
 #endif
 }
 
 /*!
  * \internal
  * \brief Update a node's process information (and potentially state)
  *
  * \param[in] source      Caller's function name (for log messages)
  * \param[in] node        Node object to update
  * \param[in] flag        Bitmask of new process information
  * \param[in] status      node status (online, offline, etc.)
  *
  * \return NULL if any node was reaped from peer caches, value of node otherwise
  *
  * \note If this function returns NULL, the supplied node object was likely
  *       freed and should not be used again. This function should not be
  *       called within a cache iteration if reaping is possible, otherwise
  *       reaping could invalidate the iterator.
  */
 crm_node_t *
 crm_update_peer_proc(const char *source, crm_node_t * node, uint32_t flag, const char *status)
 {
     uint32_t last = 0;
     gboolean changed = FALSE;
 
     CRM_CHECK(node != NULL, crm_err("%s: Could not set %s to %s for NULL",
                                     source, peer2text(flag), status); return NULL);
 
     /* Pacemaker doesn't spawn processes on remote nodes */
     if (is_set(node->flags, crm_remote_node)) {
         return node;
     }
 
     last = node->processes;
     if (status == NULL) {
         node->processes = flag;
         if (node->processes != last) {
             changed = TRUE;
         }
 
     } else if (safe_str_eq(status, ONLINESTATUS)) {
         if ((node->processes & flag) != flag) {
             set_bit(node->processes, flag);
             changed = TRUE;
         }
 #if SUPPORT_PLUGIN
     } else if (safe_str_eq(status, CRM_NODE_MEMBER)) {
         if (flag > 0 && node->processes != flag) {
             node->processes = flag;
             changed = TRUE;
         }
 #endif
 
     } else if (node->processes & flag) {
         clear_bit(node->processes, flag);
         changed = TRUE;
     }
 
     if (changed) {
         if (status == NULL && flag <= crm_proc_none) {
             crm_info("%s: Node %s[%u] - all processes are now offline", source, node->uname,
                      node->id);
         } else {
             crm_info("%s: Node %s[%u] - %s is now %s", source, node->uname, node->id,
                      peer2text(flag), status);
         }
 
         /* Call the client callback first, then update the peer state,
          * in case the node will be reaped
          */
         if (crm_status_callback) {
             crm_status_callback(crm_status_processes, node, &last);
         }
 
         /* The client callback shouldn't touch the peer caches,
          * but as a safety net, bail if the peer cache was destroyed.
          */
         if (crm_peer_cache == NULL) {
             return NULL;
         }
 
         if (crm_autoreap) {
             node = crm_update_peer_state(__FUNCTION__, node,
                                          is_set(node->processes, crm_get_cluster_proc())?
                                          CRM_NODE_MEMBER : CRM_NODE_LOST, 0);
         }
     } else {
         crm_trace("%s: Node %s[%u] - %s is unchanged (%s)", source, node->uname, node->id,
                   peer2text(flag), status);
     }
     return node;
 }
 
 void
 crm_update_peer_expected(const char *source, crm_node_t * node, const char *expected)
 {
     char *last = NULL;
     gboolean changed = FALSE;
 
     CRM_CHECK(node != NULL, crm_err("%s: Could not set 'expected' to %s", source, expected);
               return);
 
     /* Remote nodes don't participate in joins */
     if (is_set(node->flags, crm_remote_node)) {
         return;
     }
 
     last = node->expected;
     if (expected != NULL && safe_str_neq(node->expected, expected)) {
         node->expected = strdup(expected);
         changed = TRUE;
     }
 
     if (changed) {
         crm_info("%s: Node %s[%u] - expected state is now %s (was %s)", source, node->uname, node->id,
                  expected, last);
         free(last);
     } else {
         crm_trace("%s: Node %s[%u] - expected state is unchanged (%s)", source, node->uname,
                   node->id, expected);
     }
 }
 
 /*!
  * \internal
  * \brief Update a node's state and membership information
  *
  * \param[in] source      Caller's function name (for log messages)
  * \param[in] node        Node object to update
  * \param[in] state       Node's new state
  * \param[in] membership  Node's new membership ID
  * \param[in] iter        If not NULL, pointer to node's peer cache iterator
  *
  * \return NULL if any node was reaped, value of node otherwise
  *
  * \note If this function returns NULL, the supplied node object was likely
  *       freed and should not be used again. This function may be called from
  *       within a peer cache iteration if the iterator is supplied.
  */
 static crm_node_t *
 crm_update_peer_state_iter(const char *source, crm_node_t * node, const char *state, int membership, GHashTableIter *iter)
 {
     gboolean is_member;
 
     CRM_CHECK(node != NULL,
               crm_err("Could not set state for unknown host to %s"
                       CRM_XS " source=%s", state, source);
               return NULL);
 
     is_member = safe_str_eq(state, CRM_NODE_MEMBER);
     if (membership && is_member) {
         node->last_seen = membership;
     }
 
     if (state && safe_str_neq(node->state, state)) {
         char *last = node->state;
         enum crm_status_type status_type = is_set(node->flags, crm_remote_node)?
                                            crm_status_rstate : crm_status_nstate;
 
         node->state = strdup(state);
         crm_notice("Node %s state is now %s " CRM_XS
                    " nodeid=%u previous=%s source=%s", node->uname, state,
                    node->id, (last? last : "unknown"), source);
         if (crm_status_callback) {
             crm_status_callback(status_type, node, last);
         }
         free(last);
 
         if (crm_autoreap && !is_member && !is_set(node->flags, crm_remote_node)) {
             /* We only autoreap from the peer cache, not the remote peer cache,
              * because the latter should be managed only by
              * crm_remote_peer_cache_refresh().
              */
             if(iter) {
                 crm_notice("Purged 1 peer with id=%u and/or uname=%s from the membership cache", node->id, node->uname);
                 g_hash_table_iter_remove(iter);
 
             } else {
                 reap_crm_member(node->id, node->uname);
             }
             node = NULL;
         }
 
     } else {
         crm_trace("Node %s state is unchanged (%s) " CRM_XS
                   " nodeid=%u source=%s", node->uname, state, node->id, source);
     }
     return node;
 }
 
 /*!
  * \brief Update a node's state and membership information
  *
  * \param[in] source      Caller's function name (for log messages)
  * \param[in] node        Node object to update
  * \param[in] state       Node's new state
  * \param[in] membership  Node's new membership ID
  *
  * \return NULL if any node was reaped, value of node otherwise
  *
  * \note If this function returns NULL, the supplied node object was likely
  *       freed and should not be used again. This function should not be
  *       called within a cache iteration if reaping is possible,
  *       otherwise reaping could invalidate the iterator.
  */
 crm_node_t *
 crm_update_peer_state(const char *source, crm_node_t * node, const char *state, int membership)
 {
     return crm_update_peer_state_iter(source, node, state, membership, NULL);
 }
 
 /*!
  * \internal
  * \brief Reap all nodes from cache whose membership information does not match
  *
  * \param[in] membership  Membership ID of nodes to keep
  */
 void
 crm_reap_unseen_nodes(uint64_t membership)
 {
     GHashTableIter iter;
     crm_node_t *node = NULL;
 
     crm_trace("Reaping unseen nodes...");
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *)&node)) {
         if (node->last_seen != membership) {
             if (node->state) {
                 /*
                  * Calling crm_update_peer_state_iter() allows us to
                  * remove the node from crm_peer_cache without
                  * invalidating our iterator
                  */
                 crm_update_peer_state_iter(__FUNCTION__, node, CRM_NODE_LOST, membership, &iter);
 
             } else {
                 crm_info("State of node %s[%u] is still unknown",
                          node->uname, node->id);
             }
         }
     }
 }
 
 int
 crm_terminate_member(int nodeid, const char *uname, void *unused)
 {
     /* Always use the synchronous, non-mainloop version */
     return stonith_api_kick(nodeid, uname, 120, TRUE);
 }
 
 int
 crm_terminate_member_no_mainloop(int nodeid, const char *uname, int *connection)
 {
     return stonith_api_kick(nodeid, uname, 120, TRUE);
 }
diff --git a/lib/common/strings.c b/lib/common/strings.c
index 573a14b197..6b3a7cf022 100644
--- a/lib/common/strings.c
+++ b/lib/common/strings.c
@@ -1,429 +1,453 @@
 /*
  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
  * This library is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #ifndef _GNU_SOURCE
 #  define _GNU_SOURCE
 #endif
 
 #include <stdio.h>
 #include <string.h>
 #include <stdlib.h>
 #include <bzlib.h>
 #include <sys/types.h>
 
 char *
 crm_concat(const char *prefix, const char *suffix, char join)
 {
     int len = 0;
     char *new_str = NULL;
 
     CRM_ASSERT(prefix != NULL);
     CRM_ASSERT(suffix != NULL);
     len = strlen(prefix) + strlen(suffix) + 2;
 
     new_str = malloc(len);
     if(new_str) {
         sprintf(new_str, "%s%c%s", prefix, join, suffix);
         new_str[len - 1] = 0;
     }
     return new_str;
 }
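
 /* For example (illustrative), crm_concat("myrsc", "monitor", '_') returns a
  * newly allocated string "myrsc_monitor", which the caller must free.
  */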
 
 char *
 crm_itoa_stack(int an_int, char *buffer, size_t len)
 {
     if (buffer != NULL) {
         snprintf(buffer, len, "%d", an_int);
     }
 
     return buffer;
 }
 
 char *
 crm_itoa(int an_int)
 {
     int len = 32;
     char *buffer = NULL;
 
     buffer = malloc(len + 1);
     if (buffer != NULL) {
         snprintf(buffer, len, "%d", an_int);
     }
 
     return buffer;
 }
 
 void
 g_hash_destroy_str(gpointer data)
 {
     free(data);
 }
 
 long long
 crm_int_helper(const char *text, char **end_text)
 {
     long long result = -1;
     char *local_end_text = NULL;
     int saved_errno = 0;
 
     errno = 0;
 
     if (text != NULL) {
 #ifdef ANSI_ONLY
         if (end_text != NULL) {
             result = strtol(text, end_text, 10);
         } else {
             result = strtol(text, &local_end_text, 10);
         }
 #else
         if (end_text != NULL) {
             result = strtoll(text, end_text, 10);
         } else {
             result = strtoll(text, &local_end_text, 10);
         }
 #endif
 
         saved_errno = errno;
         if (errno == EINVAL) {
             crm_err("Conversion of %s failed", text);
             result = -1;
 
         } else if (errno == ERANGE) {
             crm_err("Conversion of %s was clipped: %lld", text, result);
 
         } else if (errno != 0) {
             crm_perror(LOG_ERR, "Conversion of %s failed", text);
         }
 
         if (local_end_text != NULL && local_end_text[0] != '\0') {
             crm_err("Characters left over after parsing '%s': '%s'", text, local_end_text);
         }
 
         errno = saved_errno;
     }
     return result;
 }
 
 int
 crm_parse_int(const char *text, const char *default_text)
 {
     int atoi_result = -1;
 
     if (text != NULL) {
         atoi_result = crm_int_helper(text, NULL);
         if (errno == 0) {
             return atoi_result;
         }
     }
 
     if (default_text != NULL) {
         atoi_result = crm_int_helper(default_text, NULL);
         if (errno == 0) {
             return atoi_result;
         }
 
     } else {
         crm_err("No default conversion value supplied");
     }
 
     return -1;
 }
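
 /* For example (illustrative), crm_parse_int("42", NULL) returns 42, and
  * crm_parse_int(NULL, "5") falls back to the default text and returns 5.
  */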
 
 gboolean
 safe_str_neq(const char *a, const char *b)
 {
     if (a == b) {
         return FALSE;
 
     } else if (a == NULL || b == NULL) {
         return TRUE;
 
     } else if (strcasecmp(a, b) == 0) {
         return FALSE;
     }
     return TRUE;
 }
 
 gboolean
 crm_is_true(const char *s)
 {
     gboolean ret = FALSE;
 
     if (s != NULL) {
         crm_str_to_boolean(s, &ret);
     }
     return ret;
 }
 
 int
 crm_str_to_boolean(const char *s, int *ret)
 {
     if (s == NULL) {
         return -1;
 
     } else if (strcasecmp(s, "true") == 0
                || strcasecmp(s, "on") == 0
                || strcasecmp(s, "yes") == 0 || strcasecmp(s, "y") == 0 || strcasecmp(s, "1") == 0) {
         *ret = TRUE;
         return 1;
 
     } else if (strcasecmp(s, "false") == 0
                || strcasecmp(s, "off") == 0
                || strcasecmp(s, "no") == 0 || strcasecmp(s, "n") == 0 || strcasecmp(s, "0") == 0) {
         *ret = FALSE;
         return 1;
     }
     return -1;
 }
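
 /* For example (illustrative), crm_is_true("Yes") returns TRUE, while
  * crm_str_to_boolean("off", &ret) sets ret to FALSE and returns 1, and
  * crm_str_to_boolean("maybe", &ret) leaves ret untouched and returns -1.
  */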
 
 char *
 crm_strip_trailing_newline(char *str)
 {
     int len;
 
     if (str == NULL) {
         return str;
     }
 
     for (len = strlen(str) - 1; len >= 0 && str[len] == '\n'; len--) {
         str[len] = '\0';
     }
 
     return str;
 }
 
 gboolean
 crm_str_eq(const char *a, const char *b, gboolean use_case)
 {
     if (use_case) {
         return g_strcmp0(a, b) == 0;
 
         /* TODO - Figure out which calls, if any, really need to be case independent */
     } else if (a == b) {
         return TRUE;
 
     } else if (a == NULL || b == NULL) {
         /* shouldn't be comparing NULLs */
         return FALSE;
 
     } else if (strcasecmp(a, b) == 0) {
         return TRUE;
     }
     return FALSE;
 }
 
 static inline const char * null2emptystr(const char *);
 static inline const char *
 null2emptystr(const char *input)
 {
     return (input == NULL) ? "" : input;
 }
 
 static inline int crm_ends_with_internal(const char *, const char *, gboolean);
 static inline int
 crm_ends_with_internal(const char *s, const char *match, gboolean as_extension)
 {
     if ((s == NULL) || (match == NULL)) {
         return 0;
     } else {
         size_t slen, mlen;
 
         if (match[0] != '\0'
             && (as_extension /* following commented out for inefficiency:
                 || strchr(&match[1], match[0]) == NULL */))
                 return !strcmp(null2emptystr(strrchr(s, match[0])), match);
 
         if ((mlen = strlen(match)) == 0)
             return 1;
         slen = strlen(s);
         return ((slen >= mlen) && !strcmp(s + slen - mlen, match));
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a string ends with a certain sequence
  *
  * \param[in] s      String to check
  * \param[in] match  Sequence to match against end of \p s
  *
  * \return \c TRUE if \p s ends (verbatim, i.e., case sensitively)
  *         with match (including empty string), \c FALSE otherwise
  *
  * \see crm_ends_with_ext()
  */
 gboolean
 crm_ends_with(const char *s, const char *match)
 {
     return crm_ends_with_internal(s, match, FALSE);
 }
 
 /*!
  * \internal
  * \brief Check whether a string ends with a certain "extension"
  *
  * \param[in] s      String to check
  * \param[in] match  Extension to match against end of \p s, that is,
  *                   its first character must not occur anywhere
  *                   in the rest of that very sequence (example: file
  *                   extension where the last dot is its delimiter,
  *                   e.g., ".html"); incorrect results may be
  *                   returned otherwise.
  *
  * \return \c TRUE if \p s ends (verbatim, i.e., case sensitively)
  *         with "extension" designated as \p match (including empty
  *         string), \c FALSE otherwise
  *
  * \note The main incentive to prefer this function over \c crm_ends_with
  *       where possible is efficiency (at the cost of the added
  *       restriction on \p match as stated; the complexity class
  *       remains the same, though: O(M+N) vs. O(M+2N)).
  *
  * \see crm_ends_with()
  */
 gboolean
 crm_ends_with_ext(const char *s, const char *match)
 {
     return crm_ends_with_internal(s, match, TRUE);
 }
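
 /* For example (illustrative), crm_ends_with_ext("index.html", ".html")
  * returns TRUE, while crm_ends_with_ext("index.html", ".htm") returns FALSE
  * because the match is anchored at the last '.' in the string.
  */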
 
 /*
  * This re-implements g_str_hash as it was prior to glib2-2.28:
  *
  *   http://git.gnome.org/browse/glib/commit/?id=354d655ba8a54b754cb5a3efb42767327775696c
  *
  * Note that the new g_str_hash is presumably a *better* hash (it's actually
  * a correct implementation of DJB's hash), but we need to preserve existing
  * behaviour, because the hash key ultimately determines the "sort" order
  * when iterating through GHashTables, which affects allocation of scores to
  * clone instances when iterating through rsc->allowed_nodes.  It (somehow)
  * also appears to have some minor impact on the ordering of a few
  * pseudo_event IDs in the transition graph.
  */
 guint
 g_str_hash_traditional(gconstpointer v)
 {
     const signed char *p;
     guint32 h = 0;
 
     for (p = v; *p != '\0'; p++)
         h = (h << 5) - h + *p;
 
     return h;
 }
 
 guint
 crm_strcase_hash(gconstpointer v)
 {
     const signed char *p;
     guint32 h = 0;
 
     for (p = v; *p != '\0'; p++)
         h = (h << 5) - h + g_ascii_tolower(*p);
 
     return h;
 }
 
 static void
 copy_str_table_entry(gpointer key, gpointer value, gpointer user_data)
 {
     if (key && value && user_data) {
         g_hash_table_insert((GHashTable*)user_data, strdup(key), strdup(value));
     }
 }
 
 GHashTable *
 crm_str_table_dup(GHashTable *old_table)
 {
     GHashTable *new_table = NULL;
 
     if (old_table) {
         new_table = crm_str_table_new();
         g_hash_table_foreach(old_table, copy_str_table_entry, new_table);
     }
     return new_table;
 }
 
 char *
 add_list_element(char *list, const char *value)
 {
     int len = 0;
     int last = 0;
 
     if (value == NULL) {
         return list;
     }
     if (list) {
         last = strlen(list);
     }
     len = last + 2;             /* +1 space, +1 EOS */
     len += strlen(value);
     list = realloc_safe(list, len);
     sprintf(list + last, " %s", value);
     return list;
 }
 
 bool
 crm_compress_string(const char *data, int length, int max, char **result, unsigned int *result_len)
 {
     int rc;
     char *compressed = NULL;
     char *uncompressed = strdup(data);
     struct timespec after_t;
     struct timespec before_t;
 
     if(max == 0) {
         max = (length * 1.1) + 600; /* recommended size */
     }
 
 #ifdef CLOCK_MONOTONIC
     clock_gettime(CLOCK_MONOTONIC, &before_t);
 #endif
 
     /* coverity[returned_null] Ignore */
     compressed = malloc(max);
 
     *result_len = max;
     rc = BZ2_bzBuffToBuffCompress(compressed, result_len, uncompressed, length, CRM_BZ2_BLOCKS, 0,
                                   CRM_BZ2_WORK);
 
     free(uncompressed);
 
     if (rc != BZ_OK) {
         crm_err("Compression of %d bytes failed: %s (%d)", length, bz2_strerror(rc), rc);
         free(compressed);
         return FALSE;
     }
 
 #ifdef CLOCK_MONOTONIC
     clock_gettime(CLOCK_MONOTONIC, &after_t);
 
     crm_trace("Compressed %d bytes into %d (ratio %d:1) in %ldms",
              length, *result_len, length / (*result_len),
              (after_t.tv_sec - before_t.tv_sec) * 1000 + (after_t.tv_nsec -
                                                           before_t.tv_nsec) / 1000000);
 #else
     crm_trace("Compressed %d bytes into %d (ratio %d:1)",
              length, *result_len, length / (*result_len));
 #endif
 
     *result = compressed;
     return TRUE;
 }
+
+/*!
+ * \brief Compare two strings alphabetically (case-insensitive)
+ *
+ * \param[in] a  First string to compare
+ * \param[in] b  Second string to compare
+ *
+ * \return Negative if a sorts before b, zero if they sort equally, or
+ *         positive if a sorts after b
+ *
+ * \note Usable as a GCompareFunc with g_list_sort().
+ *       NULL is considered less than non-NULL.
+ */
+gint
+crm_alpha_sort(gconstpointer a, gconstpointer b)
+{
+    if (!a && !b) {
+        return 0;
+    } else if (!a) {
+        return -1;
+    } else if (!b) {
+        return 1;
+    }
+    return strcasecmp(a, b);
+}
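+
+/* For example (illustrative, with hypothetical node names), crm_alpha_sort()
+ * can sort a GList of strings case-insensitively:
+ *
+ *     GList *names = NULL;
+ *
+ *     names = g_list_prepend(names, strdup("node-B"));
+ *     names = g_list_prepend(names, strdup("node-a"));
+ *     names = g_list_sort(names, crm_alpha_sort);
+ *     // names now holds "node-a" followed by "node-B"
+ */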
diff --git a/lib/pengine/container.c b/lib/pengine/container.c
index 206aabe842..0a524896d5 100644
--- a/lib/pengine/container.c
+++ b/lib/pengine/container.c
@@ -1,1336 +1,1346 @@
 /*
  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
  * This library is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
+#include <ctype.h>
+
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <unpack.h>
 #include <crm/msg_xml.h>
 
 #define VARIANT_CONTAINER 1
 #include "./variant.h"
 
 void tuple_free(container_grouping_t *tuple);
 
 static char *
 next_ip(const char *last_ip)
 {
     unsigned int oct1 = 0;
     unsigned int oct2 = 0;
     unsigned int oct3 = 0;
     unsigned int oct4 = 0;
     int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);
 
     if (rc != 4) {
         /* @TODO check for IPv6 */
         return NULL;
 
     } else if (oct3 > 253) {
         return NULL;
 
     } else if (oct4 > 253) {
         ++oct3;
         oct4 = 1;
 
     } else {
         ++oct4;
     }
 
     return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
 }
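
 /* For example (illustrative), next_ip("10.0.0.5") yields "10.0.0.6", and
  * next_ip("10.0.0.254") rolls over to "10.0.1.1"; NULL is returned for
  * non-IPv4 input or when the third octet is exhausted.
  */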
 
 static int
 allocate_ip(container_variant_data_t *data, container_grouping_t *tuple, char *buffer, int max) 
 {
     if(data->ip_range_start == NULL) {
         return 0;
 
     } else if(data->ip_last) {
         tuple->ipaddr = next_ip(data->ip_last);
 
     } else {
         tuple->ipaddr = strdup(data->ip_range_start);
     }
 
     data->ip_last = tuple->ipaddr;
 #if 0
     return snprintf(buffer, max, " --add-host=%s-%d:%s --link %s-docker-%d:%s-link-%d",
                     data->prefix, tuple->offset, tuple->ipaddr,
                     data->prefix, tuple->offset, data->prefix, tuple->offset);
 #else
     if (data->type == PE_CONTAINER_TYPE_DOCKER) {
         return snprintf(buffer, max, " --add-host=%s-%d:%s",
                         data->prefix, tuple->offset, tuple->ipaddr);
     } else if (data->type == PE_CONTAINER_TYPE_RKT) {
         return snprintf(buffer, max, " --hosts-entry=%s=%s-%d",
                         tuple->ipaddr, data->prefix, tuple->offset);
     } else {
         return 0;
     }
 #endif
 }
 
 static xmlNode *
 create_resource(const char *name, const char *provider, const char *kind) 
 {
     xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
 
     crm_xml_add(rsc, XML_ATTR_ID, name);
     crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
     crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
     crm_xml_add(rsc, XML_ATTR_TYPE, kind);
 
     return rsc;
 }
 
 /*!
  * \internal
  * \brief Check whether cluster can manage resource inside container
  *
  * \param[in] data  Container variant data
  *
  * \return TRUE if networking configuration is acceptable, FALSE otherwise
  *
  * \note The resource is manageable if an IP range or control port has been
  *       specified. If a control port is used without an IP range, replicas per
  *       host must be 1.
  */
 static bool
 valid_network(container_variant_data_t *data)
 {
     if(data->ip_range_start) {
         return TRUE;
     }
     if(data->control_port) {
         if(data->replicas_per_host > 1) {
             pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
             data->replicas_per_host = 1;
             /* @TODO to be sure: clear_bit(rsc->flags, pe_rsc_unique); */
         }
         return TRUE;
     }
     return FALSE;
 }
 
 static bool
 create_ip_resource(
     resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
     pe_working_set_t * data_set) 
 {
     if(data->ip_range_start) {
         char *id = NULL;
         xmlNode *xml_ip = NULL;
         xmlNode *xml_obj = NULL;
 
         id = crm_strdup_printf("%s-ip-%s", data->prefix, tuple->ipaddr);
         crm_xml_sanitize_id(id);
         xml_ip = create_resource(id, "heartbeat", "IPaddr2");
         free(id);
 
         xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "ip", tuple->ipaddr);
         if(data->host_network) {
             crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
         }
 
         if(data->host_netmask) {
             crm_create_nvpair_xml(xml_obj, NULL,
                                   "cidr_netmask", data->host_netmask);
 
         } else {
             crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
         }
 
         xml_obj = create_xml_node(xml_ip, "operations");
         crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
 
         if (common_unpack(xml_ip, &tuple->ip, parent, data_set) == FALSE) {
             return FALSE;
         }
 
         parent->children = g_list_append(parent->children, tuple->ip);
     }
     return TRUE;
 }
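 
 /* A sketch of the primitive generated above, with hypothetical values and
  * the generated nvpair/op ids omitted for brevity:
  *   <primitive id="httpd-bundle-ip-10.0.0.1" class="ocf"
  *              provider="heartbeat" type="IPaddr2">
  *     <instance_attributes id="httpd-bundle-attributes-0">
  *       <nvpair name="ip" value="10.0.0.1"/>
  *       <nvpair name="cidr_netmask" value="32"/>
  *     </instance_attributes>
  *     <operations>
  *       <op name="monitor" interval="60s"/>
  *     </operations>
  *   </primitive>
  */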
 
 static bool
 create_docker_resource(
     resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
     pe_working_set_t * data_set) 
 {
         int offset = 0, max = 4096;
         char *buffer = calloc(1, max+1);
 
         int doffset = 0, dmax = 1024;
         char *dbuffer = calloc(1, dmax+1);
 
         char *id = NULL;
         xmlNode *xml_docker = NULL;
         xmlNode *xml_obj = NULL;
 
         id = crm_strdup_printf("%s-docker-%d", data->prefix, tuple->offset);
         crm_xml_sanitize_id(id);
         xml_docker = create_resource(id, "heartbeat", "docker");
         free(id);
 
         xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
         crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
         crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
         crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
 
         offset += snprintf(buffer+offset, max-offset, " --restart=no");
 
         /* Set a container hostname only if we have an IP to map it to.
          * The user can set -h or --uts=host themselves if they want a nicer
          * name for logs, but setting one here helps applications that need
          * their hostname to match the IP they bind to.
          */
         if (data->ip_range_start != NULL) {
             offset += snprintf(buffer+offset, max-offset, " -h %s-%d",
                                data->prefix, tuple->offset);
         }
 
         if(data->docker_network) {
 //        offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr);
             offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network);
         }
 
         if(data->control_port) {
             offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%s", data->control_port);
         } else {
             offset += snprintf(buffer+offset, max-offset, " -e PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
         }
 
         for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
             container_mount_t *mount = pIter->data;
 
             if(mount->flags) {
                 char *source = crm_strdup_printf(
                     "%s/%s-%d", mount->source, data->prefix, tuple->offset);
 
                 if(doffset > 0) {
                     doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
                 }
                 doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
                 offset += snprintf(buffer+offset, max-offset, " -v %s:%s", source, mount->target);
                 free(source);
 
             } else {
                 offset += snprintf(buffer+offset, max-offset, " -v %s:%s", mount->source, mount->target);
             }
             if(mount->options) {
                 offset += snprintf(buffer+offset, max-offset, ":%s", mount->options);
             }
         }
 
         for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
             container_port_t *port = pIter->data;
 
             if(tuple->ipaddr) {
                 offset += snprintf(buffer+offset, max-offset, " -p %s:%s:%s",
                                    tuple->ipaddr, port->source, port->target);
             } else {
                 offset += snprintf(buffer+offset, max-offset, " -p %s:%s", port->source, port->target);
             }
         }
 
         if(data->docker_run_options) {
             offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options);
         }
 
         if(data->docker_host_options) {
             offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options);
         }
 
         crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
         free(buffer);
 
         crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
         free(dbuffer);
 
         if(tuple->child) {
             if(data->docker_run_command) {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", data->docker_run_command);
             } else {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", SBIN_DIR "/pacemaker_remoted");
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We just want to know if the container is alive, we'll
              * monitor the child independently
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         /* } else if(child && data->untrusted) {
          * Support this use-case?
          *
          * The ability to have resources started/stopped by us, but
          * unable to set attributes, etc.
          *
          * Arguably it is better to control API access with ACLs, as for
          * "normal" remote nodes
          *
          *     crm_create_nvpair_xml(xml_obj, NULL,
          *                           "run_cmd", "/usr/libexec/pacemaker/lrmd");
          *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
          *         "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
          */
         } else {
             if(data->docker_run_command) {
                 crm_create_nvpair_xml(xml_obj, NULL,
                                       "run_cmd", data->docker_run_command);
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We don't know what's in the container, so we just want
              * to know if it is alive
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         }
 
         xml_obj = create_xml_node(xml_docker, "operations");
         crm_create_op_xml(xml_obj, ID(xml_docker), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
 
         if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) {
             return FALSE;
         }
         parent->children = g_list_append(parent->children, tuple->docker);
         return TRUE;
 }
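 
 /* A sketch of a resulting run_opts value, with a hypothetical prefix, mount
  * and port mapping for illustration (a single line in practice):
  *   " --restart=no -h httpd-bundle-0 -e PCMK_remote_port=3121
  *     -v /etc/pacemaker/authkey:/etc/pacemaker/authkey
  *     -p 10.0.0.1:80:80"
  */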
 
 static bool
 create_rkt_resource(
     resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
     pe_working_set_t * data_set)
 {
         int offset = 0, max = 4096;
         char *buffer = calloc(1, max+1);
 
         int doffset = 0, dmax = 1024;
         char *dbuffer = calloc(1, dmax+1);
 
         char *id = NULL;
         xmlNode *xml_docker = NULL;
         xmlNode *xml_obj = NULL;
 
         int volid = 0;
 
         id = crm_strdup_printf("%s-rkt-%d", data->prefix, tuple->offset);
         crm_xml_sanitize_id(id);
         xml_docker = create_resource(id, "heartbeat", "rkt");
         free(id);
 
         xml_obj = create_xml_node(xml_docker, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, tuple->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
         crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", "true");
         crm_create_nvpair_xml(xml_obj, NULL, "force_kill", "false");
         crm_create_nvpair_xml(xml_obj, NULL, "reuse", "false");
 
         /* Set a container hostname only if we have an IP to map it to.
          * The user can set -h or --uts=host themselves if they want a nicer
          * name for logs, but setting one here helps applications that need
          * their hostname to match the IP they bind to.
          */
         if (data->ip_range_start != NULL) {
             offset += snprintf(buffer+offset, max-offset, " --hostname=%s-%d",
                                data->prefix, tuple->offset);
         }
 
         if(data->docker_network) {
 //        offset += snprintf(buffer+offset, max-offset, " --link-local-ip=%s", tuple->ipaddr);
             offset += snprintf(buffer+offset, max-offset, " --net=%s", data->docker_network);
         }
 
         if(data->control_port) {
             offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%s", data->control_port);
         } else {
             offset += snprintf(buffer+offset, max-offset, " --environment=PCMK_remote_port=%d", DEFAULT_REMOTE_PORT);
         }
 
         for(GListPtr pIter = data->mounts; pIter != NULL; pIter = pIter->next) {
             container_mount_t *mount = pIter->data;
 
             if(mount->flags) {
                 char *source = crm_strdup_printf(
                     "%s/%s-%d", mount->source, data->prefix, tuple->offset);
 
                 if(doffset > 0) {
                     doffset += snprintf(dbuffer+doffset, dmax-doffset, ",");
                 }
                 doffset += snprintf(dbuffer+doffset, dmax-doffset, "%s", source);
                 offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, source);
                 if(mount->options) {
                     offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
                 }
                 offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
                 free(source);
 
             } else {
                 offset += snprintf(buffer+offset, max-offset, " --volume vol%d,kind=host,source=%s", volid, mount->source);
                 if(mount->options) {
                     offset += snprintf(buffer+offset, max-offset, ",%s", mount->options);
                 }
                 offset += snprintf(buffer+offset, max-offset, " --mount volume=vol%d,target=%s", volid, mount->target);
             }
             volid++;
         }
 
         for(GListPtr pIter = data->ports; pIter != NULL; pIter = pIter->next) {
             container_port_t *port = pIter->data;
 
             if(tuple->ipaddr) {
                 offset += snprintf(buffer+offset, max-offset, " --port=%s:%s:%s",
                                    port->target, tuple->ipaddr, port->source);
             } else {
                 offset += snprintf(buffer+offset, max-offset, " --port=%s:%s", port->target, port->source);
             }
         }
 
         if(data->docker_run_options) {
             offset += snprintf(buffer+offset, max-offset, " %s", data->docker_run_options);
         }
 
         if(data->docker_host_options) {
             offset += snprintf(buffer+offset, max-offset, " %s", data->docker_host_options);
         }
 
         crm_create_nvpair_xml(xml_obj, NULL, "run_opts", buffer);
         free(buffer);
 
         crm_create_nvpair_xml(xml_obj, NULL, "mount_points", dbuffer);
         free(dbuffer);
 
         if(tuple->child) {
             if(data->docker_run_command) {
                 crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", data->docker_run_command);
             } else {
                 crm_create_nvpair_xml(xml_obj, NULL, "run_cmd", SBIN_DIR"/pacemaker_remoted");
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We just want to know if the container is alive, we'll
              * monitor the child independently
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         /* } else if(child && data->untrusted) {
          * Support this use-case?
          *
          * The ability to have resources started/stopped by us, but
          * unable to set attributes, etc.
          *
          * Arguably it is better to control API access with ACLs, as for
          * "normal" remote nodes
          *
          *     crm_create_nvpair_xml(xml_obj, NULL,
          *                           "run_cmd", "/usr/libexec/pacemaker/lrmd");
          *     crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
          *         "/usr/libexec/pacemaker/lrmd_internal_ctl -c poke");
          */
         } else {
             if(data->docker_run_command) {
                 crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                       data->docker_run_command);
             }
 
             /* TODO: Allow users to specify their own?
              *
              * We don't know what's in the container, so we just want
              * to know if it is alive
              */
             crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
         }
 
         xml_obj = create_xml_node(xml_docker, "operations");
         crm_create_op_xml(xml_obj, ID(xml_docker), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
 
         if (common_unpack(xml_docker, &tuple->docker, parent, data_set) == FALSE) {
             return FALSE;
         }
         parent->children = g_list_append(parent->children, tuple->docker);
         return TRUE;
 }
 
 /*!
  * \brief Ban a node from a resource's (and its children's) allowed nodes list
  *
  * \param[in,out] rsc    Resource to modify
  * \param[in]     uname  Name of node to ban
  */
 static void
 disallow_node(resource_t *rsc, const char *uname)
 {
     gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
 
     if (match) {
         ((pe_node_t *) match)->weight = -INFINITY;
         ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
     }
     if (rsc->children) {
         GListPtr child;
 
         for (child = rsc->children; child != NULL; child = child->next) {
             disallow_node((resource_t *) (child->data), uname);
         }
     }
 }
 
 static bool
 create_remote_resource(
     resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
     pe_working_set_t * data_set) 
 {
     if (tuple->child && valid_network(data)) {
         GHashTableIter gIter;
         GListPtr rsc_iter = NULL;
         node_t *node = NULL;
         xmlNode *xml_remote = NULL;
         char *id = crm_strdup_printf("%s-%d", data->prefix, tuple->offset);
         char *port_s = NULL;
         const char *uname = NULL;
         const char *connect_name = NULL;
 
         if (remote_id_conflict(id, data_set)) {
             free(id);
             // The biggest hammer we have
             id = crm_strdup_printf("pcmk-internal-%s-remote-%d", tuple->child->id, tuple->offset);
             CRM_ASSERT(remote_id_conflict(id, data_set) == FALSE);
         }
 
         /* REMOTE_CONTAINER_HACK: When the connection does not have its own
          * IP, we use the magic string "#uname" as the server name, which
          * supports nested remotes (i.e. a bundle running on a remote node).
          */
         connect_name = (tuple->ipaddr? tuple->ipaddr : "#uname");
 
         if (data->control_port == NULL) {
             port_s = crm_itoa(DEFAULT_REMOTE_PORT);
         }
 
         /* This sets tuple->docker as tuple->remote's container, which is
          * similar to what happens with guest nodes. This is how the PE knows
          * that the bundle node is fenced by recovering docker, and that
          * remote should be ordered relative to docker.
          */
         xml_remote = pe_create_remote_xml(NULL, id, tuple->docker->id,
                                           XML_BOOLEAN_FALSE, NULL, "60s", NULL,
                                           NULL, connect_name,
                                           (data->control_port?
                                            data->control_port : port_s));
         free(port_s);
 
         /* Abandon our created ID, and pull the copy from the XML, because we
          * need something that will get freed during data set cleanup to use as
          * the node ID and uname.
          */
         free(id);
         id = NULL;
         uname = ID(xml_remote);
 
         /* Ensure a node has been created for the guest (it may have already
          * been, if it has a permanent node attribute), and ensure its weight is
          * -INFINITY so no other resources can run on it.
          */
         node = pe_find_node(data_set->nodes, uname);
         if (node == NULL) {
             node = pe_create_node(uname, uname, "remote", "-INFINITY",
                                   data_set);
         } else {
             node->weight = -INFINITY;
         }
         node->rsc_discover_mode = pe_discover_never;
 
         /* unpack_remote_nodes() ensures that each remote node and guest node
          * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
          * Unfortunately, a bundle has to be mostly unpacked before it's obvious
          * what nodes will be needed, so we do it just above.
          *
          * Worse, that means that the node may have been utilized while
          * unpacking other resources, without our weight correction. The most
          * likely place for this to happen is when common_unpack() calls
          * resource_location() to set a default score in symmetric clusters.
          * This adds a node *copy* to each resource's allowed nodes, and these
          * copies will have the wrong weight.
          *
          * As a hacky workaround, fix those copies here.
          *
          * @TODO Possible alternative: ensure bundles are unpacked before other
          * resources, so the weight is correct before any copies are made.
          */
         for (rsc_iter = data_set->resources; rsc_iter; rsc_iter = rsc_iter->next) {
             disallow_node((resource_t *) (rsc_iter->data), uname);
         }
 
         tuple->node = node_copy(node);
         tuple->node->weight = 500;
         tuple->node->rsc_discover_mode = pe_discover_exclusive;
 
         /* Ensure the node shows up as allowed and with the correct discovery set */
         g_hash_table_insert(tuple->child->allowed_nodes, (gpointer) tuple->node->details->id, node_copy(tuple->node));
 
         if (common_unpack(xml_remote, &tuple->remote, parent, data_set) == FALSE) {
             return FALSE;
         }
 
         g_hash_table_iter_init(&gIter, tuple->remote->allowed_nodes);
         while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
             if(is_remote_node(node)) {
                 /* Remote resources can only run on 'normal' cluster nodes */
                 node->weight = -INFINITY;
             }
         }
 
         tuple->node->details->remote_rsc = tuple->remote;
 
         /* A bundle's #kind is closer to "container" (guest node) than the
          * "remote" set by pe_create_node().
          */
         g_hash_table_insert(tuple->node->details->attrs,
                             strdup(CRM_ATTR_KIND), strdup("container"));
 
         /* One effect of this is that setup_container() will add
          * tuple->remote to tuple->docker's fillers, which will make
          * rsc_contains_remote_node() true for tuple->docker.
          *
          * tuple->child does NOT get added to tuple->docker's fillers.
          * The only noticeable effect if it did would be for its fail count to
          * be taken into account when checking tuple->docker's migration
          * threshold.
          */
         parent->children = g_list_append(parent->children, tuple->remote);
     }
     return TRUE;
 }
 
 static bool
 create_container(
     resource_t *parent, container_variant_data_t *data, container_grouping_t *tuple,
     pe_working_set_t * data_set)
 {
     if (data->type == PE_CONTAINER_TYPE_DOCKER &&
           create_docker_resource(parent, data, tuple, data_set) == FALSE) {
         return FALSE;
     }
     if (data->type == PE_CONTAINER_TYPE_RKT &&
           create_rkt_resource(parent, data, tuple, data_set) == FALSE) {
         return FALSE;
     }
 
     if(create_ip_resource(parent, data, tuple, data_set) == FALSE) {
         return FALSE;
     }
     if(create_remote_resource(parent, data, tuple, data_set) == FALSE) {
         return FALSE;
     }
     if(tuple->child && tuple->ipaddr) {
         add_hash_param(tuple->child->meta, "external-ip", tuple->ipaddr);
     }
 
     if(tuple->remote) {
         /*
          * Allow the remote connection resource to be allocated to a
          * different node than the one on which the docker container
          * is active.
          *
          * Makes it possible to have remote nodes, running docker
          * containers with pacemaker_remoted inside in order to start
          * services inside those containers.
          */
         set_bit(tuple->remote->flags, pe_rsc_allow_remote_remotes);
     }
 
     return TRUE;
 }
 
 static void
 mount_add(container_variant_data_t *container_data, const char *source,
           const char *target, const char *options, int flags)
 {
     container_mount_t *mount = calloc(1, sizeof(container_mount_t));
 
     mount->source = strdup(source);
     mount->target = strdup(target);
     if (options) {
         mount->options = strdup(options);
     }
     mount->flags = flags;
     container_data->mounts = g_list_append(container_data->mounts, mount);
 }
 
 static void mount_free(container_mount_t *mount)
 {
     free(mount->source);
     free(mount->target);
     free(mount->options);
     free(mount);
 }
 
 static void port_free(container_port_t *port)
 {
     free(port->source);
     free(port->target);
     free(port);
 }
 
 gboolean
 container_unpack(resource_t * rsc, pe_working_set_t * data_set)
 {
     const char *value = NULL;
     xmlNode *xml_obj = NULL;
     xmlNode *xml_resource = NULL;
     container_variant_data_t *container_data = NULL;
 
     CRM_ASSERT(rsc != NULL);
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     container_data = calloc(1, sizeof(container_variant_data_t));
     rsc->variant_opaque = container_data;
     container_data->prefix = strdup(rsc->id);
 
     xml_obj = first_named_child(rsc->xml, "docker");
     if (xml_obj != NULL) {
         container_data->type = PE_CONTAINER_TYPE_DOCKER;
     } else {
         xml_obj = first_named_child(rsc->xml, "rkt");
         if (xml_obj != NULL) {
             container_data->type = PE_CONTAINER_TYPE_RKT;
         } else {
             return FALSE;
         }
     }
 
     value = crm_element_value(xml_obj, "masters");
     container_data->masters = crm_parse_int(value, "0");
     if (container_data->masters < 0) {
         pe_err("'masters' for %s must be nonnegative integer, using 0",
                rsc->id);
         container_data->masters = 0;
     }
 
     value = crm_element_value(xml_obj, "replicas");
     if ((value == NULL) && (container_data->masters > 0)) {
         container_data->replicas = container_data->masters;
     } else {
         container_data->replicas = crm_parse_int(value, "1");
     }
     if (container_data->replicas < 1) {
         pe_err("'replicas' for %s must be positive integer, using 1", rsc->id);
         container_data->replicas = 1;
     }
 
     /*
      * Communication between containers on the same host via the
      * floating IPs only works if docker is started with:
      *   --userland-proxy=false --ip-masq=false
      */
     value = crm_element_value(xml_obj, "replicas-per-host");
     container_data->replicas_per_host = crm_parse_int(value, "1");
     if (container_data->replicas_per_host < 1) {
         pe_err("'replicas-per-host' for %s must be positive integer, using 1",
                rsc->id);
         container_data->replicas_per_host = 1;
     }
     if (container_data->replicas_per_host == 1) {
         clear_bit(rsc->flags, pe_rsc_unique);
     }
 
     container_data->docker_run_command = crm_element_value_copy(xml_obj, "run-command");
     container_data->docker_run_options = crm_element_value_copy(xml_obj, "options");
     container_data->image = crm_element_value_copy(xml_obj, "image");
     container_data->docker_network = crm_element_value_copy(xml_obj, "network");
 
     xml_obj = first_named_child(rsc->xml, "network");
     if(xml_obj) {
 
         container_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
         container_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
         container_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
         container_data->control_port = crm_element_value_copy(xml_obj, "control-port");
 
         for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
              xml_child = __xml_next_element(xml_child)) {
 
             container_port_t *port = calloc(1, sizeof(container_port_t));
             port->source = crm_element_value_copy(xml_child, "port");
 
             if(port->source == NULL) {
                 port->source = crm_element_value_copy(xml_child, "range");
             } else {
                 port->target = crm_element_value_copy(xml_child, "internal-port");
             }
 
             if(port->source != NULL && strlen(port->source) > 0) {
                 if(port->target == NULL) {
                     port->target = strdup(port->source);
                 }
                 container_data->ports = g_list_append(container_data->ports, port);
 
             } else {
                 pe_err("Invalid port directive %s", ID(xml_child));
                 port_free(port);
             }
         }
     }
 
     xml_obj = first_named_child(rsc->xml, "storage");
     for (xmlNode *xml_child = __xml_first_child_element(xml_obj); xml_child != NULL;
          xml_child = __xml_next_element(xml_child)) {
 
         const char *source = crm_element_value(xml_child, "source-dir");
         const char *target = crm_element_value(xml_child, "target-dir");
         const char *options = crm_element_value(xml_child, "options");
         int flags = 0;
 
         if (source == NULL) {
             source = crm_element_value(xml_child, "source-dir-root");
             flags = 1;
         }
 
         if (source && target) {
             mount_add(container_data, source, target, options, flags);
         } else {
             pe_err("Invalid mount directive %s", ID(xml_child));
         }
     }
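 
     /* The network and storage parsing above handles bundle configuration
      * along these lines (a sketch; attribute values are hypothetical):
      *   <bundle id="httpd-bundle">
      *     <docker image="pcmk:httpd" replicas="3"/>
      *     <network ip-range-start="10.0.0.1" host-netmask="24"
      *              host-interface="eth0">
      *       <port-mapping id="http-port" port="80"/>
      *     </network>
      *     <storage>
      *       <storage-mapping id="www" source-dir="/srv/www"
      *                        target-dir="/var/www/html" options="rw"/>
      *     </storage>
      *     <primitive ... />
      *   </bundle>
      */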
 
     xml_obj = first_named_child(rsc->xml, "primitive");
     if (xml_obj && valid_network(container_data)) {
         char *value = NULL;
         xmlNode *xml_set = NULL;
 
         if(container_data->masters > 0) {
             xml_resource = create_xml_node(NULL, XML_CIB_TAG_MASTER);
 
         } else {
             xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);
         }
 
         crm_xml_set_id(xml_resource, "%s-%s", container_data->prefix, xml_resource->name);
 
         xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
         crm_xml_set_id(xml_set, "%s-%s-meta", container_data->prefix, xml_resource->name);
 
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);
 
         value = crm_itoa(container_data->replicas);
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_INCARNATION_MAX, value);
         free(value);
 
         value = crm_itoa(container_data->replicas_per_host);
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_INCARNATION_NODEMAX, value);
         free(value);
 
         crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
                 (container_data->replicas_per_host > 1)?
                 XML_BOOLEAN_TRUE : XML_BOOLEAN_FALSE);
 
         if(container_data->masters) {
             value = crm_itoa(container_data->masters);
             crm_create_nvpair_xml(xml_set, NULL,
                                   XML_RSC_ATTR_MASTER_MAX, value);
             free(value);
         }
 
         //crm_xml_add(xml_obj, XML_ATTR_ID, container_data->prefix);
         add_node_copy(xml_resource, xml_obj);
 
     } else if(xml_obj) {
         pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
                rsc->id, ID(xml_obj));
         return FALSE;
     }
 
     if(xml_resource) {
         int lpc = 0;
         GListPtr childIter = NULL;
         resource_t *new_rsc = NULL;
         container_port_t *port = NULL;
         const char *key_loc = NULL;
 
         int offset = 0, max = 1024;
         char *buffer = NULL;
 
         if (common_unpack(xml_resource, &new_rsc, rsc, data_set) == FALSE) {
             pe_err("Failed unpacking resource %s", ID(rsc->xml));
             if (new_rsc != NULL && new_rsc->fns != NULL) {
                 new_rsc->fns->free(new_rsc);
             }
             return FALSE;
         }
 
         container_data->child = new_rsc;
 
         /* We map the remote authentication key (likely) used on the DC to the
          * default key location inside the container. This is only the likely
          * location because an actual connection will do some validity checking
          * on the file before using it.
          *
          * Mapping to the default location inside the container avoids having to
          * pass another environment variable to the container.
          *
          * This makes several assumptions:
          * - if PCMK_authkey_location is set, it has the same value on all nodes
          * - the container technology does not propagate host environment
          *   variables to the container
          * - the user does not set this environment variable via their container
          *   image
          *
          * @TODO A convoluted but possible way around the first limitation would
          *       be to allow a resource parameter to include environment
          *       variable references in its value, and resolve them on the
          *       executing node's crmd before sending the command to the lrmd.
          */
         key_loc = getenv("PCMK_authkey_location");
         if (key_loc == NULL) {
             key_loc = DEFAULT_REMOTE_KEY_LOCATION;
         }
         mount_add(container_data, key_loc, DEFAULT_REMOTE_KEY_LOCATION, NULL,
                   0);
 
         mount_add(container_data, CRM_LOG_DIR "/bundles", "/var/log", NULL, 1);
 
         port = calloc(1, sizeof(container_port_t));
         if(container_data->control_port) {
             port->source = strdup(container_data->control_port);
         } else {
             /* If we wanted to respect PCMK_remote_port, we could use
              * crm_default_remote_port() here and elsewhere in this file instead
              * of DEFAULT_REMOTE_PORT.
              *
              * However, it gains nothing, since we control both the container
              * environment and the connection resource parameters, and the user
              * can use a different port if desired by setting control-port.
              */
             port->source = crm_itoa(DEFAULT_REMOTE_PORT);
         }
         port->target = strdup(port->source);
         container_data->ports = g_list_append(container_data->ports, port);
 
         buffer = calloc(1, max+1);
         for(childIter = container_data->child->children; childIter != NULL; childIter = childIter->next) {
             container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t));
             tuple->child = childIter->data;
             tuple->offset = lpc++;
 
             // Ensure the child's notify gets set based on the underlying primitive's value
             if(is_set(tuple->child->flags, pe_rsc_notify)) {
                 set_bit(container_data->child->flags, pe_rsc_notify);
             }
 
             offset += allocate_ip(container_data, tuple, buffer+offset, max-offset);
             container_data->tuples = g_list_append(container_data->tuples, tuple);
             container_data->attribute_target = g_hash_table_lookup(tuple->child->meta, XML_RSC_ATTR_TARGET);
         }
         container_data->docker_host_options = buffer;
         if(container_data->attribute_target) {
             g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET), strdup(container_data->attribute_target));
             g_hash_table_replace(container_data->child->meta, strdup(XML_RSC_ATTR_TARGET), strdup(container_data->attribute_target));
         }
 
     } else {
         // Just a naked container, no pacemaker-remote
         int offset = 0, max = 1024;
         char *buffer = calloc(1, max+1);
 
         for(int lpc = 0; lpc < container_data->replicas; lpc++) {
             container_grouping_t *tuple = calloc(1, sizeof(container_grouping_t));
             tuple->offset = lpc;
             offset += allocate_ip(container_data, tuple, buffer+offset, max-offset);
             container_data->tuples = g_list_append(container_data->tuples, tuple);
         }
 
         container_data->docker_host_options = buffer;
     }
 
     for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
         container_grouping_t *tuple = (container_grouping_t *)gIter->data;
         if (create_container(rsc, container_data, tuple, data_set) == FALSE) {
             pe_err("Failed unpacking resource %s", rsc->id);
             rsc->fns->free(rsc);
             return FALSE;
         }
     }
 
     if(container_data->child) {
         rsc->children = g_list_append(rsc->children, container_data->child);
     }
     return TRUE;
 }
 
 static int
 tuple_rsc_active(resource_t *rsc, gboolean all)
 {
     if (rsc) {
         gboolean child_active = rsc->fns->active(rsc, all);
 
         if (child_active && !all) {
             return TRUE;
         } else if (!child_active && all) {
             return FALSE;
         }
     }
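     /* Neither check was decisive for this resource, so return -1 to tell the
      * caller (container_active()) to keep looking at the other members. */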
     return -1;
 }
 
 gboolean
 container_active(resource_t * rsc, gboolean all)
 {
     container_variant_data_t *container_data = NULL;
     GListPtr iter = NULL;
 
     get_container_variant_data(container_data, rsc);
     for (iter = container_data->tuples; iter != NULL; iter = iter->next) {
         container_grouping_t *tuple = (container_grouping_t *)(iter->data);
         int rsc_active;
 
         rsc_active = tuple_rsc_active(tuple->ip, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = tuple_rsc_active(tuple->child, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = tuple_rsc_active(tuple->docker, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = tuple_rsc_active(tuple->remote, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
     }
 
     /* If "all" is TRUE, we've already checked that no resources were inactive,
      * so return TRUE; if "all" is FALSE, we didn't find any active resources,
      * so return FALSE.
      */
     return all;
 }
 
 resource_t *
 find_container_child(const char *stem, resource_t * rsc, node_t *node) 
 {
     container_variant_data_t *container_data = NULL;
     resource_t *parent = uber_parent(rsc);
     CRM_ASSERT(parent->parent);
 
     parent = parent->parent;
     get_container_variant_data(container_data, parent);
 
     if (is_not_set(rsc->flags, pe_rsc_unique)) {
         for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
             container_grouping_t *tuple = (container_grouping_t *)gIter->data;
 
             CRM_ASSERT(tuple);
             if(tuple->node->details == node->details) {
                 rsc = tuple->child;
                 break;
             }
         }
     }
 
     if (rsc && safe_str_neq(stem, rsc->id)) {
         free(rsc->clone_name);
         rsc->clone_name = strdup(stem);
     }
 
     return rsc;
 }
 
 static void
 print_rsc_in_list(resource_t *rsc, const char *pre_text, long options,
                   void *print_data)
 {
     if (rsc != NULL) {
         if (options & pe_print_html) {
             status_print("<li>");
         }
         rsc->fns->print(rsc, pre_text, options, print_data);
         if (options & pe_print_html) {
             status_print("</li>\n");
         }
     }
 }
 
 static const char*
 container_type_as_string(enum container_type t)
 {
     if (t == PE_CONTAINER_TYPE_DOCKER) {
         return PE_CONTAINER_TYPE_DOCKER_S;
     } else if (t == PE_CONTAINER_TYPE_RKT) {
         return PE_CONTAINER_TYPE_RKT_S;
     } else {
         return PE_CONTAINER_TYPE_UNKNOWN_S;
     }
 }
 
 static void
 container_print_xml(resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     container_variant_data_t *container_data = NULL;
     char *child_text = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     if (pre_text == NULL) {
         pre_text = "";
     }
     child_text = crm_concat(pre_text, "       ", ' ');
 
     get_container_variant_data(container_data, rsc);
 
     status_print("%s<bundle ", pre_text);
     status_print("id=\"%s\" ", rsc->id);
-    status_print("type=\"%s\" ", container_type_as_string(container_data->type));
+
+    // Always lowercase the container technology type for use as XML value
+    status_print("type=\"");
+    for (const char *c = container_type_as_string(container_data->type);
+         *c; ++c) {
+        status_print("%c", tolower((unsigned char) *c));
+    }
+    status_print("\" ");
+
     status_print("image=\"%s\" ", container_data->image);
     status_print("unique=\"%s\" ", is_set(rsc->flags, pe_rsc_unique)? "true" : "false");
     status_print("managed=\"%s\" ", is_set(rsc->flags, pe_rsc_managed) ? "true" : "false");
     status_print("failed=\"%s\" ", is_set(rsc->flags, pe_rsc_failed) ? "true" : "false");
     status_print(">\n");
 
     for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
         container_grouping_t *tuple = (container_grouping_t *)gIter->data;
 
         CRM_ASSERT(tuple);
         status_print("%s    <replica id=\"%d\">\n", pre_text, tuple->offset);
         print_rsc_in_list(tuple->ip, child_text, options, print_data);
         print_rsc_in_list(tuple->child, child_text, options, print_data);
         print_rsc_in_list(tuple->docker, child_text, options, print_data);
         print_rsc_in_list(tuple->remote, child_text, options, print_data);
         status_print("%s    </replica>\n", pre_text);
     }
     status_print("%s</bundle>\n", pre_text);
     free(child_text);
 }
 
 static void
 tuple_print(container_grouping_t * tuple, const char *pre_text, long options, void *print_data)
 {
     node_t *node = NULL;
     resource_t *rsc = tuple->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if(rsc == NULL) {
         rsc = tuple->docker;
     }
 
     if(tuple->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", rsc_printable_id(tuple->docker));
     }
     if(tuple->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)", tuple->ipaddr);
     }
 
     if (tuple->docker->running_on) {
         node = tuple->docker->running_on->data;
     }
     common_print(rsc, pre_text, buffer, node, options, print_data);
 }
 
 void
 container_print(resource_t * rsc, const char *pre_text, long options, void *print_data)
 {
     container_variant_data_t *container_data = NULL;
     char *child_text = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     if (options & pe_print_xml) {
         container_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     get_container_variant_data(container_data, rsc);
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     status_print("%s%s container%s: %s [%s]%s%s\n",
                  pre_text, container_type_as_string(container_data->type),
                  container_data->replicas>1?" set":"", rsc->id, container_data->image,
                  is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                  is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
     if (options & pe_print_html) {
         status_print("<br />\n<ul>\n");
     }
 
     for (GListPtr gIter = container_data->tuples; gIter != NULL; gIter = gIter->next) {
         container_grouping_t *tuple = (container_grouping_t *)gIter->data;
 
         CRM_ASSERT(tuple);
         if (options & pe_print_html) {
             status_print("<li>");
         }
 
         if(is_set(options, pe_print_clone_details)) {
             child_text = crm_strdup_printf("     %s", pre_text);
             if(g_list_length(container_data->tuples) > 1) {
                 status_print("  %sReplica[%d]\n", pre_text, tuple->offset);
             }
             if (options & pe_print_html) {
                 status_print("<br />\n<ul>\n");
             }
             print_rsc_in_list(tuple->ip, child_text, options, print_data);
             print_rsc_in_list(tuple->docker, child_text, options, print_data);
             print_rsc_in_list(tuple->remote, child_text, options, print_data);
             print_rsc_in_list(tuple->child, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("</ul>\n");
             }
         } else {
             child_text = crm_strdup_printf("%s  ", pre_text);
             tuple_print(tuple, child_text, options, print_data);
         }
         free(child_text);
 
         if (options & pe_print_html) {
             status_print("</li>\n");
         }
     }
     if (options & pe_print_html) {
         status_print("</ul>\n");
     }
 }
 
 void
 tuple_free(container_grouping_t *tuple) 
 {
     if(tuple == NULL) {
         return;
     }
 
     if(tuple->node) {
         free(tuple->node);
         tuple->node = NULL;
     }
 
     if(tuple->ip) {
         free_xml(tuple->ip->xml);
         tuple->ip->xml = NULL;
         tuple->ip->fns->free(tuple->ip);
         tuple->ip = NULL;
     }
     if(tuple->docker) {
         free_xml(tuple->docker->xml);
         tuple->docker->xml = NULL;
         tuple->docker->fns->free(tuple->docker);
         tuple->docker = NULL;
     }
     if(tuple->remote) {
         free_xml(tuple->remote->xml);
         tuple->remote->xml = NULL;
         tuple->remote->fns->free(tuple->remote);
         tuple->remote = NULL;
     }
     free(tuple->ipaddr);
     free(tuple);
 }
 
 void
 container_free(resource_t * rsc)
 {
     container_variant_data_t *container_data = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     get_container_variant_data(container_data, rsc);
     pe_rsc_trace(rsc, "Freeing %s", rsc->id);
 
     free(container_data->prefix);
     free(container_data->image);
     free(container_data->control_port);
     free(container_data->host_network);
     free(container_data->host_netmask);
     free(container_data->ip_range_start);
     free(container_data->docker_network);
     free(container_data->docker_run_options);
     free(container_data->docker_run_command);
     free(container_data->docker_host_options);
 
     g_list_free_full(container_data->tuples, (GDestroyNotify)tuple_free);
     g_list_free_full(container_data->mounts, (GDestroyNotify)mount_free);
     g_list_free_full(container_data->ports, (GDestroyNotify)port_free);
     g_list_free(rsc->children);
 
     if(container_data->child) {
         free_xml(container_data->child->xml);
         container_data->child->xml = NULL;
         container_data->child->fns->free(container_data->child);
     }
     common_free(rsc);
 }
 
 enum rsc_role_e
 container_resource_state(const resource_t * rsc, gboolean current)
 {
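     /* Bundles currently have no role of their own, so always report unknown */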
     enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
     return container_role;
 }
 
 /*!
  * \brief Get the number of configured replicas in a bundle
  *
  * \param[in] rsc  Bundle resource
  *
  * \return Number of configured replicas, or 0 on error
  */
 int
 pe_bundle_replicas(const resource_t *rsc)
 {
     if ((rsc == NULL) || (rsc->variant != pe_container)) {
         return 0;
     } else {
         container_variant_data_t *container_data = NULL;
 
         get_container_variant_data(container_data, rsc);
         return container_data->replicas;
     }
 }
diff --git a/lib/services/services.c b/lib/services/services.c
index d931614a53..8812d15c91 100644
--- a/lib/services/services.c
+++ b/lib/services/services.c
@@ -1,1453 +1,1478 @@
 /*
  * Copyright (C) 2010-2016 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #ifndef _GNU_SOURCE
 #  define _GNU_SOURCE
 #endif
 
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <stdio.h>
 
 #include <errno.h>
 #include <unistd.h>
 #include <dirent.h>
 #include <fcntl.h>
 
 #include <crm/crm.h>
 #include <crm/common/mainloop.h>
 #include <crm/services.h>
 #include <crm/msg_xml.h>
 #include "services_private.h"
 
 #if SUPPORT_UPSTART
 #  include <upstart.h>
 #endif
 
 #if SUPPORT_SYSTEMD
 #  include <systemd.h>
 #endif
 
 /* TODO: Develop a rollover strategy */
 
 static int operations = 0;
 static GHashTable *recurring_actions = NULL;
 
 /* ops waiting to run async because of conflicting active
  * pending ops */
 static GList *blocked_ops = NULL;
 
 /* ops currently active (in-flight) */
 static GList *inflight_ops = NULL;
 
 static void handle_blocked_ops(void);
 
 svc_action_t *
 services_action_create(const char *name, const char *action, int interval, int timeout)
 {
     return resources_action_create(name, PCMK_RESOURCE_CLASS_LSB, NULL, name,
                                    action, interval, timeout, NULL, 0);
 }
 
 const char *
 resources_find_service_class(const char *agent)
 {
     /* Priority is:
      * - lsb
      * - systemd
      * - upstart
      */
     int rc = 0;
     struct stat st;
     char *path = NULL;
 
 #ifdef LSB_ROOT_DIR
     rc = asprintf(&path, "%s/%s", LSB_ROOT_DIR, agent);
     if (rc > 0 && stat(path, &st) == 0) {
         free(path);
         return PCMK_RESOURCE_CLASS_LSB;
     }
     free(path);
 #endif
 
 #if SUPPORT_SYSTEMD
     if (systemd_unit_exists(agent)) {
         return PCMK_RESOURCE_CLASS_SYSTEMD;
     }
 #endif
 
 #if SUPPORT_UPSTART
     if (upstart_job_exists(agent)) {
         return PCMK_RESOURCE_CLASS_UPSTART;
     }
 #endif
     return NULL;
 }
 
 static inline void
 init_recurring_actions(void)
 {
     if (recurring_actions == NULL) {
         recurring_actions = g_hash_table_new_full(g_str_hash, g_str_equal, NULL,
                                                   NULL);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether op is in-flight systemd or upstart op
  *
  * \param[in] op  Operation to check
  *
  * \return TRUE if op is in-flight systemd or upstart op
  */
 static inline gboolean
 inflight_systemd_or_upstart(svc_action_t *op)
 {
     return (safe_str_eq(op->standard, PCMK_RESOURCE_CLASS_SYSTEMD)
             || safe_str_eq(op->standard, PCMK_RESOURCE_CLASS_UPSTART))
             && (g_list_find(inflight_ops, op) != NULL);
 }
 
 /*!
  * \internal
  * \brief Expand "service" alias to an actual resource class
  *
  * \param[in] rsc       Resource name (for logging only)
  * \param[in] standard  Resource class as configured
  * \param[in] agent     Agent name to look for
  *
  * \return Newly allocated string with actual resource class
  *
  * \note The caller is responsible for calling free() on the result.
  */
 static char *
 expand_resource_class(const char *rsc, const char *standard, const char *agent)
 {
     char *expanded_class = NULL;
 
     if (strcasecmp(standard, PCMK_RESOURCE_CLASS_SERVICE) == 0) {
         const char *found_class = resources_find_service_class(agent);
 
         if (found_class) {
             crm_debug("Found %s agent %s for %s", found_class, agent, rsc);
             expanded_class = strdup(found_class);
         } else {
             crm_info("Assuming resource class lsb for agent %s for %s",
                      agent, rsc);
             expanded_class = strdup(PCMK_RESOURCE_CLASS_LSB);
         }
     } else {
         expanded_class = strdup(standard);
     }
     CRM_ASSERT(expanded_class);
     return expanded_class;
 }
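 
 /* For example (the agent name is hypothetical): with standard "service" and
  * agent "foo", the result is "lsb" if an init script exists, otherwise
  * "systemd" or "upstart" if a matching unit or job exists, else "lsb" as the
  * fallback; any other standard ("ocf", "systemd", ...) is returned unchanged.
  */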
 
 svc_action_t *
 resources_action_create(const char *name, const char *standard, const char *provider,
                         const char *agent, const char *action, int interval, int timeout,
                         GHashTable * params, enum svc_action_flags flags)
 {
     svc_action_t *op = NULL;
 
     /*
      * Do some up front sanity checks before we go off and
      * build the svc_action_t instance.
      */
 
     if (crm_strlen_zero(name)) {
         crm_err("Cannot create operation without resource name");
         goto return_error;
     }
 
     if (crm_strlen_zero(standard)) {
         crm_err("Cannot create operation for %s without resource class", name);
         goto return_error;
     }
 
     if (crm_provider_required(standard) && crm_strlen_zero(provider)) {
         crm_err("Cannot create OCF operation for %s without provider", name);
         goto return_error;
     }
 
     if (crm_strlen_zero(agent)) {
         crm_err("Cannot create operation for %s without agent name", name);
         goto return_error;
     }
 
     if (crm_strlen_zero(action)) {
         crm_err("Cannot create operation for %s without operation name", name);
         goto return_error;
     }
 
     /*
      * Sanity checks passed, proceed!
      */
 
     op = calloc(1, sizeof(svc_action_t));
     op->opaque = calloc(1, sizeof(svc_action_private_t));
     op->rsc = strdup(name);
     op->interval = interval;
     op->timeout = timeout;
     op->standard = expand_resource_class(name, standard, agent);
     op->agent = strdup(agent);
     op->sequence = ++operations;
     op->flags = flags;
     op->id = generate_op_key(name, action, interval);
 
     if (safe_str_eq(action, "monitor") && (
 #if SUPPORT_HEARTBEAT
         safe_str_eq(op->standard, PCMK_RESOURCE_CLASS_HB) ||
 #endif
         safe_str_eq(op->standard, PCMK_RESOURCE_CLASS_LSB))) {
         action = "status";
     }
     op->action = strdup(action);
 
     if (crm_provider_required(op->standard)) {
         op->provider = strdup(provider);
         op->params = params;
         params = NULL;
 
         if (asprintf(&op->opaque->exec, "%s/resource.d/%s/%s", OCF_ROOT_DIR, provider, agent) == -1) {
             crm_err("Internal error: cannot create agent path");
             goto return_error;
         }
         op->opaque->args[0] = strdup(op->opaque->exec);
         op->opaque->args[1] = strdup(action);
 
     } else if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_LSB) == 0) {
         if (op->agent[0] == '/') {
             /* if given an absolute path, use that instead
              * of tacking on the LSB_ROOT_DIR path to the front */
             op->opaque->exec = strdup(op->agent);
         } else if (asprintf(&op->opaque->exec, "%s/%s", LSB_ROOT_DIR, op->agent) == -1) {
             crm_err("Internal error: cannot create agent path");
             goto return_error;
         }
         op->opaque->args[0] = strdup(op->opaque->exec);
         op->opaque->args[1] = strdup(op->action);
         op->opaque->args[2] = NULL;
 #if SUPPORT_HEARTBEAT
     } else if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_HB) == 0) {
         int index;
         int param_num;
         char buf_tmp[20];
         void *value_tmp;
 
         if (op->agent[0] == '/') {
             /* if given an absolute path, use that instead
              * of tacking on the HB_RA_DIR path to the front */
             op->opaque->exec = strdup(op->agent);
         } else if (asprintf(&op->opaque->exec, "%s/%s", HB_RA_DIR, op->agent) == -1) {
             crm_err("Internal error: cannot create agent path");
             goto return_error;
         }
         op->opaque->args[0] = strdup(op->opaque->exec);
 
         /* The "heartbeat" agent class only has positional arguments,
          * which we keyed by their decimal position number. */
         param_num = 1;
         if (params) {
             for (index = 1; index <= MAX_ARGC - 3; index++ ) {
                 snprintf(buf_tmp, sizeof(buf_tmp), "%d", index);
                 value_tmp = g_hash_table_lookup(params, buf_tmp);
                 if (value_tmp == NULL) {
                     /* Perhaps we should use strdup("")? But the old lrmd
                      * simply continued in this case too, so keep doing that. */
                     continue;
                 }
                 op->opaque->args[param_num++] = strdup(value_tmp);
             }
         }
 
         /* Add the operation code as the last argument, followed by
          * the terminating NULL pointer */
         op->opaque->args[param_num++] = strdup(op->action);
         op->opaque->args[param_num] = NULL;
 #endif
 #if SUPPORT_SYSTEMD
     } else if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_SYSTEMD) == 0) {
         op->opaque->exec = strdup("systemd-dbus");
 #endif
 #if SUPPORT_UPSTART
     } else if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_UPSTART) == 0) {
         op->opaque->exec = strdup("upstart-dbus");
 #endif
 #if SUPPORT_NAGIOS
     } else if (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_NAGIOS) == 0) {
         int index = 0;
 
         if (op->agent[0] == '/') {
             /* if given an absolute path, use that instead
              * of tacking on the NAGIOS_PLUGIN_DIR path to the front */
             op->opaque->exec = strdup(op->agent);
 
         } else if (asprintf(&op->opaque->exec, "%s/%s", NAGIOS_PLUGIN_DIR, op->agent) == -1) {
             crm_err("Internal error: cannot create agent path");
             goto return_error;
         }
 
         op->opaque->args[0] = strdup(op->opaque->exec);
         index = 1;
 
         if (safe_str_eq(op->action, "monitor") && op->interval == 0) {
             /* Invoke --version for a nagios probe */
             op->opaque->args[index] = strdup("--version");
             index++;
 
         } else if (params) {
             GHashTableIter iter;
             char *key = NULL;
             char *value = NULL;
             static int args_size = sizeof(op->opaque->args) / sizeof(char *);
 
             g_hash_table_iter_init(&iter, params);
 
             while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value) &&
                    index <= args_size - 3) {
                 int len = 3;
                 char *long_opt = NULL;
 
                 if (safe_str_eq(key, XML_ATTR_CRM_VERSION) || strstr(key, CRM_META "_")) {
                     continue;
                 }
 
                 len += strlen(key);
                 long_opt = calloc(1, len);
                 sprintf(long_opt, "--%s", key);
                 long_opt[len - 1] = 0;
 
                 op->opaque->args[index] = long_opt;
                 op->opaque->args[index + 1] = strdup(value);
                 index += 2;
             }
         }
         op->opaque->args[index] = NULL;
 #endif
     } else {
         crm_err("Unknown resource standard: %s", op->standard);
         services_action_free(op);
         op = NULL;
     }
 
     if(params) {
         g_hash_table_destroy(params);
     }
     return op;
 
   return_error:
     if(params) {
         g_hash_table_destroy(params);
     }
     services_action_free(op);
 
     return NULL;
 }
 
 svc_action_t *
 services_action_create_generic(const char *exec, const char *args[])
 {
     svc_action_t *op;
     unsigned int cur_arg;
 
     op = calloc(1, sizeof(*op));
     op->opaque = calloc(1, sizeof(svc_action_private_t));
 
     op->opaque->exec = strdup(exec);
     op->opaque->args[0] = strdup(exec);
 
     for (cur_arg = 1; args && args[cur_arg - 1]; cur_arg++) {
-        op->opaque->args[cur_arg] = strdup(args[cur_arg - 1]);
-
-        if (cur_arg == DIMOF(op->opaque->args) - 1) {
+        if (cur_arg == DIMOF(op->opaque->args) - 1) {
+            /* Check before writing, so the final slot stays NULL and the
+             * argument array remains NULL-terminated */
             crm_err("svc_action_t args list not long enough for '%s' execution request.", exec);
             break;
         }
+        op->opaque->args[cur_arg] = strdup(args[cur_arg - 1]);
     }
 
     return op;
 }
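+
+/* Usage sketch (illustrative only): build a one-shot action for an arbitrary
+ * executable and run it synchronously. The command and arguments here are
+ * hypothetical; the args array must be NULL-terminated and excludes the
+ * program name, which is supplied as argv[0] automatically.
+ *
+ *     const char *args[] = { "-c", "1", "192.0.2.1", NULL };
+ *     svc_action_t *op = services_action_create_generic("/bin/ping", args);
+ *
+ *     if (services_action_sync(op)) {
+ *         crm_info("rc=%d stdout=%s", op->rc, op->stdout_data);
+ *     }
+ *     services_action_free(op);
+ */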
 
 /*!
  * \brief Create an alert agent action
  *
  * \param[in] id        Alert ID
  * \param[in] exec      Path to alert agent executable
  * \param[in] timeout   Action timeout
  * \param[in] params    Parameters to use with action
  * \param[in] sequence  Action sequence number
  * \param[in] cb_data   Data to pass to callback function
  *
  * \return New action on success, NULL on error
  * \note It is the caller's responsibility to free cb_data.
  *       The caller should not free params explicitly.
  */
 svc_action_t *
 services_alert_create(const char *id, const char *exec, int timeout,
                       GHashTable *params, int sequence, void *cb_data)
 {
     svc_action_t *action = services_action_create_generic(exec, NULL);
 
     CRM_ASSERT(action);
     action->timeout = timeout;
     action->id = strdup(id);
     action->params = params;
     action->sequence = sequence;
     action->cb_data = cb_data;
     return action;
 }
 
 /*!
  * \brief Set the user and group that an action will execute as
  *
 * \param[in,out] op    Action to modify
 * \param[in]     user  Name of user to execute action as (the user's group
 *                      will be looked up and used as well)
  *
  * \return pcmk_ok on success, -errno otherwise
  *
  * \note This will have no effect unless the process executing the action runs
  *       as root, and the action is not a systemd or upstart action.
  *       We could implement this for systemd by adding User= and Group= to
  *       [Service] in the override file, but that seems more likely to cause
  *       problems than be useful.
  */
 int
 services_action_user(svc_action_t *op, const char *user)
 {
     CRM_CHECK((op != NULL) && (user != NULL), return -EINVAL);
     return crm_user_lookup(user, &(op->opaque->uid), &(op->opaque->gid));
 }
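+
+/* Illustrative only: dropping privileges for an action. Per the note above,
+ * this has an effect only when the calling process runs as root; the user
+ * name is hypothetical.
+ *
+ *     if (services_action_user(op, "hacluster") != pcmk_ok) {
+ *         crm_err("Could not look up user");
+ *     }
+ */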
 
 static void
 set_alert_env(gpointer key, gpointer value, gpointer user_data)
 {
     int rc;
 
     if (value) {
         rc = setenv(key, value, 1);
     } else {
         rc = unsetenv(key);
     }
 
     if (rc < 0) {
-        crm_perror(LOG_ERR, "setenv %s=%s",
-                  (char*)key, (value? (char*)value : ""));
+        crm_perror(LOG_ERR, "%s %s=%s", (value? "setenv" : "unsetenv"),
+                   (char*)key, (value? (char*)value : ""));
     } else {
-        crm_trace("setenv %s=%s", (char*)key, (value? (char*)value : ""));
+        crm_trace("%s %s=%s", (value? "setenv" : "unsetenv"),
+                  (char*)key, (value? (char*)value : ""));
     }
 }
 
 static void
 unset_alert_env(gpointer key, gpointer value, gpointer user_data)
 {
     if (unsetenv(key) < 0) {
         crm_perror(LOG_ERR, "unset %s", (char*)key);
     } else {
         crm_trace("unset %s", (char*)key);
     }
 }
 
 /*!
  * \brief Execute an alert agent action
  *
  * \param[in] action  Action to execute
  * \param[in] cb      Function to call when action completes
  *
  * \return TRUE if the library will free action, FALSE otherwise
  *
  * \note If this function returns FALSE, it is the caller's responsibility to
  *       free the action with services_action_free().
  */
 gboolean
 services_alert_async(svc_action_t *action, void (*cb)(svc_action_t *op))
 {
     gboolean responsible;
 
     action->synchronous = false;
     action->opaque->callback = cb;
     if (action->params) {
         g_hash_table_foreach(action->params, set_alert_env, NULL);
     }
     responsible = services_os_action_execute(action);
     if (action->params) {
         g_hash_table_foreach(action->params, unset_alert_env, NULL);
     }
     return responsible;
 }
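+
+/* Usage sketch (illustrative): fire an alert agent with its parameters
+ * exported as environment variables. The ID, agent path, parameters, and
+ * callback are hypothetical. Per services_alert_create(), the params table
+ * becomes the action's to free, so it must map malloc'd keys to malloc'd
+ * values:
+ *
+ *     svc_action_t *alert = NULL;
+ *     GHashTable *params = g_hash_table_new_full(g_str_hash, g_str_equal,
+ *                                                free, free);
+ *
+ *     g_hash_table_insert(params, strdup("CRM_alert_kind"), strdup("node"));
+ *     alert = services_alert_create("alert-1", "/path/to/agent", 30, params,
+ *                                   1, NULL);
+ *     if (!services_alert_async(alert, my_alert_callback)) {
+ *         services_action_free(alert); // library did not take ownership
+ *     }
+ */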
 
 #if SUPPORT_DBUS
 /*!
  * \internal
  * \brief Update operation's pending DBus call, unreferencing old one if needed
  *
  * \param[in,out] op       Operation to modify
  * \param[in]     pending  Pending call to set
  */
 void
 services_set_op_pending(svc_action_t *op, DBusPendingCall *pending)
 {
     if (op->opaque->pending && (op->opaque->pending != pending)) {
         if (pending) {
             crm_info("Lost pending %s DBus call (%p)", op->id, op->opaque->pending);
         } else {
             crm_trace("Done with pending %s DBus call (%p)", op->id, op->opaque->pending);
         }
         dbus_pending_call_unref(op->opaque->pending);
     }
     op->opaque->pending = pending;
     if (pending) {
         crm_trace("Updated pending %s DBus call (%p)", op->id, pending);
     } else {
         crm_trace("Cleared pending %s DBus call", op->id);
     }
 }
 #endif
 
 void
 services_action_cleanup(svc_action_t * op)
 {
     if(op->opaque == NULL) {
         return;
     }
 
 #if SUPPORT_DBUS
     if(op->opaque->timerid != 0) {
         crm_trace("Removing timer for call %s to %s", op->action, op->rsc);
         g_source_remove(op->opaque->timerid);
         op->opaque->timerid = 0;
     }
 
     if(op->opaque->pending) {
         crm_trace("Cleaning up pending dbus call %p %s for %s", op->opaque->pending, op->action, op->rsc);
         if(dbus_pending_call_get_completed(op->opaque->pending)) {
             crm_warn("Pending dbus call %s for %s did not complete", op->action, op->rsc);
         }
         dbus_pending_call_cancel(op->opaque->pending);
         dbus_pending_call_unref(op->opaque->pending);
         op->opaque->pending = NULL;
     }
 #endif
 
     if (op->opaque->stderr_gsource) {
         mainloop_del_fd(op->opaque->stderr_gsource);
         op->opaque->stderr_gsource = NULL;
     }
 
     if (op->opaque->stdout_gsource) {
         mainloop_del_fd(op->opaque->stdout_gsource);
         op->opaque->stdout_gsource = NULL;
     }
 }
 
 void
 services_action_free(svc_action_t * op)
 {
     unsigned int i;
 
     if (op == NULL) {
         return;
     }
 
     /* The operation should be removed from all tracking lists by this point.
      * If it's not, we have a bug somewhere, so bail. That may lead to a
      * memory leak, but it's better than a use-after-free segmentation fault.
      */
     CRM_CHECK(g_list_find(inflight_ops, op) == NULL, return);
     CRM_CHECK(g_list_find(blocked_ops, op) == NULL, return);
     CRM_CHECK((recurring_actions == NULL)
               || (g_hash_table_lookup(recurring_actions, op->id) == NULL),
               return);
 
     services_action_cleanup(op);
 
     if (op->opaque->repeat_timer) {
         g_source_remove(op->opaque->repeat_timer);
         op->opaque->repeat_timer = 0;
     }
 
     free(op->id);
     free(op->opaque->exec);
 
     for (i = 0; i < DIMOF(op->opaque->args); i++) {
         free(op->opaque->args[i]);
     }
 
     free(op->opaque);
     free(op->rsc);
     free(op->action);
 
     free(op->standard);
     free(op->agent);
     free(op->provider);
 
     free(op->stdout_data);
     free(op->stderr_data);
 
     if (op->params) {
         g_hash_table_destroy(op->params);
         op->params = NULL;
     }
 
     free(op);
 }
 
 gboolean
 cancel_recurring_action(svc_action_t * op)
 {
     crm_info("Cancelling %s operation %s", op->standard, op->id);
 
     if (recurring_actions) {
         g_hash_table_remove(recurring_actions, op->id);
     }
 
     if (op->opaque->repeat_timer) {
         g_source_remove(op->opaque->repeat_timer);
         op->opaque->repeat_timer = 0;
     }
 
     return TRUE;
 }
 
 /*!
  * \brief Cancel a recurring action
  *
  * \param[in] name      Name of resource that operation is for
  * \param[in] action    Name of operation to cancel
  * \param[in] interval  Interval of operation to cancel
  *
  * \return TRUE if action was successfully cancelled, FALSE otherwise
  */
 gboolean
 services_action_cancel(const char *name, const char *action, int interval)
 {
     gboolean cancelled = FALSE;
     char *id = generate_op_key(name, action, interval);
     svc_action_t *op = NULL;
 
     /* We can only cancel a recurring action */
     init_recurring_actions();
     op = g_hash_table_lookup(recurring_actions, id);
     if (op == NULL) {
         goto done;
     }
 
     /* Tell operation_finalize() not to reschedule the operation */
     op->cancel = TRUE;
 
     /* Stop tracking it as a recurring operation, and stop its timer */
     cancel_recurring_action(op);
 
     /* If the op has a PID, it's an in-flight child process, so kill it.
      *
      * Whether the kill succeeds or fails, the main loop will send the op to
      * operation_finished() (and thus operation_finalize()) when the process
      * goes away.
      */
     if (op->pid != 0) {
         crm_info("Terminating in-flight op %s (pid %d) early because it was cancelled",
                  id, op->pid);
         cancelled = mainloop_child_kill(op->pid);
         if (cancelled == FALSE) {
             crm_err("Termination of %s (pid %d) failed", id, op->pid);
         }
         goto done;
     }
 
     /* In-flight systemd and upstart ops don't have a pid. The relevant handlers
      * will call operation_finalize() when the operation completes.
      * @TODO: Can we request early termination, maybe using
      * dbus_pending_call_cancel()?
      */
     if (inflight_systemd_or_upstart(op)) {
         crm_info("Will cancel %s op %s when in-flight instance completes",
                  op->standard, op->id);
         cancelled = FALSE;
         goto done;
     }
 
     /* Otherwise, operation is not in-flight, just report as cancelled */
     op->status = PCMK_LRM_OP_CANCELLED;
     if (op->opaque->callback) {
         op->opaque->callback(op);
     }
 
     blocked_ops = g_list_remove(blocked_ops, op);
     services_action_free(op);
     cancelled = TRUE;
 
 done:
     free(id);
     return cancelled;
 }
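+
+/* Illustrative only: cancelling the recurring 10-second monitor of a resource
+ * named "my-rsc". The name, action, and interval must match the values the
+ * operation was created with, because the lookup key is generated from all
+ * three.
+ *
+ *     if (services_action_cancel("my-rsc", "monitor", 10000) == FALSE) {
+ *         crm_info("No such recurring op, or it is in flight and will be"
+ *                  " cancelled when it completes");
+ *     }
+ */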
 
 gboolean
 services_action_kick(const char *name, const char *action, int interval /* ms */)
 {
     svc_action_t * op = NULL;
     char *id = generate_op_key(name, action, interval);
 
     init_recurring_actions();
     op = g_hash_table_lookup(recurring_actions, id);
     free(id);
 
     if (op == NULL) {
         return FALSE;
    }

     if (op->pid || inflight_systemd_or_upstart(op)) {
         return TRUE;
     } else {
         if (op->opaque->repeat_timer) {
             g_source_remove(op->opaque->repeat_timer);
             op->opaque->repeat_timer = 0;
         }
         recurring_action_timer(op);
         return TRUE;
    }
}
 
 /*!
  * \internal
  * \brief Add a new recurring operation, checking for duplicates
  *
  * \param[in] op               Operation to add
  *
  * \return TRUE if duplicate found (and reschedule), FALSE otherwise
  */
 static gboolean
 handle_duplicate_recurring(svc_action_t * op)
 {
     svc_action_t * dup = NULL;
 
     /* check for duplicates */
     dup = g_hash_table_lookup(recurring_actions, op->id);
 
     if (dup && (dup != op)) {
         /* update user data */
         if (op->opaque->callback) {
             dup->opaque->callback = op->opaque->callback;
             dup->cb_data = op->cb_data;
             op->cb_data = NULL;
         }
         /* immediately execute the next interval */
         if (dup->pid != 0) {
-            if (op->opaque->repeat_timer) {
-                g_source_remove(op->opaque->repeat_timer);
-                op->opaque->repeat_timer = 0;
-            }
+            /* Stop the existing op's pending timer before firing it directly,
+             * so its next run isn't scheduled twice */
+            if (dup->opaque->repeat_timer) {
+                g_source_remove(dup->opaque->repeat_timer);
+                dup->opaque->repeat_timer = 0;
+            }
             recurring_action_timer(dup);
         }
         /* free the duplicate */
         services_action_free(op);
         return TRUE;
     }
 
     return FALSE;
 }
 
static inline gboolean
action_exec_helper(svc_action_t * op)
{
    /* Whether the op is synchronous or asynchronous (op->synchronous) must be
     * decided before calling this. */
     if (op->standard
         && (strcasecmp(op->standard, PCMK_RESOURCE_CLASS_UPSTART) == 0)) {
 #if SUPPORT_UPSTART
         return upstart_job_exec(op);
 #endif
     } else if (op->standard && strcasecmp(op->standard,
                                           PCMK_RESOURCE_CLASS_SYSTEMD) == 0) {
 #if SUPPORT_SYSTEMD
         return systemd_unit_exec(op);
 #endif
     } else {
         return services_os_action_execute(op);
     }
    /* If an execution function above returned TRUE for an asynchronous op,
     * the op has probably been freed already, so it must not be used here. */
 
     return FALSE;
 }
 
 void
 services_add_inflight_op(svc_action_t * op)
 {
     if (op == NULL) {
         return;
     }
 
     CRM_ASSERT(op->synchronous == FALSE);
 
    /* Keep track of in-flight ops so that two ops for the same resource are
     * never run concurrently */
     if (op->rsc) {
         inflight_ops = g_list_append(inflight_ops, op);
     }
 }
 
 /*!
  * \internal
  * \brief Stop tracking an operation that completed
  *
  * \param[in] op  Operation to stop tracking
  */
 void
 services_untrack_op(svc_action_t *op)
 {
     /* Op is no longer in-flight or blocked */
     inflight_ops = g_list_remove(inflight_ops, op);
     blocked_ops = g_list_remove(blocked_ops, op);
 
     /* Op is no longer blocking other ops, so check if any need to run */
     handle_blocked_ops();
 }
 
 gboolean
 services_action_async(svc_action_t * op, void (*action_callback) (svc_action_t *))
 {
     op->synchronous = false;
     if (action_callback) {
         op->opaque->callback = action_callback;
     }
 
     if (op->interval > 0) {
         init_recurring_actions();
         if (handle_duplicate_recurring(op) == TRUE) {
            /* The existing recurring op was updated, and the new op was freed
             * as a duplicate, so exit early */
             return TRUE;
         }
         g_hash_table_replace(recurring_actions, op->id, op);
     }
 
     if (op->rsc && is_op_blocked(op->rsc)) {
         blocked_ops = g_list_append(blocked_ops, op);
         return TRUE;
     }
 
     return action_exec_helper(op);
 }
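+
+/* Usage sketch (illustrative): dispatch an action asynchronously and handle
+ * the result from the main loop. The callback body is hypothetical; when this
+ * returns TRUE, the library takes over op's lifetime, freeing or rescheduling
+ * it as appropriate.
+ *
+ *     static void my_done(svc_action_t *op)
+ *     {
+ *         crm_info("%s finished: rc=%d", op->id, op->rc);
+ *     }
+ *     ...
+ *     if (services_action_async(op, my_done)) {
+ *         op = NULL; // no longer ours to touch
+ *     }
+ */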
 static gboolean processing_blocked_ops = FALSE;
 
 gboolean
 is_op_blocked(const char *rsc)
 {
     GList *gIter = NULL;
     svc_action_t *op = NULL;
 
     for (gIter = inflight_ops; gIter != NULL; gIter = gIter->next) {
         op = gIter->data;
         if (safe_str_eq(op->rsc, rsc)) {
             return TRUE;
         }
     }
 
     return FALSE;
 }
 
 static void
 handle_blocked_ops(void)
 {
     GList *executed_ops = NULL;
     GList *gIter = NULL;
     svc_action_t *op = NULL;
     gboolean res = FALSE;
 
     if (processing_blocked_ops) {
         /* avoid nested calling of this function */
         return;
     }
 
     processing_blocked_ops = TRUE;
 
    /* This is O(n^2), but blocked ops are incredibly rare; the list will be
     * empty 99% of the time. */
     for (gIter = blocked_ops; gIter != NULL; gIter = gIter->next) {
         op = gIter->data;
         if (is_op_blocked(op->rsc)) {
             continue;
         }
         executed_ops = g_list_append(executed_ops, op);
         res = action_exec_helper(op);
         if (res == FALSE) {
             op->status = PCMK_LRM_OP_ERROR;
             /* this can cause this function to be called recursively
              * which is why we have processing_blocked_ops static variable */
             operation_finalize(op);
         }
     }
 
     for (gIter = executed_ops; gIter != NULL; gIter = gIter->next) {
         op = gIter->data;
         blocked_ops = g_list_remove(blocked_ops, op);
     }
     g_list_free(executed_ops);
 
     processing_blocked_ops = FALSE;
 }
 
 #define lsb_metadata_template  \
     "<?xml version='1.0'?>\n"                                           \
     "<!DOCTYPE resource-agent SYSTEM 'ra-api-1.dtd'>\n"                 \
     "<resource-agent name='%s' version='" PCMK_DEFAULT_AGENT_VERSION "'>\n" \
     "  <version>1.0</version>\n"                                        \
     "  <longdesc lang='en'>\n"                                          \
-    "    %s\n"                                                          \
+    "%s"                                                                \
     "  </longdesc>\n"                                                   \
     "  <shortdesc lang='en'>%s</shortdesc>\n"                           \
     "  <parameters>\n"                                                  \
     "  </parameters>\n"                                                 \
     "  <actions>\n"                                                     \
     "    <action name='meta-data'    timeout='5' />\n"                  \
     "    <action name='start'        timeout='15' />\n"                 \
     "    <action name='stop'         timeout='15' />\n"                 \
     "    <action name='status'       timeout='15' />\n"                 \
     "    <action name='restart'      timeout='15' />\n"                 \
     "    <action name='force-reload' timeout='15' />\n"                 \
     "    <action name='monitor'      timeout='15' interval='15' />\n"   \
     "  </actions>\n"                                                    \
     "  <special tag='LSB'>\n"                                           \
     "    <Provides>%s</Provides>\n"                                     \
     "    <Required-Start>%s</Required-Start>\n"                         \
     "    <Required-Stop>%s</Required-Stop>\n"                           \
     "    <Should-Start>%s</Should-Start>\n"                             \
     "    <Should-Stop>%s</Should-Stop>\n"                               \
     "    <Default-Start>%s</Default-Start>\n"                           \
     "    <Default-Stop>%s</Default-Stop>\n"                             \
     "  </special>\n"                                                    \
     "</resource-agent>\n"
 
+/* See "Comment Conventions for Init Scripts" in the LSB core specification at:
+ * http://refspecs.linuxfoundation.org/lsb.shtml
+ */
 #define LSB_INITSCRIPT_INFOBEGIN_TAG "### BEGIN INIT INFO"
 #define LSB_INITSCRIPT_INFOEND_TAG "### END INIT INFO"
 #define PROVIDES    "# Provides:"
 #define REQ_START   "# Required-Start:"
 #define REQ_STOP    "# Required-Stop:"
 #define SHLD_START  "# Should-Start:"
 #define SHLD_STOP   "# Should-Stop:"
 #define DFLT_START  "# Default-Start:"
 #define DFLT_STOP   "# Default-Stop:"
 #define SHORT_DSCR  "# Short-Description:"
 #define DESCRIPTION "# Description:"
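+
+/* For reference, the block these tags delimit looks like this (values are
+ * illustrative; continuation lines are marked by '#' plus a tab or more than
+ * one space):
+ *
+ *     ### BEGIN INIT INFO
+ *     # Provides:          my-daemon
+ *     # Required-Start:    $network $remote_fs
+ *     # Required-Stop:     $network $remote_fs
+ *     # Default-Start:     2 3 4 5
+ *     # Default-Stop:      0 1 6
+ *     # Short-Description: Example daemon
+ *     # Description:       A longer description that may
+ *     #                    span several indented lines.
+ *     ### END INIT INFO
+ */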
 
 #define lsb_meta_helper_free_value(m)           \
     do {                                        \
         if ((m) != NULL) {                      \
             xmlFree(m);                         \
             (m) = NULL;                         \
         }                                       \
     } while(0)
 
 /*!
  * \internal
  * \brief Grab an LSB header value
  *
  * \param[in]     line    Line read from LSB init script
 * \param[in,out] value   If not already set, this will be set to an XML-safe
 *                        copy of the value on the line
  * \param[in]     prefix  Set value if line starts with this pattern
  *
  * \return TRUE if value was set, FALSE otherwise
  */
 static inline gboolean
 lsb_meta_helper_get_value(const char *line, char **value, const char *prefix)
 {
-    if (!*value && !strncasecmp(line, prefix, strlen(prefix))) {
+    if (!*value && !strncmp(line, prefix, strlen(prefix))) {
         *value = (char *)xmlEncodeEntitiesReentrant(NULL, BAD_CAST line+strlen(prefix));
         return TRUE;
     }
     return FALSE;
 }
 
 #define DESC_MAX 2048
 
 static int
 lsb_get_metadata(const char *type, char **output)
 {
     char ra_pathname[PATH_MAX] = { 0, };
-    FILE *fp;
-    char buffer[1024];
+    FILE *fp = NULL;
+    char buffer[1024] = { 0, };
     char *provides = NULL;
     char *req_start = NULL;
     char *req_stop = NULL;
     char *shld_start = NULL;
     char *shld_stop = NULL;
     char *dflt_start = NULL;
     char *dflt_stop = NULL;
     char *s_dscrpt = NULL;
     char *xml_l_dscrpt = NULL;
     int offset = 0;
-    char description[DESC_MAX];
+    bool in_header = FALSE;
+    char description[DESC_MAX] = { 0, };
 
     if (type[0] == '/') {
         snprintf(ra_pathname, sizeof(ra_pathname), "%s", type);
     } else {
         snprintf(ra_pathname, sizeof(ra_pathname), "%s/%s",
                  LSB_ROOT_DIR, type);
     }
 
     crm_trace("Looking into %s", ra_pathname);
     fp = fopen(ra_pathname, "r");
     if (fp == NULL) {
         return -errno;
     }
 
    /* Scan the file for the LSB-compliant comment block and extract its fields */
     while (fgets(buffer, sizeof(buffer), fp)) {
 
+        // Ignore lines up to and including the block delimiter
+        if (!strncmp(buffer, LSB_INITSCRIPT_INFOBEGIN_TAG,
+                     strlen(LSB_INITSCRIPT_INFOBEGIN_TAG))) {
+            in_header = TRUE;
+            continue;
+        }
+        if (!in_header) {
+            continue;
+        }
+
        /* Assume that each of the following eight fields fits on one line */
         if (lsb_meta_helper_get_value(buffer, &provides, PROVIDES)) {
             continue;
         }
         if (lsb_meta_helper_get_value(buffer, &req_start, REQ_START)) {
             continue;
         }
         if (lsb_meta_helper_get_value(buffer, &req_stop, REQ_STOP)) {
             continue;
         }
         if (lsb_meta_helper_get_value(buffer, &shld_start, SHLD_START)) {
             continue;
         }
         if (lsb_meta_helper_get_value(buffer, &shld_stop, SHLD_STOP)) {
             continue;
         }
         if (lsb_meta_helper_get_value(buffer, &dflt_start, DFLT_START)) {
             continue;
         }
         if (lsb_meta_helper_get_value(buffer, &dflt_stop, DFLT_STOP)) {
             continue;
         }
         if (lsb_meta_helper_get_value(buffer, &s_dscrpt, SHORT_DSCR)) {
             continue;
         }
 
         /* Long description may cross multiple lines */
-        if ((offset == 0)
-            && !strncasecmp(buffer, DESCRIPTION, strlen(DESCRIPTION))) {
-            /* Between # and keyword, more than one space, or a tab
-             * character, indicates the continuation line.
-             *
-             * Extracted from LSB init script standard
-             */
+        if ((offset == 0) // haven't already found long description
+            && !strncmp(buffer, DESCRIPTION, strlen(DESCRIPTION))) {
+            bool processed_line = TRUE;
+
+            // Get remainder of description line itself
+            offset += snprintf(description, DESC_MAX, "%s",
+                               buffer + strlen(DESCRIPTION));
+
+            // Read any continuation lines of the description
+            buffer[0] = '\0';
             while (fgets(buffer, sizeof(buffer), fp)) {
                 if (!strncmp(buffer, "#  ", 3) || !strncmp(buffer, "#\t", 2)) {
-                    buffer[0] = ' ';
+                    /* '#' followed by a tab or more than one space indicates a
+                     * continuation of the long description.
+                     */
-                    offset += snprintf(description + offset, DESC_MAX - offset,
-                                       "%s", buffer);
+                    if (offset < DESC_MAX) {
+                        /* snprintf() returns the untruncated length, so don't
+                         * let the remaining size go negative */
+                        offset += snprintf(description + offset,
+                                           DESC_MAX - offset,
+                                           "%s", buffer + 1);
+                    }
                 } else {
-                    fputs(buffer, fp);
-                    break;      /* Long description ends */
+                    /* This line is not part of the long description,
+                     * so continue with normal processing.
+                     */
+                    processed_line = FALSE;
+                    break;
                 }
             }
-            continue;
-        }
 
-        if ((xml_l_dscrpt == NULL) && (offset > 0)) {
+            // Make long description safe to use in XML
             xml_l_dscrpt = (char *)xmlEncodeEntitiesReentrant(NULL, BAD_CAST(description));
+
+            if (processed_line) {
+                // We grabbed the line into the long description
+                continue;
+            }
         }
 
-        if (!strncasecmp(buffer, LSB_INITSCRIPT_INFOEND_TAG,
-                         strlen(LSB_INITSCRIPT_INFOEND_TAG))) {
-            /* Get to the out border of LSB comment block */
+        // Stop if we leave the header block
+        if (!strncmp(buffer, LSB_INITSCRIPT_INFOEND_TAG,
+                     strlen(LSB_INITSCRIPT_INFOEND_TAG))) {
             break;
         }
         if (buffer[0] != '#') {
-            break;              /* Out of comment block in the beginning */
+            break;
         }
     }
     fclose(fp);
 
     *output = crm_strdup_printf(lsb_metadata_template, type,
                                 (xml_l_dscrpt? xml_l_dscrpt : type),
                                 (s_dscrpt? s_dscrpt : type),
                                 (provides? provides : ""),
                                 (req_start? req_start : ""),
                                 (req_stop? req_stop : ""),
                                 (shld_start? shld_start : ""),
                                 (shld_stop? shld_stop : ""),
                                 (dflt_start? dflt_start : ""),
                                 (dflt_stop? dflt_stop : ""));
 
     lsb_meta_helper_free_value(xml_l_dscrpt);
     lsb_meta_helper_free_value(s_dscrpt);
     lsb_meta_helper_free_value(provides);
     lsb_meta_helper_free_value(req_start);
     lsb_meta_helper_free_value(req_stop);
     lsb_meta_helper_free_value(shld_start);
     lsb_meta_helper_free_value(shld_stop);
     lsb_meta_helper_free_value(dflt_start);
     lsb_meta_helper_free_value(dflt_stop);
 
     crm_trace("Created fake metadata: %llu",
               (unsigned long long) strlen(*output));
     return pcmk_ok;
 }
 
 #if SUPPORT_NAGIOS
 static int
 nagios_get_metadata(const char *type, char **output)
 {
     int rc = pcmk_ok;
     FILE *file_strm = NULL;
     int start = 0, length = 0, read_len = 0;
     char *metadata_file = NULL;
    int len = 36;   // covers the "/", ".xml", and terminator, plus ample slack
 
     len += strlen(NAGIOS_METADATA_DIR);
     len += strlen(type);
     metadata_file = calloc(1, len);
     CRM_CHECK(metadata_file != NULL, return -ENOMEM);
 
     sprintf(metadata_file, "%s/%s.xml", NAGIOS_METADATA_DIR, type);
     file_strm = fopen(metadata_file, "r");
     if (file_strm == NULL) {
         crm_err("Metadata file %s does not exist", metadata_file);
         free(metadata_file);
         return -EIO;
     }
 
     /* see how big the file is */
     start = ftell(file_strm);
     fseek(file_strm, 0L, SEEK_END);
     length = ftell(file_strm);
-    fseek(file_strm, 0L, start);
+    fseek(file_strm, start, SEEK_SET);
 
     CRM_ASSERT(length >= 0);
     CRM_ASSERT(start == ftell(file_strm));
 
     if (length <= 0) {
         crm_info("%s was not valid", metadata_file);
         free(*output);
         *output = NULL;
         rc = -EIO;
 
     } else {
         crm_trace("Reading %d bytes from file", length);
         *output = calloc(1, (length + 1));
         read_len = fread(*output, 1, length, file_strm);
         if (read_len != length) {
             crm_err("Calculated and read bytes differ: %d vs. %d",
                     length, read_len);
             free(*output);
             *output = NULL;
             rc = -EIO;
         }
     }
 
     fclose(file_strm);
     free(metadata_file);
     return rc;
 }
 #endif
 
 #if SUPPORT_HEARTBEAT
 /* strictly speaking, support for class=heartbeat style scripts
  * does not require "heartbeat support" to be enabled.
  * But since those scripts are part of the "heartbeat" package usually,
  * and are very unlikely to be present in any other deployment,
  * I leave it inside this ifdef.
  *
  * Yes, I know, these are legacy and should die,
  * or at least be rewritten to be a proper OCF style agent.
  * But they exist, and custom scripts following these rules do, too.
  *
  * Taken from the old "glue" lrmd, see
  * http://hg.linux-ha.org/glue/file/0a7add1d9996/lib/plugins/lrm/raexechb.c#l49
  * http://hg.linux-ha.org/glue/file/0a7add1d9996/lib/plugins/lrm/raexechb.c#l393
  */
 
 static const char hb_metadata_template[] =
     "<?xml version='1.0'?>\n"
     "<!DOCTYPE resource-agent SYSTEM 'ra-api-1.dtd'>\n"
     "<resource-agent name='%s' version='" PCMK_DEFAULT_AGENT_VERSION "'>\n"
     "<version>1.0</version>\n"
     "<longdesc lang='en'>\n"
     "%s"
     "</longdesc>\n"
     "<shortdesc lang='en'>%s</shortdesc>\n"
     "<parameters>\n"
     "<parameter name='1' unique='1' required='0'>\n"
     "<longdesc lang='en'>\n"
     "This argument will be passed as the first argument to the "
     "heartbeat resource agent (assuming it supports one)\n"
     "</longdesc>\n"
     "<shortdesc lang='en'>argv[1]</shortdesc>\n"
     "<content type='string' default=' ' />\n"
     "</parameter>\n"
     "<parameter name='2' unique='1' required='0'>\n"
     "<longdesc lang='en'>\n"
     "This argument will be passed as the second argument to the "
     "heartbeat resource agent (assuming it supports one)\n"
     "</longdesc>\n"
     "<shortdesc lang='en'>argv[2]</shortdesc>\n"
     "<content type='string' default=' ' />\n"
     "</parameter>\n"
     "<parameter name='3' unique='1' required='0'>\n"
     "<longdesc lang='en'>\n"
     "This argument will be passed as the third argument to the "
     "heartbeat resource agent (assuming it supports one)\n"
     "</longdesc>\n"
     "<shortdesc lang='en'>argv[3]</shortdesc>\n"
     "<content type='string' default=' ' />\n"
     "</parameter>\n"
     "<parameter name='4' unique='1' required='0'>\n"
     "<longdesc lang='en'>\n"
     "This argument will be passed as the fourth argument to the "
     "heartbeat resource agent (assuming it supports one)\n"
     "</longdesc>\n"
     "<shortdesc lang='en'>argv[4]</shortdesc>\n"
     "<content type='string' default=' ' />\n"
     "</parameter>\n"
     "<parameter name='5' unique='1' required='0'>\n"
     "<longdesc lang='en'>\n"
     "This argument will be passed as the fifth argument to the "
     "heartbeat resource agent (assuming it supports one)\n"
     "</longdesc>\n"
     "<shortdesc lang='en'>argv[5]</shortdesc>\n"
     "<content type='string' default=' ' />\n"
     "</parameter>\n"
     "</parameters>\n"
     "<actions>\n"
     "<action name='start'   timeout='15' />\n"
     "<action name='stop'    timeout='15' />\n"
     "<action name='status'  timeout='15' />\n"
     "<action name='monitor' timeout='15' interval='15' start-delay='15' />\n"
     "<action name='meta-data'  timeout='5' />\n"
     "</actions>\n"
     "<special tag='heartbeat'>\n"
     "</special>\n"
     "</resource-agent>\n";
 
 static int
 heartbeat_get_metadata(const char *type, char **output)
 {
     *output = crm_strdup_printf(hb_metadata_template, type, type, type);
     crm_trace("Created fake metadata: %llu",
               (unsigned long long) strlen(*output));
     return pcmk_ok;
 }
 #endif
 
 static gboolean
 action_get_metadata(svc_action_t *op)
 {
     const char *class = op->standard;
 
     if (op->agent == NULL) {
         crm_err("meta-data requested without specifying agent");
         return FALSE;
     }
 
     if (class == NULL) {
         crm_err("meta-data requested for agent %s without specifying class",
                 op->agent);
         return FALSE;
     }
 
     if (!strcmp(class, PCMK_RESOURCE_CLASS_SERVICE)) {
         class = resources_find_service_class(op->agent);
     }
 
     if (class == NULL) {
         crm_err("meta-data requested for %s, but could not determine class",
                 op->agent);
         return FALSE;
     }
 
     if (safe_str_eq(class, PCMK_RESOURCE_CLASS_LSB)) {
         return (lsb_get_metadata(op->agent, &op->stdout_data) >= 0);
     }
 
 #if SUPPORT_NAGIOS
     if (safe_str_eq(class, PCMK_RESOURCE_CLASS_NAGIOS)) {
         return (nagios_get_metadata(op->agent, &op->stdout_data) >= 0);
     }
 #endif
 
 #if SUPPORT_HEARTBEAT
     if (safe_str_eq(class, PCMK_RESOURCE_CLASS_HB)) {
         return (heartbeat_get_metadata(op->agent, &op->stdout_data) >= 0);
     }
 #endif
 
     return action_exec_helper(op);
 }
 
 gboolean
 services_action_sync(svc_action_t * op)
 {
     gboolean rc = TRUE;
 
     if (op == NULL) {
         crm_trace("No operation to execute");
         return FALSE;
     }
 
     op->synchronous = true;
 
     if (safe_str_eq(op->action, "meta-data")) {
         /* Synchronous meta-data operations are handled specially. Since most
          * resource classes don't provide any meta-data, it has to be
          * synthesized from available information about the agent.
          *
          * services_action_async() doesn't treat meta-data actions specially, so
          * it will result in an error for classes that don't support the action.
          */
         rc = action_get_metadata(op);
     } else {
         rc = action_exec_helper(op);
     }
     crm_trace(" > %s_%s_%d: %s = %d",
               op->rsc, op->action, op->interval, op->opaque->exec, op->rc);
     if (op->stdout_data) {
         crm_trace(" >  stdout: %s", op->stdout_data);
     }
     if (op->stderr_data) {
         crm_trace(" >  stderr: %s", op->stderr_data);
     }
     return rc;
 }
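+
+/* Illustrative only: synchronous meta-data retrieval for an LSB init script.
+ * The op is assumed to have been built by the caller (for example with
+ * resources_action_create()) with standard "lsb", agent "my-daemon", and
+ * action "meta-data"; on success, the synthesized XML is in op->stdout_data.
+ *
+ *     if (services_action_sync(op)) {
+ *         printf("%s", op->stdout_data);
+ *     }
+ *     services_action_free(op);
+ */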
 
 GList *
 get_directory_list(const char *root, gboolean files, gboolean executable)
 {
     return services_os_get_directory_list(root, files, executable);
 }
 
 GList *
 services_list(void)
 {
     return resources_list_agents(PCMK_RESOURCE_CLASS_LSB, NULL);
 }
 
 #if SUPPORT_HEARTBEAT
 static GList *
 resources_os_list_hb_agents(void)
 {
     return services_os_get_directory_list(HB_RA_DIR, TRUE, TRUE);
 }
 #endif
 
 GList *
 resources_list_standards(void)
 {
     GList *standards = NULL;
     GList *agents = NULL;
 
     standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_OCF));
     standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_LSB));
     standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_SERVICE));
 
 #if SUPPORT_SYSTEMD
     agents = systemd_unit_listall();
     if (agents) {
         standards = g_list_append(standards,
                                   strdup(PCMK_RESOURCE_CLASS_SYSTEMD));
         g_list_free_full(agents, free);
     }
 #endif
 
 #if SUPPORT_UPSTART
     agents = upstart_job_listall();
     if (agents) {
         standards = g_list_append(standards,
                                   strdup(PCMK_RESOURCE_CLASS_UPSTART));
         g_list_free_full(agents, free);
     }
 #endif
 
 #if SUPPORT_NAGIOS
     agents = resources_os_list_nagios_agents();
     if (agents) {
         standards = g_list_append(standards,
                                   strdup(PCMK_RESOURCE_CLASS_NAGIOS));
         g_list_free_full(agents, free);
     }
 #endif
 
 #if SUPPORT_HEARTBEAT
     standards = g_list_append(standards, strdup(PCMK_RESOURCE_CLASS_HB));
 #endif
 
     return standards;
 }
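+
+/* Illustrative only: the returned list owns its strings, so release it with
+ * g_list_free_full():
+ *
+ *     GList *standards = resources_list_standards();
+ *     GList *iter = NULL;
+ *
+ *     for (iter = standards; iter != NULL; iter = iter->next) {
+ *         crm_info("Supported standard: %s", (const char *) iter->data);
+ *     }
+ *     g_list_free_full(standards, free);
+ */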
 
 GList *
 resources_list_providers(const char *standard)
 {
     if (crm_provider_required(standard)) {
         return resources_os_list_ocf_providers();
     }
 
     return NULL;
 }
 
 GList *
 resources_list_agents(const char *standard, const char *provider)
 {
     if ((standard == NULL)
         || (strcasecmp(standard, PCMK_RESOURCE_CLASS_SERVICE) == 0)) {
 
         GList *tmp1;
         GList *tmp2;
         GList *result = resources_os_list_lsb_agents();
 
         if (standard == NULL) {
             tmp1 = result;
             tmp2 = resources_os_list_ocf_agents(NULL);
             if (tmp2) {
                 result = g_list_concat(tmp1, tmp2);
             }
         }
 #if SUPPORT_SYSTEMD
         tmp1 = result;
         tmp2 = systemd_unit_listall();
         if (tmp2) {
             result = g_list_concat(tmp1, tmp2);
         }
 #endif
 
 #if SUPPORT_UPSTART
         tmp1 = result;
         tmp2 = upstart_job_listall();
         if (tmp2) {
             result = g_list_concat(tmp1, tmp2);
         }
 #endif
 
         return result;
 
     } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_OCF) == 0) {
         return resources_os_list_ocf_agents(provider);
     } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_LSB) == 0) {
         return resources_os_list_lsb_agents();
 #if SUPPORT_HEARTBEAT
     } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_HB) == 0) {
         return resources_os_list_hb_agents();
 #endif
 #if SUPPORT_SYSTEMD
     } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_SYSTEMD) == 0) {
         return systemd_unit_listall();
 #endif
 #if SUPPORT_UPSTART
     } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_UPSTART) == 0) {
         return upstart_job_listall();
 #endif
 #if SUPPORT_NAGIOS
     } else if (strcasecmp(standard, PCMK_RESOURCE_CLASS_NAGIOS) == 0) {
         return resources_os_list_nagios_agents();
 #endif
     }
 
     return NULL;
 }
diff --git a/lib/services/systemd.c b/lib/services/systemd.c
index 6f3c28e654..4642386538 100644
--- a/lib/services/systemd.c
+++ b/lib/services/systemd.c
@@ -1,761 +1,792 @@
 /*
  * Copyright (C) 2012-2016 Andrew Beekhof <andrew@beekhof.net>
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/crm.h>
 #include <crm/services.h>
 #include <crm/common/mainloop.h>
 
 #include <sys/stat.h>
 #include <gio/gio.h>
 #include <services_private.h>
 #include <systemd.h>
 #include <dbus/dbus.h>
 #include <pcmk-dbus.h>
 
 gboolean systemd_unit_exec_with_unit(svc_action_t * op, const char *unit);
 
 #define BUS_NAME         "org.freedesktop.systemd1"
 #define BUS_NAME_MANAGER BUS_NAME ".Manager"
 #define BUS_NAME_UNIT    BUS_NAME ".Unit"
 #define BUS_PATH         "/org/freedesktop/systemd1"
 
 static inline DBusMessage *
 systemd_new_method(const char *method)
 {
     crm_trace("Calling: %s on " BUS_NAME_MANAGER, method);
     return dbus_message_new_method_call(BUS_NAME, BUS_PATH, BUS_NAME_MANAGER,
                                         method);
 }
 
 /*
  * Functions to manage a static DBus connection
  */
 
 static DBusConnection* systemd_proxy = NULL;
 
 static inline DBusPendingCall *
 systemd_send(DBusMessage *msg,
              void(*done)(DBusPendingCall *pending, void *user_data),
              void *user_data, int timeout)
 {
     return pcmk_dbus_send(msg, systemd_proxy, done, user_data, timeout);
 }
 
 static inline DBusMessage *
 systemd_send_recv(DBusMessage *msg, DBusError *error, int timeout)
 {
     return pcmk_dbus_send_recv(msg, systemd_proxy, error, timeout);
 }
 
 /*!
  * \internal
  * \brief Send a method to systemd without arguments, and wait for reply
  *
  * \param[in] method  Method to send
  *
  * \return Systemd reply on success, NULL (and error will be logged) otherwise
  *
  * \note The caller must call dbus_message_unref() on the reply after
  *       handling it.
  */
 static DBusMessage *
 systemd_call_simple_method(const char *method)
 {
     DBusMessage *msg = systemd_new_method(method);
     DBusMessage *reply = NULL;
     DBusError error;
 
     /* Don't call systemd_init() here, because that calls this */
     CRM_CHECK(systemd_proxy, return NULL);
 
     if (msg == NULL) {
         crm_err("Could not create message to send %s to systemd", method);
         return NULL;
     }
 
     dbus_error_init(&error);
     reply = systemd_send_recv(msg, &error, DBUS_TIMEOUT_USE_DEFAULT);
     dbus_message_unref(msg);
 
     if (dbus_error_is_set(&error)) {
         crm_err("Could not send %s to systemd: %s (%s)",
                 method, error.message, error.name);
         dbus_error_free(&error);
         return NULL;
 
     } else if (reply == NULL) {
         crm_err("Could not send %s to systemd: no reply received", method);
         return NULL;
     }
 
     return reply;
 }
 
 static gboolean
 systemd_init(void)
 {
     static int need_init = 1;
     /* http://dbus.freedesktop.org/doc/api/html/group__DBusConnection.html */
 
     if (systemd_proxy
         && dbus_connection_get_is_connected(systemd_proxy) == FALSE) {
         crm_warn("Connection to System DBus is closed. Reconnecting...");
         pcmk_dbus_disconnect(systemd_proxy);
         systemd_proxy = NULL;
         need_init = 1;
     }
 
     if (need_init) {
         need_init = 0;
         systemd_proxy = pcmk_dbus_connect();
     }
     if (systemd_proxy == NULL) {
         return FALSE;
     }
     return TRUE;
 }
 
 static inline char *
 systemd_get_property(const char *unit, const char *name,
                      void (*callback)(const char *name, const char *value, void *userdata),
                      void *userdata, DBusPendingCall **pending, int timeout)
 {
     return systemd_proxy?
            pcmk_dbus_get_property(systemd_proxy, BUS_NAME, unit, BUS_NAME_UNIT,
                                   name, callback, userdata, pending, timeout)
            : NULL;
 }
 
 void
 systemd_cleanup(void)
 {
     if (systemd_proxy) {
         pcmk_dbus_disconnect(systemd_proxy);
         systemd_proxy = NULL;
     }
 }
 
 /*
  * end of systemd_proxy functions
  */
 
 /*!
  * \internal
  * \brief Check whether a file name represents a systemd unit
  *
  * \param[in] name  File name to check
  *
  * \return Pointer to "dot" before filename extension if so, NULL otherwise
  */
 static const char *
 systemd_unit_extension(const char *name)
 {
     if (name) {
         const char *dot = strrchr(name, '.');
 
-        if (dot && (!strcmp(dot, ".service") || !strcmp(dot, ".socket"))) {
+        /* Keep these in sync with the unit types that systemd_unit_listall()
+         * treats as manageable */
+        if (dot && (!strcmp(dot, ".service")
+                    || !strcmp(dot, ".socket")
+                    || !strcmp(dot, ".mount"))) {
             return dot;
         }
     }
     return NULL;
 }
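+
+/* For example: "foo.service", "foo.socket", and "foo.mount" all return a
+ * pointer to their final ".", while "foo", "foo.target", and NULL return NULL.
+ */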
 
 static char *
 systemd_service_name(const char *name)
 {
     if (name == NULL) {
         return NULL;
     }
 
     if (systemd_unit_extension(name)) {
         return strdup(name);
     }
 
     return crm_strdup_printf("%s.service", name);
 }
 
 static void
 systemd_daemon_reload_complete(DBusPendingCall *pending, void *user_data)
 {
     DBusError error;
     DBusMessage *reply = NULL;
     unsigned int reload_count = GPOINTER_TO_UINT(user_data);
 
     dbus_error_init(&error);
     if(pending) {
         reply = dbus_pending_call_steal_reply(pending);
     }
 
     if (pcmk_dbus_find_error(pending, reply, &error)) {
         crm_err("Could not issue systemd reload %d: %s", reload_count, error.message);
         dbus_error_free(&error);
 
     } else {
         crm_trace("Reload %d complete", reload_count);
     }
 
     if(pending) {
         dbus_pending_call_unref(pending);
     }
     if(reply) {
         dbus_message_unref(reply);
     }
 }
 
 static bool
 systemd_daemon_reload(int timeout)
 {
     static unsigned int reload_count = 0;
     DBusMessage *msg = systemd_new_method("Reload");
 
     reload_count++;
     CRM_ASSERT(msg != NULL);
     systemd_send(msg, systemd_daemon_reload_complete,
                  GUINT_TO_POINTER(reload_count), timeout);
     dbus_message_unref(msg);
 
     return TRUE;
 }
 
 static bool
 systemd_mask_error(svc_action_t *op, const char *error)
 {
     crm_trace("Could not issue %s for %s: %s", op->action, op->rsc, error);
     if(strstr(error, "org.freedesktop.systemd1.InvalidName")
        || strstr(error, "org.freedesktop.systemd1.LoadFailed")
        || strstr(error, "org.freedesktop.systemd1.NoSuchUnit")) {
 
         if (safe_str_eq(op->action, "stop")) {
             crm_trace("Masking %s failure for %s: unknown services are stopped", op->action, op->rsc);
             op->rc = PCMK_OCF_OK;
             return TRUE;
 
         } else {
             crm_trace("Mapping %s failure for %s: unknown services are not installed", op->action, op->rsc);
             op->rc = PCMK_OCF_NOT_INSTALLED;
             op->status = PCMK_LRM_OP_NOT_INSTALLED;
             return FALSE;
         }
     }
 
     return FALSE;
 }
 
 static const char *
 systemd_loadunit_result(DBusMessage *reply, svc_action_t * op)
 {
     const char *path = NULL;
     DBusError error;
 
     if (pcmk_dbus_find_error((void*)&path, reply, &error)) {
         if(op && !systemd_mask_error(op, error.name)) {
             crm_err("Could not load systemd unit %s for %s: %s",
                     op->agent, op->id, error.message);
         }
         dbus_error_free(&error);
 
     } else if(pcmk_dbus_type_check(reply, NULL, DBUS_TYPE_OBJECT_PATH, __FUNCTION__, __LINE__)) {
         dbus_message_get_args (reply, NULL,
                                DBUS_TYPE_OBJECT_PATH, &path,
                                DBUS_TYPE_INVALID);
     }
 
     if(op) {
         if (path) {
             systemd_unit_exec_with_unit(op, path);
 
         } else if (op->synchronous == FALSE) {
             operation_finalize(op);
         }
     }
 
     return path;
 }
 
 
 static void
 systemd_loadunit_cb(DBusPendingCall *pending, void *user_data)
 {
     DBusMessage *reply = NULL;
     svc_action_t * op = user_data;
 
     if(pending) {
         reply = dbus_pending_call_steal_reply(pending);
     }
 
     crm_trace("Got result: %p for %p / %p for %s", reply, pending, op->opaque->pending, op->id);
 
     CRM_LOG_ASSERT(pending == op->opaque->pending);
     services_set_op_pending(op, NULL);
 
     systemd_loadunit_result(reply, user_data);
 
     if(reply) {
         dbus_message_unref(reply);
     }
 }
 
 static char *
 systemd_unit_by_name(const gchar * arg_name, svc_action_t *op)
 {
     DBusMessage *msg;
     DBusMessage *reply = NULL;
     DBusPendingCall* pending = NULL;
     char *name = NULL;
 
 /*
   Equivalent to GetUnit if it's already loaded
   <method name="LoadUnit">
    <arg name="name" type="s" direction="in"/>
    <arg name="unit" type="o" direction="out"/>
   </method>
  */
 
     if (systemd_init() == FALSE) {
-        return FALSE;
+        return NULL;
     }
 
     msg = systemd_new_method("LoadUnit");
     CRM_ASSERT(msg != NULL);
 
     name = systemd_service_name(arg_name);
     CRM_LOG_ASSERT(dbus_message_append_args(msg, DBUS_TYPE_STRING, &name, DBUS_TYPE_INVALID));
     free(name);
 
     if(op == NULL || op->synchronous) {
         const char *unit = NULL;
         char *munit = NULL;
 
         reply = systemd_send_recv(msg, NULL,
                                   (op? op->timeout : DBUS_TIMEOUT_USE_DEFAULT));
         dbus_message_unref(msg);
 
         unit = systemd_loadunit_result(reply, op);
         if(unit) {
             munit = strdup(unit);
         }
         if(reply) {
             dbus_message_unref(reply);
         }
         return munit;
     }
 
     pending = systemd_send(msg, systemd_loadunit_cb, op, op->timeout);
     if(pending) {
         services_set_op_pending(op, pending);
     }
 
     dbus_message_unref(msg);
     return NULL;
 }
 
 GList *
 systemd_unit_listall(void)
 {
-    int lpc = 0;
+    int nfiles = 0;
     GList *units = NULL;
     DBusMessageIter args;
     DBusMessageIter unit;
     DBusMessageIter elem;
     DBusMessage *reply = NULL;
 
     if (systemd_init() == FALSE) {
         return NULL;
     }
 
 /*
-        "  <method name=\"ListUnits\">\n"                               \
-        "   <arg name=\"units\" type=\"a(ssssssouso)\" direction=\"out\"/>\n" \
+        "  <method name=\"ListUnitFiles\">\n"                               \
+        "   <arg name=\"files\" type=\"a(ss)\" direction=\"out\"/>\n" \
         "  </method>\n"                                                 \
 */
 
-    reply = systemd_call_simple_method("ListUnits");
+    reply = systemd_call_simple_method("ListUnitFiles");
     if (reply == NULL) {
         return NULL;
     }
     if (!dbus_message_iter_init(reply, &args)) {
-        crm_err("Could not list systemd units: systemd reply has no arguments");
+        crm_err("Could not list systemd unit files: systemd reply has no arguments");
         dbus_message_unref(reply);
         return NULL;
     }
     if (!pcmk_dbus_type_check(reply, &args, DBUS_TYPE_ARRAY,
                               __FUNCTION__, __LINE__)) {
-        crm_err("Could not list systemd units: systemd reply has invalid arguments");
+        crm_err("Could not list systemd unit files: systemd reply has invalid arguments");
         dbus_message_unref(reply);
         return NULL;
     }
 
     dbus_message_iter_recurse(&args, &unit);
-    while (dbus_message_iter_get_arg_type (&unit) != DBUS_TYPE_INVALID) {
+    for (; dbus_message_iter_get_arg_type(&unit) != DBUS_TYPE_INVALID;
+        dbus_message_iter_next(&unit)) {
+
         DBusBasicValue value;
+        const char *match = NULL;
+        char *unit_name = NULL;
+        char *basename = NULL;
 
         if(!pcmk_dbus_type_check(reply, &unit, DBUS_TYPE_STRUCT, __FUNCTION__, __LINE__)) {
+            crm_debug("ListUnitFiles reply has unexpected type");
             continue;
         }
 
         dbus_message_iter_recurse(&unit, &elem);
         if(!pcmk_dbus_type_check(reply, &elem, DBUS_TYPE_STRING, __FUNCTION__, __LINE__)) {
+            crm_debug("ListUnitFiles reply does not contain a string");
             continue;
         }
 
         dbus_message_iter_get_basic(&elem, &value);
-        crm_trace("DBus ListUnits listed: %s", value.str);
-        if(value.str) {
-            const char *match = systemd_unit_extension(value.str);
-
-            if (match) {
-                char *unit_name;
-
-                if (!strcmp(match, ".service")) {
-                    /* service is the "default" unit type, so strip it */
-                    unit_name = strndup(value.str, match - value.str);
-                } else {
-                    unit_name = strdup(value.str);
-                }
-                lpc++;
-                units = g_list_append(units, unit_name);
-            }
+        if (value.str == NULL) {
+            crm_debug("ListUnitFiles reply did not provide a string");
+            continue;
+        }
+        crm_trace("DBus ListUnitFiles listed: %s", value.str);
+
+        match = systemd_unit_extension(value.str);
+        if (match == NULL) {
+            // Unit files always have an extension, so skip if not present
+            crm_debug("ListUnitFiles entry '%s' does not have an extension",
+                      value.str);
+            continue;
+        }
+
+        // ListUnitFiles returns full path names
+        basename = strrchr(value.str, '/');
+        if (basename) {
+            basename = basename + 1;
+        } else {
+            basename = value.str;
+        }
+
+        /* Unit files will include types (such as .target) that we can't manage,
+         * so filter the replies here.
+         */
+        if (!strcmp(match, ".service")) {
+            // Service is the "default" unit type, so strip it
+            unit_name = strndup(basename, match - basename);
+
+        } else if (!strcmp(match, ".mount")
+                   || !strcmp(match, ".socket")) {
+            unit_name = strdup(basename);
+        }
+        if (unit_name == NULL) {
+            crm_trace("ListUnitFiles entry '%s' is not manageable",
+                      value.str);
+            continue;
         }
-        dbus_message_iter_next (&unit);
+
+        nfiles++;
+        units = g_list_prepend(units, unit_name);
     }
 
     dbus_message_unref(reply);
 
-    crm_trace("Found %d systemd services", lpc);
+    crm_trace("Found %d manageable systemd unit files", nfiles);
+    units = g_list_sort(units, crm_alpha_sort);
     return units;
 }
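+
+/* Usage sketch (illustrative): enumerate the manageable unit files and release
+ * the list, which owns its strings:
+ *
+ *     GList *units = systemd_unit_listall();
+ *     GList *iter = NULL;
+ *
+ *     for (iter = units; iter != NULL; iter = iter->next) {
+ *         crm_trace("Manageable unit: %s", (const char *) iter->data);
+ *     }
+ *     g_list_free_full(units, free);
+ */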
 
 gboolean
 systemd_unit_exists(const char *name)
 {
     char *unit = NULL;
 
    /* Note: this makes a blocking DBus call. It is used by
     * resources_find_service_class() when the resource class is "service".
     */
     unit = systemd_unit_by_name(name, NULL);
     if(unit) {
         free(unit);
         return TRUE;
     }
     return FALSE;
 }
 
 static char *
 systemd_unit_metadata(const char *name, int timeout)
 {
     char *meta = NULL;
     char *desc = NULL;
     char *path = systemd_unit_by_name(name, NULL);
 
     if (path) {
        /* TODO: Worth making a blocking call for? Probably not. Possibly if cached. */
         desc = systemd_get_property(path, "Description", NULL, NULL, NULL,
                                     timeout);
     } else {
         desc = crm_strdup_printf("Systemd unit file for %s", name);
     }
 
     meta = crm_strdup_printf("<?xml version=\"1.0\"?>\n"
                            "<!DOCTYPE resource-agent SYSTEM \"ra-api-1.dtd\">\n"
                            "<resource-agent name=\"%s\" version=\"" PCMK_DEFAULT_AGENT_VERSION "\">\n"
                            "  <version>1.0</version>\n"
                            "  <longdesc lang=\"en\">\n"
                            "    %s\n"
                            "  </longdesc>\n"
                            "  <shortdesc lang=\"en\">systemd unit file for %s</shortdesc>\n"
                            "  <parameters>\n"
                            "  </parameters>\n"
                            "  <actions>\n"
                            "    <action name=\"start\"   timeout=\"100\" />\n"
                            "    <action name=\"stop\"    timeout=\"100\" />\n"
                            "    <action name=\"status\"  timeout=\"100\" />\n"
                            "    <action name=\"monitor\" timeout=\"100\" interval=\"60\"/>\n"
                            "    <action name=\"meta-data\"  timeout=\"5\" />\n"
                            "  </actions>\n"
                            "  <special tag=\"systemd\">\n"
                            "  </special>\n" "</resource-agent>\n", name, desc, name);
     free(desc);
     free(path);
     return meta;
 }
 
 static void
 systemd_exec_result(DBusMessage *reply, svc_action_t *op)
 {
     DBusError error;
 
     if (pcmk_dbus_find_error((void*)&error, reply, &error)) {
 
         /* ignore "already started" or "not running" errors */
         if (!systemd_mask_error(op, error.name)) {
             crm_err("Could not issue %s for %s: %s", op->action, op->rsc, error.message);
         }
         dbus_error_free(&error);
 
     } else {
         if(!pcmk_dbus_type_check(reply, NULL, DBUS_TYPE_OBJECT_PATH, __FUNCTION__, __LINE__)) {
             crm_warn("Call to %s passed but return type was unexpected", op->action);
             op->rc = PCMK_OCF_OK;
 
         } else {
             const char *path = NULL;
 
             dbus_message_get_args (reply, NULL,
                                    DBUS_TYPE_OBJECT_PATH, &path,
                                    DBUS_TYPE_INVALID);
             crm_info("Call to %s passed: %s", op->action, path);
             op->rc = PCMK_OCF_OK;
         }
     }
 
     operation_finalize(op);
 }
 
 static void
 systemd_async_dispatch(DBusPendingCall *pending, void *user_data)
 {
     DBusMessage *reply = NULL;
     svc_action_t *op = user_data;
 
     if(pending) {
         reply = dbus_pending_call_steal_reply(pending);
     }
 
     crm_trace("Got result: %p for %p for %s, %s", reply, pending, op->rsc, op->action);
 
     CRM_LOG_ASSERT(pending == op->opaque->pending);
     services_set_op_pending(op, NULL);
     systemd_exec_result(reply, op);
 
     if(reply) {
         dbus_message_unref(reply);
     }
 }
 
 #define SYSTEMD_OVERRIDE_ROOT "/run/systemd/system/"
 
 static void
 systemd_unit_check(const char *name, const char *state, void *userdata)
 {
     svc_action_t * op = userdata;
 
     crm_trace("Resource %s has %s='%s'", op->rsc, name, state);
 
     if(state == NULL) {
         op->rc = PCMK_OCF_NOT_RUNNING;
 
     } else if (g_strcmp0(state, "active") == 0) {
         op->rc = PCMK_OCF_OK;
     } else if (g_strcmp0(state, "reloading") == 0) {
         op->rc = PCMK_OCF_OK;
     } else if (g_strcmp0(state, "activating") == 0) {
         op->rc = PCMK_OCF_PENDING;
     } else if (g_strcmp0(state, "deactivating") == 0) {
         op->rc = PCMK_OCF_PENDING;
     } else {
         op->rc = PCMK_OCF_NOT_RUNNING;
     }
 
     if (op->synchronous == FALSE) {
         services_set_op_pending(op, NULL);
         operation_finalize(op);
     }
 }
 
 gboolean
 systemd_unit_exec_with_unit(svc_action_t * op, const char *unit)
 {
     const char *method = op->action;
     DBusMessage *msg = NULL;
     DBusMessage *reply = NULL;
 
     CRM_ASSERT(unit);
 
     if (safe_str_eq(op->action, "monitor") || safe_str_eq(method, "status")) {
         DBusPendingCall *pending = NULL;
         char *state;
 
         state = systemd_get_property(unit, "ActiveState",
                                      (op->synchronous? NULL : systemd_unit_check),
                                      op, (op->synchronous? NULL : &pending),
                                      op->timeout);
         if (op->synchronous) {
             systemd_unit_check("ActiveState", state, op);
             free(state);
             return op->rc == PCMK_OCF_OK;
         } else if (pending) {
             services_set_op_pending(op, pending);
             return TRUE;
 
         } else {
             return operation_finalize(op);
         }
 
     } else if (g_strcmp0(method, "start") == 0) {
         FILE *file_strm = NULL;
         char *override_dir = crm_strdup_printf("%s/%s.service.d", SYSTEMD_OVERRIDE_ROOT, op->agent);
         char *override_file = crm_strdup_printf("%s/%s.service.d/50-pacemaker.conf", SYSTEMD_OVERRIDE_ROOT, op->agent);
         mode_t orig_umask;
 
         method = "StartUnit";
         crm_build_path(override_dir, 0755);
 
         /* Ensure the override file is world-readable. This is not strictly
          * necessary, but it avoids a systemd warning in the logs.
          */
         orig_umask = umask(S_IWGRP | S_IWOTH);
         file_strm = fopen(override_file, "w");
         umask(orig_umask);
 
         if (file_strm != NULL) {
             /* TODO: Insert the start timeout in too */
             char *override = crm_strdup_printf(
                 "[Unit]\n"
                 "Description=Cluster Controlled %s\n"
                 "Before=pacemaker.service\n"
                 "\n"
                 "[Service]\n"
                 "Restart=no\n",
                 op->agent);
 
             int rc = fprintf(file_strm, "%s\n", override);
 
             free(override);
             if (rc < 0) {
                 crm_perror(LOG_ERR, "Cannot write to systemd override file %s", override_file);
             }
 
         } else {
             crm_err("Cannot open systemd override file %s for writing", override_file);
         }
 
         if (file_strm != NULL) {
             fflush(file_strm);
             fclose(file_strm);
         }
         systemd_daemon_reload(op->timeout);
         free(override_file);
         free(override_dir);
 
     } else if (g_strcmp0(method, "stop") == 0) {
         char *override_file = crm_strdup_printf("%s/%s.service.d/50-pacemaker.conf", SYSTEMD_OVERRIDE_ROOT, op->agent);
 
         method = "StopUnit";
         unlink(override_file);
         free(override_file);
         systemd_daemon_reload(op->timeout);
 
     } else if (g_strcmp0(method, "restart") == 0) {
         method = "RestartUnit";
 
     } else {
         op->rc = PCMK_OCF_UNIMPLEMENT_FEATURE;
         goto cleanup;
     }
 
     crm_debug("Calling %s for %s: %s", method, op->rsc, unit);
 
     msg = systemd_new_method(method);
     CRM_ASSERT(msg != NULL);
 
     /* Append the method arguments: unit name and job mode (DBus signature "ss") */
     {
         const char *replace_s = "replace";
         char *name = systemd_service_name(op->agent);
 
         CRM_LOG_ASSERT(dbus_message_append_args(msg, DBUS_TYPE_STRING, &name, DBUS_TYPE_INVALID));
         CRM_LOG_ASSERT(dbus_message_append_args(msg, DBUS_TYPE_STRING, &replace_s, DBUS_TYPE_INVALID));
 
         free(name);
     }
 
     if (op->synchronous == FALSE) {
         DBusPendingCall *pending = systemd_send(msg, systemd_async_dispatch,
                                                 op, op->timeout);
 
         dbus_message_unref(msg);
         if(pending) {
             services_set_op_pending(op, pending);
             return TRUE;
 
         } else {
             return operation_finalize(op);
         }
 
     } else {
         reply = systemd_send_recv(msg, NULL, op->timeout);
         dbus_message_unref(msg);
         systemd_exec_result(reply, op);
 
         if(reply) {
             dbus_message_unref(reply);
         }
         return FALSE;
     }
 
   cleanup:
     if (op->synchronous == FALSE) {
         return operation_finalize(op);
     }
 
     return op->rc == PCMK_OCF_OK;
 }
 
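 /*!
  * \internal
  * \brief Handle a timeout for an asynchronous systemd action
  *
  * \param[in,out] p  Operation that timed out (svc_action_t *)
  *
  * \return FALSE (so glib does not reschedule the timer)
  */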
 static gboolean
 systemd_timeout_callback(gpointer p)
 {
     svc_action_t * op = p;
 
     op->opaque->timerid = 0;
     crm_warn("%s operation on systemd unit %s named '%s' timed out", op->action, op->agent, op->rsc);
     operation_finalize(op);
 
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Execute a systemd action
  *
  * \param[in,out] op  Action to execute
  *
  * \return For an asynchronous 'op', FALSE if 'op' should be freed by the
  *         caller; for a synchronous 'op', FALSE if 'op' fails
  */
 gboolean
 systemd_unit_exec(svc_action_t * op)
 {
     char *unit = NULL;
 
     CRM_ASSERT(op);
     CRM_ASSERT(systemd_init());
     op->rc = PCMK_OCF_UNKNOWN_ERROR;
     crm_debug("Performing %ssynchronous %s op on systemd unit %s named '%s'",
               op->synchronous ? "" : "a", op->action, op->agent, op->rsc);
 
     if (safe_str_eq(op->action, "meta-data")) {
         /* TODO: See if we can teach the lrmd not to make these calls synchronously */
         op->stdout_data = systemd_unit_metadata(op->agent, op->timeout);
         op->rc = PCMK_OCF_OK;
 
         if (op->synchronous == FALSE) {
             return operation_finalize(op);
         }
         return TRUE;
     }
 
     unit = systemd_unit_by_name(op->agent, op);
     free(unit);
 
     if (op->synchronous == FALSE) {
         if (op->opaque->pending) {
             op->opaque->timerid = g_timeout_add(op->timeout + 5000, systemd_timeout_callback, op);
             services_add_inflight_op(op);
             return TRUE;
 
         } else {
             return operation_finalize(op);
         }
     }
 
     return op->rc == PCMK_OCF_OK;
 }
diff --git a/mcp/pacemaker.sysconfig b/mcp/pacemaker.sysconfig
index 0ca8f9de06..fe92a94643 100644
--- a/mcp/pacemaker.sysconfig
+++ b/mcp/pacemaker.sysconfig
@@ -1,142 +1,142 @@
 # If pacemaker is started via init script, the script may attempt to detect the
 # cluster layer in use. This option forces it to recognize a particular type,
 # in case its detection is inaccurate. Currently, the only value that is
 # significant is "cman", which will cause the init script to start and stop
 # important ancillary services so that services such as fenced and cman can
 # reliably shut down. Any other value is ignored. The default is unset.
 # PCMK_STACK=cman
 
 #==#==# Variables that control logging
 
 # Enable debug logging globally or per-subsystem.
 # Multiple subsystems may be listed separated by commas,
 # e.g. PCMK_debug=crmd,pengine
 # PCMK_debug=yes|no|crmd|pengine|cib|stonith-ng|attrd|pacemakerd
 
 # Send detailed log messages to the specified file. Compared to messages logged
 # via syslog, messages in this file may have extended information, and will
 # include messages of "info" severity (and, if debug and/or trace logging
 # has been enabled, those as well). This log is of more use to developers and
 # advanced system administrators, and when reporting problems. By default,
 # Pacemaker will use the value of logfile in corosync.conf, if found.
 # PCMK_logfile=/var/log/pacemaker.log
 
 # Enable logging via syslog, using the specified syslog facility. Messages sent
 # here are of value to all Pacemaker users. This can be disabled using "none",
 # but that is not recommended. The default is "daemon".
 # PCMK_logfacility=none|daemon|user|local0|local1|local2|local3|local4|local5|local6|local7
 
 # Unless syslog logging is disabled using PCMK_logfacility=none, messages of
 # the specified severity and higher will be sent to syslog. The default value
 # of "notice" is appropriate for most installations; "info" is highly verbose
 # and "debug" is almost certain to send you blind (which is why there is a
 # separate detail log specified by PCMK_logfile).
 # PCMK_logpriority=emerg|alert|crit|error|warning|notice|info|debug
 
 # Log all messages from a comma-separated list of functions.
 # PCMK_trace_functions=function1,function2,function3
 
 # Log all messages from a comma-separated list of files (no path).
 # Wildcards are supported, e.g. PCMK_trace_files=prefix*.c
 # PCMK_trace_files=file.c,other.h
 
 # Log all messages matching comma-separated list of formats.
 # PCMK_trace_formats="Sent delete %d"
 
 # Log all messages from a comma-separated list of tags.
 # PCMK_trace_tags=tag1,tag2
 
 # Dump the blackbox whenever the message at function and line is emitted,
 # e.g. PCMK_trace_blackbox=te_graph_trigger:223,unpack_clone:81
 # PCMK_trace_blackbox=fn:line,fn2:line2,...
 
 # Enable blackbox logging globally or per-subsystem. The blackbox contains a
 # rolling buffer of all logs (including info, debug, and trace) and is written
 # after a crash or assertion failure, and/or when SIGTRAP is received. The
 # blackbox recorder can also be enabled for Pacemaker daemons at runtime by
 # sending SIGUSR1 (or SIGTRAP), and disabled by sending SIGUSR2. Multiple
 # subsystems may be listed separated by commas, e.g. PCMK_blackbox=crmd,pengine
 # PCMK_blackbox=yes|no|crmd|pengine|cib|stonith-ng|attrd|pacemakerd
 
 #==#==# Advanced use only
 
 # By default, nodes will join the cluster in an online state when they first
 # start, unless they were previously put into standby mode. If this variable is
 # set to "standby" or "online", it will force this node to join in the
 # specified state when starting.
-# (experimental; currently ignored for Pacemaker Remote nodes)
+# (only supported for cluster nodes, not Pacemaker Remote nodes)
 # PCMK_node_start_state=default
 
 # If the cluster uses an older version of corosync (prior to 2.0), set this to
 # "true", which will use a node's uname as its UUID. The default, "false", is
 # appropriate for newer versions of corosync, and will use a node's corosync ID
 # as its UUID. It is ignored by clusters that do not use corosync.
 # PCMK_uname_is_uuid=false
 
 # Specify an alternate location for RNG schemas and XSL transforms.
 # (This is of use only to developers.)
 # PCMK_schema_directory=/some/path
 
 # Pacemaker consists of a master process with multiple subsidiary daemons. If
 # one of the daemons crashes, the master process will normally attempt to
 # restart it. If this is set to "true", the master process will instead panic
 # the host (see PCMK_panic_action). The default is unset.
 # PCMK_fail_fast=no
 
 # Pacemaker will panic its host under certain conditions. If this is set to
 # "crash", Pacemaker will trigger a kernel crash (which is useful if you want a
 # kernel dump to investigate). For any other value, Pacemaker will trigger a
 # host reboot. The default is unset.
 # PCMK_panic_action=crash
 
 #==#==# Pacemaker Remote
 # Use the contents of this file as the authorization key to use with Pacemaker
 # Remote connections. This file must be readable by Pacemaker daemons (that is,
 # it must allow read permissions to either the hacluster user or the haclient
 # group), and its contents must be identical on all nodes. The default is
 # "/etc/pacemaker/authkey".
 # PCMK_authkey_location=/etc/pacemaker/authkey
 
 # Use this TCP port number when connecting to a Pacemaker Remote node. This
 # value must be the same on all nodes. The default is "3121".
 # PCMK_remote_port=3121
 
 #==#==# IPC
 
 # Force use of a particular class of IPC connection.
 # PCMK_ipc_type=shared-mem|socket|posix|sysv
 
 # Specify an IPC buffer size in bytes. This is useful when connecting to very
 # large clusters that exceed the default 128KB buffer.
 # PCMK_ipc_buffer=131072
 
 #==#==# Profiling and memory leak testing (mainly useful to developers)
 
 # Affect the behavior of glib's memory allocator. Setting to "always-malloc"
 # when running under valgrind will help valgrind track malloc/free better;
 # setting to "debug-blocks" when not running under valgrind will perform
 # (somewhat expensive) memory checks.
 # G_SLICE=always-malloc
 
 # Uncommenting this will make malloc() initialize newly allocated memory
 # and free() wipe it (to help catch uninitialized-memory/use-after-free).
 # MALLOC_PERTURB_=221
 
 # Uncommenting this will make malloc() and friends print to stderr and abort
 # for some (inexpensive) memory checks.
 # MALLOC_CHECK_=3
 
 # Set to yes/no or cib,crmd etc. to run some or all daemons under valgrind.
 # PCMK_valgrind_enabled=yes
 # PCMK_valgrind_enabled=cib,crmd
 
 # Set to yes/no or cib,crmd etc. to run some or all daemons under valgrind with
 # the callgrind tool enabled.
 # PCMK_callgrind_enabled=yes
 # PCMK_callgrind_enabled=cib,crmd
 
 # Set the options to pass to valgrind, when valgrind is enabled. See
 # valgrind(1) man page for details. "--vgdb=no" is specified because lrmd can
 # lower privileges when executing commands, which would otherwise leave a bunch
 # of unremovable files in /tmp.
 VALGRIND_OPTS="--leak-check=full --trace-children=no --vgdb=no --num-callers=25 --log-file=/var/lib/pacemaker/valgrind-%p --suppressions=/usr/share/pacemaker/tests/valgrind-pcmk.suppressions --gen-suppressions=all"
diff --git a/pengine/graph.c b/pengine/graph.c
index 8ee6aa8f69..e03d2c0d6b 100644
--- a/pengine/graph.c
+++ b/pengine/graph.c
@@ -1,1715 +1,1719 @@
 /*
  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  *
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 
 #include <glib.h>
 
 #include <allocate.h>
 #include <utils.h>
 
 void update_colo_start_chain(action_t * action);
 gboolean rsc_update_action(action_t * first, action_t * then, enum pe_ordering type);
 
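 /*!
  * \internal
  * \brief Get the flags relevant to ordering for an action
  *
  * For clone instances, check activity on the given node, but treat the
  * action as runnable if it is runnable anywhere.
  *
  * \param[in] action  Action to check
  * \param[in] node    If not NULL, limit the check to this node
  *
  * \return Flags appropriate to ordering decisions involving the action
  */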
 static enum pe_action_flags
 get_action_flags(action_t * action, node_t * node)
 {
     enum pe_action_flags flags = action->flags;
 
     if (action->rsc) {
         flags = action->rsc->cmds->action_flags(action, NULL);
 
         if (pe_rsc_is_clone(action->rsc) && node) {
 
             /* We only care about activity on $node */
             enum pe_action_flags clone_flags = action->rsc->cmds->action_flags(action, node);
 
             /* Go to great lengths to ensure the correct value for pe_action_runnable...
              *
              * If we are a clone, then for _ordering_ constraints, it's only relevant
              * if we are runnable _anywhere_.
              *
              * This only applies to _runnable_ though, and only for ordering constraints.
              * If this function is ever used during colocation, then we'll need additional logic
              *
              * Not very satisfying, but it's logical and appears to work well.
              */
             if (is_not_set(clone_flags, pe_action_runnable)
                 && is_set(flags, pe_action_runnable)) {
                 pe_rsc_trace(action->rsc, "Fixing up runnable flag for %s", action->uuid);
                 set_bit(clone_flags, pe_action_runnable);
             }
             flags = clone_flags;
         }
     }
     return flags;
 }
 
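 /*!
  * \internal
  * \brief Convert a collective resource's action key to its completed form
  *
  * For example, "rsc_start_0" would become "rsc_running_0" (or the
  * corresponding confirmed-post notification key, if notifications apply).
  *
  * \param[in] old_uuid       Action key to convert
  * \param[in] rsc            Resource the action applies to
  * \param[in] allow_notify   Whether a notification key may be generated
  * \param[in] free_original  Whether to free 'old_uuid'
  *
  * \return Newly allocated converted key, or a copy of the original if no
  *         conversion applies
  */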
 static char *
 convert_non_atomic_uuid(char *old_uuid, resource_t * rsc, gboolean allow_notify,
                         gboolean free_original)
 {
     int interval = 0;
     char *uuid = NULL;
     char *rid = NULL;
     char *raw_task = NULL;
     int task = no_action;
 
     CRM_ASSERT(rsc);
     pe_rsc_trace(rsc, "Processing %s", old_uuid);
     if (old_uuid == NULL) {
         return NULL;
 
     } else if (strstr(old_uuid, "notify") != NULL) {
         goto done;              /* no conversion */
 
     } else if (rsc->variant < pe_group) {
         goto done;              /* no conversion */
     }
 
     CRM_ASSERT(parse_op_key(old_uuid, &rid, &raw_task, &interval));
     if (interval > 0) {
         goto done;              /* no conversion */
     }
 
     task = text2task(raw_task);
     switch (task) {
         case stop_rsc:
         case start_rsc:
         case action_notify:
         case action_promote:
         case action_demote:
             break;
         case stopped_rsc:
         case started_rsc:
         case action_notified:
         case action_promoted:
         case action_demoted:
             task--;
             break;
         case monitor_rsc:
         case shutdown_crm:
         case stonith_node:
             task = no_action;
             break;
         default:
             crm_err("Unknown action: %s", raw_task);
             task = no_action;
             break;
     }
 
     if (task != no_action) {
         if (is_set(rsc->flags, pe_rsc_notify) && allow_notify) {
             uuid = generate_notify_key(rid, "confirmed-post", task2text(task + 1));
 
         } else {
             uuid = generate_op_key(rid, task2text(task + 1), 0);
         }
         pe_rsc_trace(rsc, "Converted %s -> %s", old_uuid, uuid);
     }
 
   done:
     if (uuid == NULL) {
         uuid = strdup(old_uuid);
     }
 
     if (free_original) {
         free(old_uuid);
     }
 
     free(raw_task);
     free(rid);
     return uuid;
 }
 
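 /*!
  * \internal
  * \brief Replace a collective resource's action with its completed form
  *
  * For ordering purposes, a group or clone action such as 'start' is
  * represented by the corresponding 'running' pseudo-action.
  *
  * \param[in] action  Action to expand
  *
  * \return Equivalent completed action if one exists, otherwise 'action'
  */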
 static action_t *
 rsc_expand_action(action_t * action)
 {
     gboolean notify = FALSE;
     action_t *result = action;
     resource_t *rsc = action->rsc;
 
     if (rsc == NULL) {
         return action;
     }
 
     if ((rsc->parent == NULL)
         || (pe_rsc_is_clone(rsc) && (rsc->parent->variant == pe_container))) {
         /* Only outermost resources have notification actions.
          * The exception is those in bundles.
          */
         notify = is_set(rsc->flags, pe_rsc_notify);
     }
 
     if (rsc->variant >= pe_group) {
         /* Expand 'start' -> 'started' */
         char *uuid = NULL;
 
         uuid = convert_non_atomic_uuid(action->uuid, rsc, notify, FALSE);
         if (uuid) {
             pe_rsc_trace(rsc, "Converting %s to %s %d", action->uuid, uuid,
                          is_set(rsc->flags, pe_rsc_notify));
             result = find_first_action(rsc->actions, uuid, NULL, NULL);
             if (result == NULL) {
                 crm_err("Couldn't expand %s to %s in %s", action->uuid, uuid, rsc->id);
                 result = action;
             }
             free(uuid);
         }
     }
     return result;
 }
 
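 /*!
  * \internal
  * \brief Update two actions' flags as implied by an ordering between them
  *
  * \param[in,out] first        'First' action in the ordering
  * \param[in,out] then         'Then' action in the ordering
  * \param[in]     node         If not NULL, limit the scope to this node
  * \param[in]     first_flags  Flags to use for 'first'
  * \param[in]     then_flags   Flags to use for 'then'
  * \param[in]     type         Group of enum pe_ordering flags to apply
  *
  * \return Group of enum pe_graph_flags indicating which actions changed
  */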
 static enum pe_graph_flags
 graph_update_action(action_t * first, action_t * then, node_t * node,
                     enum pe_action_flags first_flags, enum pe_action_flags then_flags,
                     enum pe_ordering type)
 {
     enum pe_graph_flags changed = pe_graph_none;
     gboolean processed = FALSE;
 
     /* TODO: Do as many of these in parallel as possible */
 
     if (type & pe_order_implies_then) {
         processed = TRUE;
         if (then->rsc) {
             changed |=
                 then->rsc->cmds->update_actions(first, then, node, first_flags & pe_action_optional,
                                                 pe_action_optional, pe_order_implies_then);
 
         } else if (is_set(first_flags, pe_action_optional) == FALSE) {
             if (update_action_flags(then, pe_action_optional | pe_action_clear, __FUNCTION__, __LINE__)) {
                 changed |= pe_graph_updated_then;
             }
         }
         if (changed) {
             pe_rsc_trace(then->rsc, "implies right: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("implies right: %s then %s %p", first->uuid, then->uuid, then->rsc);
         }
     }
 
     if ((type & pe_order_restart) && then->rsc) {
         enum pe_action_flags restart = (pe_action_optional | pe_action_runnable);
 
         processed = TRUE;
         changed |=
             then->rsc->cmds->update_actions(first, then, node, first_flags, restart, pe_order_restart);
         if (changed) {
             pe_rsc_trace(then->rsc, "restart: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("restart: %s then %s", first->uuid, then->uuid);
         }
     }
 
     if (type & pe_order_implies_first) {
         processed = TRUE;
         if (first->rsc) {
             changed |=
                 first->rsc->cmds->update_actions(first, then, node, first_flags,
                                                  pe_action_optional, pe_order_implies_first);
 
         } else if (is_set(first_flags, pe_action_optional) == FALSE) {
             pe_rsc_trace(first->rsc, "first unrunnable: %s (%d) then %s (%d)",
                          first->uuid, is_set(first_flags, pe_action_optional),
                          then->uuid, is_set(then_flags, pe_action_optional));
             if (update_action_flags(first, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__)) {
                 changed |= pe_graph_updated_first;
             }
         }
 
         if (changed) {
             pe_rsc_trace(then->rsc, "implies left: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("implies left: %s (%d) then %s (%d)",
                       first->uuid, is_set(first_flags, pe_action_optional),
                       then->uuid, is_set(then_flags, pe_action_optional));
         }
     }
 
     if (type & pe_order_implies_first_master) {
         processed = TRUE;
         if (then->rsc) {
             changed |=
                 then->rsc->cmds->update_actions(first, then, node, first_flags & pe_action_optional,
                                                 pe_action_optional, pe_order_implies_first_master);
         }
 
         if (changed) {
             pe_rsc_trace(then->rsc,
                          "implies left when right rsc is Master role: %s then %s: changed",
                          first->uuid, then->uuid);
         } else {
             crm_trace("implies left when right rsc is Master role: %s then %s", first->uuid,
                       then->uuid);
         }
     }
 
     if (type & pe_order_one_or_more) {
         processed = TRUE;
         if (then->rsc) {
             changed |=
                 then->rsc->cmds->update_actions(first, then, node, first_flags,
                                                 pe_action_runnable, pe_order_one_or_more);
 
         } else if (is_set(first_flags, pe_action_runnable)) {
             /* A "first" action is considered runnable, so increment
              * the 'runnable_before' counter */
             then->runnable_before++;
 
             /* If the count of runnable "before" actions for 'then' meets or
              * exceeds the required number, mark 'then' as runnable */
             if (then->runnable_before >= then->required_runnable_before) {
                 if (update_action_flags(then, pe_action_runnable, __FUNCTION__, __LINE__)) {
                     changed |= pe_graph_updated_then;
                 }
             }
         }
         if (changed) {
             pe_rsc_trace(then->rsc, "runnable_one_or_more: %s then %s: changed", first->uuid,
                          then->uuid);
         } else {
             crm_trace("runnable_one_or_more: %s then %s", first->uuid, then->uuid);
         }
     }
 
     if (type & pe_order_runnable_left) {
         processed = TRUE;
         if (then->rsc) {
             changed |=
                 then->rsc->cmds->update_actions(first, then, node, first_flags,
                                                 pe_action_runnable, pe_order_runnable_left);
 
         } else if (is_set(first_flags, pe_action_runnable) == FALSE) {
             pe_rsc_trace(then->rsc, "then unrunnable: %s then %s", first->uuid, then->uuid);
             if (update_action_flags(then, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__)) {
                  changed |= pe_graph_updated_then;
             }
         }
         if (changed) {
             pe_rsc_trace(then->rsc, "runnable: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("runnable: %s then %s", first->uuid, then->uuid);
         }
     }
 
     if (type & pe_order_implies_first_migratable) {
         processed = TRUE;
         if (then->rsc) {
             changed |=
                 then->rsc->cmds->update_actions(first, then, node, first_flags,
                                                 pe_action_optional, pe_order_implies_first_migratable);
         }
         if (changed) {
             pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("optional: %s then %s", first->uuid, then->uuid);
         }
     }
 
     if (type & pe_order_pseudo_left) {
         processed = TRUE;
         if (then->rsc) {
             changed |=
                 then->rsc->cmds->update_actions(first, then, node, first_flags,
                                                 pe_action_optional, pe_order_pseudo_left);
         }
         if (changed) {
             pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("optional: %s then %s", first->uuid, then->uuid);
         }
     }
 
     if (type & pe_order_optional) {
         processed = TRUE;
         if (then->rsc) {
             changed |=
                 then->rsc->cmds->update_actions(first, then, node, first_flags,
                                                 pe_action_runnable, pe_order_optional);
         }
         if (changed) {
             pe_rsc_trace(then->rsc, "optional: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("optional: %s then %s", first->uuid, then->uuid);
         }
     }
 
     if (type & pe_order_asymmetrical) {
         processed = TRUE;
         if (then->rsc) {
             changed |=
                 then->rsc->cmds->update_actions(first, then, node, first_flags,
                                                 pe_action_runnable, pe_order_asymmetrical);
         }
 
         if (changed) {
             pe_rsc_trace(then->rsc, "asymmetrical: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("asymmetrical: %s then %s", first->uuid, then->uuid);
         }
 
     }
 
     if ((first->flags & pe_action_runnable) && (type & pe_order_implies_then_printed)
         && (first_flags & pe_action_optional) == 0) {
         processed = TRUE;
         crm_trace("%s implies %s printed", first->uuid, then->uuid);
         update_action_flags(then, pe_action_print_always, __FUNCTION__, __LINE__);  /* don't care about changed */
     }
 
     if (is_set(type, pe_order_implies_first_printed) && is_set(then_flags, pe_action_optional) == FALSE) {
         processed = TRUE;
         crm_trace("%s implies %s printed", then->uuid, first->uuid);
         update_action_flags(first, pe_action_print_always, __FUNCTION__, __LINE__); /* don't care about changed */
     }
 
     if ((type & pe_order_implies_then
          || type & pe_order_implies_first
          || type & pe_order_restart)
         && first->rsc
         && safe_str_eq(first->task, RSC_STOP)
         && is_not_set(first->rsc->flags, pe_rsc_managed)
         && is_set(first->rsc->flags, pe_rsc_block)
         && is_not_set(first->flags, pe_action_runnable)) {
 
         if (update_action_flags(then, pe_action_runnable | pe_action_clear, __FUNCTION__, __LINE__)) {
             changed |= pe_graph_updated_then;
         }
 
         if (changed) {
             pe_rsc_trace(then->rsc, "unmanaged left: %s then %s: changed", first->uuid, then->uuid);
         } else {
             crm_trace("unmanaged left: %s then %s", first->uuid, then->uuid);
         }
     }
 
     if (processed == FALSE) {
         crm_trace("Constraint 0x%.6x not applicable", type);
     }
 
     return changed;
 }
 
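 /*!
  * \internal
  * \brief Mark a resource's runnable start actions as blocked by a colocation
  *
  * \param[in,out] rsc     Resource whose start actions should be blocked
  * \param[in]     reason  Colocated resource that is the cause of the block
  */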
 static void
 mark_start_blocked(resource_t *rsc, resource_t *reason)
 {
     GListPtr gIter = rsc->actions;
     char *reason_text = crm_strdup_printf("colocation with %s", reason->id);
 
     for (; gIter != NULL; gIter = gIter->next) {
         action_t *action = (action_t *) gIter->data;
 
         if (safe_str_neq(action->task, RSC_START)) {
             continue;
         }
         if (is_set(action->flags, pe_action_runnable)) {
             pe_action_set_flag_reason(__FUNCTION__, __LINE__, action, NULL, reason_text, pe_action_runnable, FALSE);
             update_colo_start_chain(action);
             update_action(action);
         }
     }
     free(reason_text);
 }
 
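 /*!
  * \internal
  * \brief Propagate an unrunnable start along mandatory colocations
  *
  * If the given action is an unrunnable start, and all of its resource's
  * instances also have unrunnable starts, block the starts of all resources
  * that have a mandatory colocation with it.
  *
  * \param[in,out] action  Action to check
  */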
 void
 update_colo_start_chain(action_t *action)
 {
     GListPtr gIter = NULL;
     resource_t *rsc = NULL;
 
     if (is_not_set(action->flags, pe_action_runnable) && safe_str_eq(action->task, RSC_START)) {
         rsc = uber_parent(action->rsc);
         if (rsc->parent) {
-            // This is a bundle (uber_parent() stops _before_ the bundle)
+            /* For bundles, uber_parent() returns the clone/master, not the
+             * bundle, so the existence of rsc->parent implies this is a bundle.
+             * In this case, we need the bundle resource, so that we can check
+             * if all containers are stopped/stopping.
+             */
             rsc = rsc->parent;
         }
     }
 
     if (rsc == NULL || rsc->rsc_cons_lhs == NULL) {
         return;
     }
 
     /* If rsc has children, all of them must have their start actions set to
      * unrunnable before we follow the colocation chain for the parent. */
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         resource_t *child = (resource_t *)gIter->data;
         action_t *start = find_first_action(child->actions, NULL, RSC_START, NULL);
         if (start == NULL || is_set(start->flags, pe_action_runnable)) {
             return;
         }
     }
 
     for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
         rsc_colocation_t *colocate_with = (rsc_colocation_t *)gIter->data;
         if (colocate_with->score == INFINITY) {
             mark_start_blocked(colocate_with->rsc_lh, action->rsc);
         }
     }
 }
 
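 /*!
  * \internal
  * \brief Update an action's flags based on all of its ordering constraints
  *
  * Process each of the action's "before" orderings, then recursively update
  * any other actions affected by the changes.
  *
  * \param[in,out] then  Action to update
  *
  * \return Always FALSE
  */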
 gboolean
 update_action(action_t * then)
 {
     GListPtr lpc = NULL;
     enum pe_graph_flags changed = pe_graph_none;
     int last_flags = then->flags;
 
     crm_trace("Processing %s (%s %s %s)",
               then->uuid,
               is_set(then->flags, pe_action_optional) ? "optional" : "required",
               is_set(then->flags, pe_action_runnable) ? "runnable" : "unrunnable",
               is_set(then->flags,
                      pe_action_pseudo) ? "pseudo" : then->node ? then->node->details->uname : "");
 
     if (is_set(then->flags, pe_action_requires_any)) {
         /* Initialize the count of known runnable "before" actions to 0.
          * As graph_update_action() is called for each of 'then's "before"
          * actions, this count is incremented for each runnable 'first'
          * action encountered. */
         then->runnable_before = 0;
 
        /* For backward compatibility with previous options that use the
         * 'requires_any' flag, initialize the required number to 1 if it
         * is not set. */
         if (then->required_runnable_before == 0) {
             then->required_runnable_before = 1;
         }
         pe_clear_action_bit(then, pe_action_runnable);
         /* We are relying on the pe_order_one_or_more clause of
          * graph_update_action(), called as part of the:
          *
          *    'if (first == other->action)'
          *
          * block below, to set this back if appropriate
          */
     }
 
     for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) {
         action_wrapper_t *other = (action_wrapper_t *) lpc->data;
         action_t *first = other->action;
 
         node_t *then_node = then->node;
         node_t *first_node = first->node;
 
         enum pe_action_flags then_flags = 0;
         enum pe_action_flags first_flags = 0;
 
         if (first->rsc && first->rsc->variant == pe_group && safe_str_eq(first->task, RSC_START)) {
             first_node = first->rsc->fns->location(first->rsc, NULL, FALSE);
             if (first_node) {
                 crm_trace("First: Found node %s for %s", first_node->details->uname, first->uuid);
             }
         }
 
         if (then->rsc && then->rsc->variant == pe_group && safe_str_eq(then->task, RSC_START)) {
             then_node = then->rsc->fns->location(then->rsc, NULL, FALSE);
             if (then_node) {
                 crm_trace("Then: Found node %s for %s", then_node->details->uname, then->uuid);
             }
         }
         /* Disable constraint if it only applies when on same node, but isn't */
         if (is_set(other->type, pe_order_same_node) && first_node && then_node
             && (first_node->details != then_node->details)) {
 
             crm_trace("Disabled constraint %s on %s -> %s on %s",
                        other->action->uuid, first_node->details->uname,
                        then->uuid, then_node->details->uname);
             other->type = pe_order_none;
             continue;
         }
 
         clear_bit(changed, pe_graph_updated_first);
 
         if (first->rsc && then->rsc && (first->rsc != then->rsc)
             && (is_parent(then->rsc, first->rsc) == FALSE)) {
             first = rsc_expand_action(first);
         }
         if (first != other->action) {
             crm_trace("Ordering %s after %s instead of %s", then->uuid, first->uuid,
                       other->action->uuid);
         }
 
         first_flags = get_action_flags(first, then_node);
         then_flags = get_action_flags(then, first_node);
 
         crm_trace("Checking %s (%s %s %s) against %s (%s %s %s) filter=0x%.6x type=0x%.6x",
                   then->uuid,
                   is_set(then_flags, pe_action_optional) ? "optional" : "required",
                   is_set(then_flags, pe_action_runnable) ? "runnable" : "unrunnable",
                   is_set(then_flags,
                          pe_action_pseudo) ? "pseudo" : then->node ? then->node->details->
                   uname : "", first->uuid, is_set(first_flags,
                                                   pe_action_optional) ? "optional" : "required",
                   is_set(first_flags, pe_action_runnable) ? "runnable" : "unrunnable",
                   is_set(first_flags,
                          pe_action_pseudo) ? "pseudo" : first->node ? first->node->details->
                   uname : "", first_flags, other->type);
 
         if (first == other->action) {
             /*
              * 'first' was not expanded (e.g. from 'start' to 'running'), which could mean it:
              * - has no associated resource,
              * - was a primitive,
              * - was pre-expanded (e.g. 'running' instead of 'start')
              *
              * The third argument here to graph_update_action() is a node which is used under two conditions:
              * - Interleaving, in which case first->node and
              *   then->node are equal (and NULL)
              * - If 'then' is a clone, to limit the scope of the
              *   constraint to instances on the supplied node
              *
              */
             int otype = other->type;
             node_t *node = then->node;
 
             if(is_set(otype, pe_order_implies_then_on_node)) {
                 /* Normally we want the _whole_ 'then' clone to
                  * restart if 'first' is restarted, so then->node is
                  * needed.
                  *
                  * However for unfencing, we want to limit this to
                  * instances on the same node as 'first' (the
                  * unfencing operation), so first->node is supplied.
                  *
                  * Swap the node; from then on, we can treat it
                  * like any other 'pe_order_implies_then'.
                  */
 
                 clear_bit(otype, pe_order_implies_then_on_node);
                 set_bit(otype, pe_order_implies_then);
                 node = first->node;
             }
             clear_bit(first_flags, pe_action_pseudo);
 
             changed |= graph_update_action(first, then, node, first_flags, then_flags, otype);
 
             /* 'first' was for a complex resource (clone, group, etc),
              * create a new dependency if necessary
              */
         } else if (order_actions(first, then, other->type)) {
             /* This was the first time 'first' and 'then' were associated,
              * start again to get the new actions_before list
              */
             changed |= (pe_graph_updated_then | pe_graph_disable);
         }
 
         if (changed & pe_graph_disable) {
             crm_trace("Disabled constraint %s -> %s in favor of %s -> %s",
                       other->action->uuid, then->uuid, first->uuid, then->uuid);
             clear_bit(changed, pe_graph_disable);
             other->type = pe_order_none;
         }
 
         if (changed & pe_graph_updated_first) {
             GListPtr lpc2 = NULL;
 
             crm_trace("Updated %s (first %s %s %s), processing dependents ",
                       first->uuid,
                       is_set(first->flags, pe_action_optional) ? "optional" : "required",
                       is_set(first->flags, pe_action_runnable) ? "runnable" : "unrunnable",
                       is_set(first->flags,
                              pe_action_pseudo) ? "pseudo" : first->node ? first->node->details->
                       uname : "");
             for (lpc2 = first->actions_after; lpc2 != NULL; lpc2 = lpc2->next) {
                 action_wrapper_t *other = (action_wrapper_t *) lpc2->data;
 
                 update_action(other->action);
             }
             update_action(first);
         }
     }
 
     if (is_set(then->flags, pe_action_requires_any)) {
         if (last_flags != then->flags) {
             changed |= pe_graph_updated_then;
         } else {
             clear_bit(changed, pe_graph_updated_then);
         }
     }
 
     if (changed & pe_graph_updated_then) {
         crm_trace("Updated %s (then %s %s %s), processing dependents ",
                   then->uuid,
                   is_set(then->flags, pe_action_optional) ? "optional" : "required",
                   is_set(then->flags, pe_action_runnable) ? "runnable" : "unrunnable",
                   is_set(then->flags,
                          pe_action_pseudo) ? "pseudo" : then->node ? then->node->details->
                   uname : "");
 
         if (is_set(last_flags, pe_action_runnable) && is_not_set(then->flags, pe_action_runnable)) {
             update_colo_start_chain(then);
         }
         update_action(then);
         for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) {
             action_wrapper_t *other = (action_wrapper_t *) lpc->data;
 
             update_action(other->action);
         }
     }
 
     return FALSE;
 }
 
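 /*!
  * \internal
  * \brief Order stops of a node's resources before the node's shutdown
  *
  * \param[in]     node         Node being shut down
  * \param[in,out] shutdown_op  Shutdown action for the node
  * \param[in,out] data_set     Working set of cluster
  *
  * \return Always TRUE
  */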
 gboolean
 shutdown_constraints(node_t * node, action_t * shutdown_op, pe_working_set_t * data_set)
 {
     /* add the stop to the before lists so it counts as a pre-req
      * for the shutdown
      */
     GListPtr lpc = NULL;
 
     for (lpc = data_set->actions; lpc != NULL; lpc = lpc->next) {
         action_t *action = (action_t *) lpc->data;
 
         if (action->rsc == NULL || action->node == NULL) {
             continue;
         } else if (action->node->details != node->details) {
             continue;
         } else if (is_set(action->rsc->flags, pe_rsc_maintenance)) {
             pe_rsc_trace(action->rsc, "Skipping %s: maintenance mode", action->uuid);
             continue;
         } else if (node->details->maintenance) {
             pe_rsc_trace(action->rsc, "Skipping %s: node %s is in maintenance mode",
                          action->uuid, node->details->uname);
             continue;
         } else if (safe_str_neq(action->task, RSC_STOP)) {
             continue;
         } else if (is_not_set(action->rsc->flags, pe_rsc_managed)
                    && is_not_set(action->rsc->flags, pe_rsc_block)) {
             /*
              * If another action depends on this one, we may still end up blocking
              */
             pe_rsc_trace(action->rsc, "Skipping %s: unmanaged", action->uuid);
             continue;
         }
 
         pe_rsc_trace(action->rsc, "Ordering %s before shutdown on %s", action->uuid,
                      node->details->uname);
         pe_clear_action_bit(action, pe_action_optional);
         custom_action_order(action->rsc, NULL, action,
                             NULL, strdup(CRM_OP_SHUTDOWN), shutdown_op,
                             pe_order_optional | pe_order_runnable_left, data_set);
     }
 
     return TRUE;
 }
 
 /*!
  * \internal
  * \brief Order all actions appropriately relative to a fencing operation
  *
  * Ensure start operations of affected resources are ordered after fencing,
  * imply stop and demote operations of affected resources by marking them as
  * pseudo-actions, etc.
  *
  * \param[in]     node        Node to be fenced
  * \param[in]     stonith_op  Fencing operation
  * \param[in,out] data_set    Working set of cluster
  */
 gboolean
 stonith_constraints(node_t * node, action_t * stonith_op, pe_working_set_t * data_set)
 {
     GListPtr r = NULL;
 
     CRM_CHECK(stonith_op != NULL, return FALSE);
     for (r = data_set->resources; r != NULL; r = r->next) {
         rsc_stonith_ordering((resource_t *) r->data, stonith_op, data_set);
     }
     return TRUE;
 }
 
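 /*!
  * \internal
  * \brief Determine which node should route an action's execution
  *
  * For an action on a Pacemaker Remote node, choose the cluster node hosting
  * the remote connection that must relay the action. If the connection is
  * moving, actions that must complete before the move are routed through the
  * node it began on, and everything else through the node it ends up on.
  *
  * \param[in] action  Action to check
  *
  * \return Node to route the action through, or NULL if no routing is needed
  */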
 static node_t *
 get_router_node(action_t *action)
 {
     node_t *began_on = NULL;
     node_t *ended_on = NULL;
     node_t *router_node = NULL;
 
     if (safe_str_eq(action->task, CRM_OP_FENCE) || is_remote_node(action->node) == FALSE) {
         return NULL;
     }
 
     CRM_ASSERT(action->node->details->remote_rsc != NULL);
 
     if (action->node->details->remote_rsc->running_on) {
         began_on = action->node->details->remote_rsc->running_on->data;
     }
     ended_on = action->node->details->remote_rsc->allocated_to;
 
     /* if there is only one location to choose from,
      * this is easy. Check for those conditions first */
     if (!began_on || !ended_on) {
         /* remote rsc is either shutting down or starting up */
         return began_on ? began_on : ended_on;
     } else if (began_on->details == ended_on->details) {
         /* remote rsc didn't move nodes. */
         return began_on;
     }
 
     /* If we get here, we know the remote resource
      * began on one node and is moving to another node.
      *
      * This means some actions will get routed through the cluster
      * node the connection rsc began on, and others are routed through
      * the cluster node the connection rsc ends up on.
      *
      * 1. stop, demote, migrate actions of resources living in the remote
      *    node _MUST_ occur _BEFORE_ the connection can move (these actions
      *    are all required before the remote rsc stop action can occur.) In
      *    this case, we know these actions have to be routed through the initial
      *    cluster node the connection resource lived on before the move takes place.
      *
      * 2. Everything else (start, promote, monitor, probe, refresh, clear failcount,
      *    delete, ...) must occur after the resource starts on the node it is
      *    moving to.
      */
 
     /* 1. before connection rsc moves. */
     if (safe_str_eq(action->task, "stop") ||
         safe_str_eq(action->task, "demote") ||
         safe_str_eq(action->task, "migrate_from") ||
         safe_str_eq(action->task, "migrate_to")) {
 
         router_node = began_on;
 
     /* 2. after connection rsc moves. */
     } else {
         router_node = ended_on;
     }
     return router_node;
 }
 
 /*!
  * \internal
  * \brief Add an XML node tag for a specified ID
  *
  * \param[in]     id      Node UUID to add
  * \param[in,out] xml     Parent XML tag to add to
  */
 static xmlNode*
 add_node_to_xml_by_id(const char *id, xmlNode *xml)
 {
     xmlNode *node_xml;
 
     node_xml = create_xml_node(xml, XML_CIB_TAG_NODE);
     crm_xml_add(node_xml, XML_ATTR_UUID, id);
 
     return node_xml;
 }
 
 /*!
  * \internal
  * \brief Add an XML node tag for a specified node
  *
  * \param[in]     node  Node to add
  * \param[in,out] xml   XML to add node to
  */
 static void
 add_node_to_xml(const node_t *node, void *xml)
 {
     add_node_to_xml_by_id(node->details->id, (xmlNode *) xml);
 }
 
 /*!
  * \internal
  * \brief Add XML with nodes that need an update of their maintenance state
  *
  * \param[in,out] xml       Parent XML tag to add to
  * \param[in]     data_set  Working set for cluster
  */
 static int
 add_maintenance_nodes(xmlNode *xml, const pe_working_set_t *data_set)
 {
     GListPtr gIter = NULL;
     xmlNode *maintenance =
         xml?create_xml_node(xml, XML_GRAPH_TAG_MAINTENANCE):NULL;
     int count = 0;
 
     for (gIter = data_set->nodes; gIter != NULL;
          gIter = gIter->next) {
         node_t *node = (node_t *) gIter->data;
         struct node_shared_s *details = node->details;
 
         if (!(is_remote_node(node))) {
             continue; /* only remote nodes need to know, at the moment */
         }
 
         if (details->maintenance != details->remote_maintenance) {
             if (maintenance) {
                 crm_xml_add(
                     add_node_to_xml_by_id(node->details->id, maintenance),
                     XML_NODE_IS_MAINTENANCE, details->maintenance?"1":"0");
             }
             count++;
         }
     }
     crm_trace("%s %d nodes to adjust maintenance-mode "
               "to transition", maintenance?"Added":"Counted", count);
     return count;
 }
 
 /*!
  * \internal
  * \brief Add pseudo action with nodes needing maintenance state update
  *
  * \param[in,out] data_set  Working set for cluster
  */
 void
 add_maintenance_update(pe_working_set_t *data_set)
 {
     action_t *action = NULL;
 
     if (add_maintenance_nodes(NULL, data_set)) {
         crm_trace("adding maintenance state update pseudo action");
         action = get_pseudo_op(CRM_OP_MAINTENANCE_NODES, data_set);
         set_bit(action->flags, pe_action_print_always);
     }
 }
 
 /*!
  * \internal
  * \brief Add XML with nodes that an action is expected to bring down
  *
  * If a specified action is expected to bring any nodes down, add an XML block
  * with their UUIDs. When a node is lost, this allows the crmd to determine
  * whether it was expected.
  *
  * \param[in,out] xml       Parent XML tag to add to
  * \param[in]     action    Action to check for downed nodes
  * \param[in]     data_set  Working set for cluster
  */
 static void
 add_downed_nodes(xmlNode *xml, const action_t *action,
                  const pe_working_set_t *data_set)
 {
     CRM_CHECK(xml && action && action->node && data_set, return);
 
     if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) {
 
         /* Shutdown makes the action's node down */
         xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
         add_node_to_xml_by_id(action->node->details->id, downed);
 
     } else if (safe_str_eq(action->task, CRM_OP_FENCE)) {
 
         /* Fencing makes the action's node and any hosted guest nodes down */
         const char *fence = g_hash_table_lookup(action->meta, "stonith_action");
 
         if (safe_str_eq(fence, "off") || safe_str_eq(fence, "reboot")) {
             xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
             add_node_to_xml_by_id(action->node->details->id, downed);
             pe_foreach_guest_node(data_set, action->node, add_node_to_xml, downed);
         }
 
     } else if (action->rsc && action->rsc->is_remote_node
                && safe_str_eq(action->task, CRMD_ACTION_STOP)) {
 
         /* Stopping a remote connection resource makes connected node down,
          * unless it's part of a migration
          */
         GListPtr iter;
         action_t *input;
         gboolean migrating = FALSE;
 
         for (iter = action->actions_before; iter != NULL; iter = iter->next) {
             input = ((action_wrapper_t *) iter->data)->action;
             if (input->rsc && safe_str_eq(action->rsc->id, input->rsc->id)
                && safe_str_eq(input->task, CRMD_ACTION_MIGRATED)) {
                 migrating = TRUE;
                 break;
             }
         }
         if (!migrating) {
             xmlNode *downed = create_xml_node(xml, XML_GRAPH_TAG_DOWNED);
             add_node_to_xml_by_id(action->rsc->id, downed);
         }
     }
 }
 
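 /*!
  * \internal
  * \brief Create the transition graph XML for an action
  *
  * \param[in]     action    Action to represent as XML
  * \param[in]     as_input  If TRUE, include only what is needed to list the
  *                          action as an input to another action
  * \param[in]     data_set  Working set of cluster
  *
  * \return Newly created XML for the action (caller is responsible for
  *         freeing it)
  */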
 static xmlNode *
 action2xml(action_t * action, gboolean as_input, pe_working_set_t *data_set)
 {
     gboolean needs_node_info = TRUE;
     gboolean needs_maintenance_info = FALSE;
     xmlNode *action_xml = NULL;
     xmlNode *args_xml = NULL;
 #if ENABLE_VERSIONED_ATTRS
     pe_rsc_action_details_t *rsc_details = NULL;
 #endif
 
     if (action == NULL) {
         return NULL;
     }
 
     if (safe_str_eq(action->task, CRM_OP_FENCE)) {
         /* All fences need node info; guest node fences are pseudo-events */
         action_xml = create_xml_node(NULL,
                                      is_set(action->flags, pe_action_pseudo)?
                                      XML_GRAPH_TAG_PSEUDO_EVENT :
                                      XML_GRAPH_TAG_CRM_EVENT);
 
     } else if (safe_str_eq(action->task, CRM_OP_SHUTDOWN)) {
         action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);
 
     } else if (safe_str_eq(action->task, CRM_OP_CLEAR_FAILCOUNT)) {
         action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);
 
     } else if (safe_str_eq(action->task, CRM_OP_LRM_REFRESH)) {
         action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT);
 
 /* 	} else if(safe_str_eq(action->task, RSC_PROBED)) { */
 /* 		action_xml = create_xml_node(NULL, XML_GRAPH_TAG_CRM_EVENT); */
 
     } else if (is_set(action->flags, pe_action_pseudo)) {
         if (safe_str_eq(action->task, CRM_OP_MAINTENANCE_NODES)) {
             needs_maintenance_info = TRUE;
         }
         action_xml = create_xml_node(NULL, XML_GRAPH_TAG_PSEUDO_EVENT);
         needs_node_info = FALSE;
 
     } else {
         action_xml = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP);
 #if ENABLE_VERSIONED_ATTRS
         rsc_details = pe_rsc_action_details(action);
 #endif
     }
 
     crm_xml_add_int(action_xml, XML_ATTR_ID, action->id);
     crm_xml_add(action_xml, XML_LRM_ATTR_TASK, action->task);
     if (action->rsc != NULL && action->rsc->clone_name != NULL) {
         char *clone_key = NULL;
         const char *interval_s = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL);
         int interval = crm_parse_int(interval_s, "0");
 
         if (safe_str_eq(action->task, RSC_NOTIFY)) {
             const char *n_type = g_hash_table_lookup(action->meta, "notify_type");
             const char *n_task = g_hash_table_lookup(action->meta, "notify_operation");
 
             CRM_CHECK(n_type != NULL, crm_err("No notify type value found for %s", action->uuid));
             CRM_CHECK(n_task != NULL,
                       crm_err("No notify operation value found for %s", action->uuid));
             clone_key = generate_notify_key(action->rsc->clone_name, n_type, n_task);
 
         } else if(action->cancel_task) {
             clone_key = generate_op_key(action->rsc->clone_name, action->cancel_task, interval);
         } else {
             clone_key = generate_op_key(action->rsc->clone_name, action->task, interval);
         }
 
         CRM_CHECK(clone_key != NULL, crm_err("Could not generate a key for %s", action->uuid));
         crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, clone_key);
         crm_xml_add(action_xml, "internal_" XML_LRM_ATTR_TASK_KEY, action->uuid);
         free(clone_key);
 
     } else {
         crm_xml_add(action_xml, XML_LRM_ATTR_TASK_KEY, action->uuid);
     }
 
     if (needs_node_info && action->node != NULL) {
         node_t *router_node = get_router_node(action);
 
         crm_xml_add(action_xml, XML_LRM_ATTR_TARGET, action->node->details->uname);
         crm_xml_add(action_xml, XML_LRM_ATTR_TARGET_UUID, action->node->details->id);
         if (router_node) {
             crm_xml_add(action_xml, XML_LRM_ATTR_ROUTER_NODE, router_node->details->uname);
         }
 
         g_hash_table_insert(action->meta, strdup(XML_LRM_ATTR_TARGET), strdup(action->node->details->uname));
         g_hash_table_insert(action->meta, strdup(XML_LRM_ATTR_TARGET_UUID), strdup(action->node->details->id));
     }
 
     /* No details if this action is only being listed in the inputs section */
     if (as_input) {
         return action_xml;
     }
 
     /* List affected resource */
     if (action->rsc) {
         if (is_set(action->flags, pe_action_pseudo) == FALSE) {
             int lpc = 0;
 
             xmlNode *rsc_xml = create_xml_node(action_xml, crm_element_name(action->rsc->xml));
 
             const char *attr_list[] = {
                 XML_AGENT_ATTR_CLASS,
                 XML_AGENT_ATTR_PROVIDER,
                 XML_ATTR_TYPE
             };
 
             if (is_set(action->rsc->flags, pe_rsc_orphan) && action->rsc->clone_name) {
                 /* Do not use the 'instance free' name here as that
                  * might interfere with the instance we plan to keep.
                  * I.e., if there are more than two named /anonymous/
                  * instances on a given node, we need to make sure the
                  * command goes to the right one.
                  *
                  * Keep this block, even when everyone is using
                  * 'instance free' anonymous clone names - it means
                  * we'll do the right thing if anyone toggles the
                  * unique flag to 'off'
                  */
                 crm_debug("Using orphan clone name %s instead of %s", action->rsc->id,
                           action->rsc->clone_name);
                 crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->clone_name);
                 crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id);
 
             } else if (is_not_set(action->rsc->flags, pe_rsc_unique)) {
                 const char *xml_id = ID(action->rsc->xml);
 
                 crm_debug("Using anonymous clone name %s for %s (aka. %s)", xml_id, action->rsc->id,
                           action->rsc->clone_name);
 
                 /* ID is what we'd like client to use
                  * ID_LONG is what they might know it as instead
                  *
                  * ID_LONG is only strictly needed /here/ during the
                  * transition period until all nodes in the cluster
                  * are running the new software /and/ have rebooted
                  * once (meaning that they've only ever spoken to a DC
                  * supporting this feature).
                  *
                  * If anyone toggles the unique flag to 'on', the
                  * 'instance free' name will correspond to an orphan
                  * and fall into the clause above instead
                  */
                 crm_xml_add(rsc_xml, XML_ATTR_ID, xml_id);
                 if (action->rsc->clone_name && safe_str_neq(xml_id, action->rsc->clone_name)) {
                     crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->clone_name);
                 } else {
                     crm_xml_add(rsc_xml, XML_ATTR_ID_LONG, action->rsc->id);
                 }
 
             } else {
                 CRM_ASSERT(action->rsc->clone_name == NULL);
                 crm_xml_add(rsc_xml, XML_ATTR_ID, action->rsc->id);
             }
 
             for (lpc = 0; lpc < DIMOF(attr_list); lpc++) {
                 crm_xml_add(rsc_xml, attr_list[lpc],
                             g_hash_table_lookup(action->rsc->meta, attr_list[lpc]));
             }
         }
     }
 
     /* List any attributes in effect */
     args_xml = create_xml_node(NULL, XML_TAG_ATTRS);
     crm_xml_add(args_xml, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET);
 
     g_hash_table_foreach(action->extra, hash2field, args_xml);
     if (action->rsc != NULL && action->node) {
         GHashTable *p = crm_str_table_new();
 
         get_rsc_attributes(p, action->rsc, action->node, data_set);
         g_hash_table_foreach(p, hash2smartfield, args_xml);
         g_hash_table_destroy(p);
 
 #if ENABLE_VERSIONED_ATTRS
         {
             xmlNode *versioned_parameters = create_xml_node(NULL, XML_TAG_RSC_VER_ATTRS);
 
             pe_get_versioned_attributes(versioned_parameters, action->rsc,
                                         action->node, data_set);
             if (xml_has_children(versioned_parameters)) {
                 add_node_copy(action_xml, versioned_parameters);
             }
             free_xml(versioned_parameters);
         }
 #endif
 
     } else if(action->rsc && action->rsc->variant <= pe_native) {
         g_hash_table_foreach(action->rsc->parameters, hash2smartfield, args_xml);
 
 #if ENABLE_VERSIONED_ATTRS
         if (xml_has_children(action->rsc->versioned_parameters)) {
             add_node_copy(action_xml, action->rsc->versioned_parameters);
         }
 #endif
     }
 
 #if ENABLE_VERSIONED_ATTRS
     if (rsc_details) {
         if (xml_has_children(rsc_details->versioned_parameters)) {
             add_node_copy(action_xml, rsc_details->versioned_parameters);
         }
 
         if (xml_has_children(rsc_details->versioned_meta)) {
             add_node_copy(action_xml, rsc_details->versioned_meta);
         }
     }
 #endif
 
     g_hash_table_foreach(action->meta, hash2metafield, args_xml);
     if (action->rsc != NULL) {
         int isolated = 0;
         const char *value = g_hash_table_lookup(action->rsc->meta, "external-ip");
         resource_t *parent = action->rsc;
 
         while (parent != NULL) {
             isolated |= parent->isolation_wrapper ? 1 : 0;
             parent->cmds->append_meta(parent, args_xml);
             parent = parent->parent;
         }
 
         if (isolated && action->node) {
             char *nodeattr = crm_meta_name(XML_RSC_ATTR_ISOLATION_HOST);
             crm_xml_add(args_xml, nodeattr, action->node->details->uname);
             free(nodeattr);
         }
 
         if(value) {
             hash2smartfield((gpointer)"pcmk_external_ip", (gpointer)value, (gpointer)args_xml);
         }
 
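        /* For actions on a guest node, record the physical host that is
         * running (for down actions) or allocated to run (for up actions)
         * the guest's container
         */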
         if(is_container_remote_node(action->node)) {
             pe_node_t *host = NULL;
             enum action_tasks task = text2task(action->task);
 
             if(task == action_notify || task == action_notified) {
                 const char *n_task = g_hash_table_lookup(action->meta, "notify_operation");
                 task = text2task(n_task);
             }
 
             // Differentiate between up and down actions
             switch (task) {
                 case stop_rsc:
                 case stopped_rsc:
                 case action_demote:
                 case action_demoted:
                     if(action->node->details->remote_rsc->container->running_on) {
                         host = action->node->details->remote_rsc->container->running_on->data;
                     }
                     break;
                 case start_rsc:
                 case started_rsc:
                 case monitor_rsc:
                 case action_promote:
                 case action_promoted:
                     if(action->node->details->remote_rsc->container->allocated_to) {
                         host = action->node->details->remote_rsc->container->allocated_to;
                     }
                     break;
                 default:
                     break;
             }
 
             if(host) {
                 hash2metafield((gpointer)XML_RSC_ATTR_TARGET,
                                (gpointer)g_hash_table_lookup(action->rsc->meta, XML_RSC_ATTR_TARGET), (gpointer)args_xml);
                 hash2metafield((gpointer)PCMK_ENV_PHYSICAL_HOST, (gpointer)host->details->uname, (gpointer)args_xml);
             }
         }
 
     } else if (safe_str_eq(action->task, CRM_OP_FENCE) && action->node) {
         /* Pass the node's attributes as meta-attributes.
          *
          * @TODO: Determine whether it is still necessary to do this. It was
          * added in 33d99707, probably for the libfence-based implementation in
          * c9a90bd, which is no longer used.
          */
         g_hash_table_foreach(action->node->details->attrs, hash2metafield, args_xml);
     }
 
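    /* Attach the collected arguments to the action in sorted order, then
     * free the scratch node
     */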
     sorted_xml(args_xml, action_xml, FALSE);
     free_xml(args_xml);
 
     /* List any nodes this action is expected to make down */
     if (needs_node_info && (action->node != NULL)) {
         add_downed_nodes(action_xml, action, data_set);
     }
 
     if (needs_maintenance_info) {
         add_maintenance_nodes(action_xml, data_set);
     }
 
     crm_log_xml_trace(action_xml, "dumped action");
     return action_xml;
 }
 
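/*!
 * \internal
 * \brief Check whether an action should be included in the transition graph
 *
 * Skip actions that have already been dumped, are optional, are not
 * runnable, or apply to an unmanaged resource (except probes and recurring
 * monitors, which must still go through).
 */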
 static gboolean
 should_dump_action(action_t * action)
 {
     CRM_CHECK(action != NULL, return FALSE);
 
     if (is_set(action->flags, pe_action_dumped)) {
         crm_trace("action %d (%s) was already dumped", action->id, action->uuid);
         return FALSE;
 
     } else if (is_set(action->flags, pe_action_pseudo) && safe_str_eq(action->task, CRM_OP_PROBED)) {
         GListPtr lpc = NULL;
 
         /* This is a horrible but convenient hack
          *
         * It minimizes the number of actions with unsatisfied inputs
         * (i.e. not included in the graph)
         *
         * This, in turn, means we can be more concise when printing
          * aborted/incomplete graphs.
          *
          * It also makes it obvious which node is preventing
          * probe_complete from running (presumably because it is only
          * partially up)
          *
          * For these reasons we tolerate such perversions
          */
 
         for (lpc = action->actions_after; lpc != NULL; lpc = lpc->next) {
             action_wrapper_t *wrapper = (action_wrapper_t *) lpc->data;
 
             if (is_not_set(wrapper->action->flags, pe_action_runnable)) {
                 /* Only interested in runnable operations */
             } else if (safe_str_neq(wrapper->action->task, RSC_START)) {
                 /* Only interested in start operations */
             } else if (is_set(wrapper->action->flags, pe_action_dumped)) {
                 crm_trace("action %d (%s) dependency of %s",
                           action->id, action->uuid, wrapper->action->uuid);
                 return TRUE;
 
             } else if (should_dump_action(wrapper->action)) {
                 crm_trace("action %d (%s) dependency of %s",
                           action->id, action->uuid, wrapper->action->uuid);
                 return TRUE;
             }
         }
     }
 
     if (is_set(action->flags, pe_action_runnable) == FALSE) {
         crm_trace("action %d (%s) was not runnable", action->id, action->uuid);
         return FALSE;
 
     } else if (is_set(action->flags, pe_action_optional)
                && is_set(action->flags, pe_action_print_always) == FALSE) {
         crm_trace("action %d (%s) was optional", action->id, action->uuid);
         return FALSE;
 
     } else if (action->rsc != NULL && is_not_set(action->rsc->flags, pe_rsc_managed)) {
         const char *interval = NULL;
 
         interval = g_hash_table_lookup(action->meta, XML_LRM_ATTR_INTERVAL);
 
         /* make sure probes and recurring monitors go through */
         if (safe_str_neq(action->task, RSC_STATUS) && interval == NULL) {
             crm_trace("action %d (%s) was for an unmanaged resource (%s)",
                       action->id, action->uuid, action->rsc->id);
             return FALSE;
         }
     }
 
     if (is_set(action->flags, pe_action_pseudo)
         || safe_str_eq(action->task, CRM_OP_FENCE)
         || safe_str_eq(action->task, CRM_OP_SHUTDOWN)) {
         /* skip the next checks */
         return TRUE;
     }
 
     if (action->node == NULL) {
         pe_err("action %d (%s) was not allocated", action->id, action->uuid);
         log_action(LOG_DEBUG, "Unallocated action", action, FALSE);
         return FALSE;
 
     } else if(is_container_remote_node(action->node) && action->node->details->remote_requires_reset == FALSE) {
         crm_trace("Assuming action %s for %s will be runnable", action->uuid, action->node->details->uname);
 
     } else if (action->node->details->online == FALSE) {
         pe_err("action %d was (%s) scheduled for offline node", action->id, action->uuid);
         log_action(LOG_DEBUG, "Action for offline node", action, FALSE);
         return FALSE;
 #if 0
         /* but this would also affect resources that can be safely
          *  migrated before a fencing op
          */
     } else if (action->node->details->unclean == FALSE) {
         pe_err("action %d was (%s) scheduled for unclean node", action->id, action->uuid);
         log_action(LOG_DEBUG, "Action for unclean node", action, FALSE);
         return FALSE;
 #endif
     }
     return TRUE;
 }
 
/* Sort action wrappers by action ID, lowest to highest (NULL entries last) */
static gint
sort_action_id(gconstpointer a, gconstpointer b)
{
    const action_wrapper_t *wrapper_a = (const action_wrapper_t *)a;
    const action_wrapper_t *wrapper_b = (const action_wrapper_t *)b;

    if (a == NULL) {
        return 1;
    }
    if (b == NULL) {
        return -1;
    }

    if (wrapper_a->action->id < wrapper_b->action->id) {
        return -1;
    }

    if (wrapper_a->action->id > wrapper_b->action->id) {
        return 1;
    }
    return 0;
}
 
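/*!
 * \internal
 * \brief Check whether an ordering input should be dumped for an action
 *
 * \param[in] last_action  ID of the input most recently dumped, or -1
 * \param[in] action       Action whose input is being considered
 * \param[in] wrapper      The input (ordering dependency) to check
 *
 * \return TRUE if the input is meaningful, FALSE if it is a duplicate,
 *         suppressed, or filtered out (several filters also downgrade
 *         wrapper->type to pe_order_none so later passes skip it)
 */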
 static gboolean
 check_dump_input(int last_action, action_t * action, action_wrapper_t * wrapper)
 {
     int type = wrapper->type;
 
     if (wrapper->state == pe_link_dumped) {
         return TRUE;
 
     } else if (wrapper->state == pe_link_dup) {
         return FALSE;
     }
 
     type &= ~pe_order_implies_first_printed;
     type &= ~pe_order_implies_then_printed;
     type &= ~pe_order_optional;
 
     if (is_not_set(type, pe_order_preserve)
         && action->rsc && action->rsc->fillers
         && wrapper->action->rsc && wrapper->action->node
         && wrapper->action->node->details->remote_rsc
         && (wrapper->action->node->details->remote_rsc->container == action->rsc)) {
         /* This prevents user-defined ordering constraints between resources
          * running in a guest node and the resource that defines that node.
          */
         crm_warn("Invalid ordering constraint between %s and %s",
                  wrapper->action->rsc->id, action->rsc->id);
         wrapper->type = pe_order_none;
         return FALSE;
     }
 
     if (last_action == wrapper->action->id) {
         crm_trace("Input (%d) %s duplicated for %s",
                   wrapper->action->id, wrapper->action->uuid, action->uuid);
         wrapper->state = pe_link_dup;
         return FALSE;
 
     } else if (wrapper->type == pe_order_none) {
         crm_trace("Input (%d) %s suppressed for %s",
                   wrapper->action->id, wrapper->action->uuid, action->uuid);
         return FALSE;
 
     } else if (is_set(wrapper->action->flags, pe_action_runnable) == FALSE
                && type == pe_order_none && safe_str_neq(wrapper->action->uuid, CRM_OP_PROBED)) {
         crm_trace("Input (%d) %s optional (ordering) for %s",
                   wrapper->action->id, wrapper->action->uuid, action->uuid);
         return FALSE;
 
     } else if (is_set(wrapper->action->flags, pe_action_runnable) == FALSE
                && is_set(type, pe_order_one_or_more)) {
         crm_trace("Input (%d) %s optional (one-or-more) for %s",
                   wrapper->action->id, wrapper->action->uuid, action->uuid);
         return FALSE;
 
     } else if (is_set(action->flags, pe_action_pseudo)
                && (wrapper->type & pe_order_stonith_stop)) {
         crm_trace("Input (%d) %s suppressed for %s",
                   wrapper->action->id, wrapper->action->uuid, action->uuid);
         return FALSE;
 
     } else if ((wrapper->type & pe_order_implies_first_migratable) && (is_set(wrapper->action->flags, pe_action_runnable) == FALSE)) {
         return FALSE;
 
     } else if ((wrapper->type & pe_order_apply_first_non_migratable)
                 && (is_set(wrapper->action->flags, pe_action_migrate_runnable))) {
         return FALSE;
 
     } else if ((wrapper->type == pe_order_optional)
                && crm_ends_with(wrapper->action->uuid, "_stop_0")
                && is_set(wrapper->action->flags, pe_action_migrate_runnable)) {
 
        /* For optional-only orderings, ordering is not preserved for
         * a stop action that is actually involved in a migration. */
         return FALSE;
 
     } else if (wrapper->type == pe_order_load) {
         crm_trace("check load filter %s.%s -> %s.%s",
                   wrapper->action->uuid,
                   wrapper->action->node ? wrapper->action->node->details->uname : "", action->uuid,
                   action->node ? action->node->details->uname : "");
 
         if (action->rsc && safe_str_eq(action->task, RSC_MIGRATE)) {
            /* Remove orderings like the following if they are not relevant:
             *     "load_stopped_node2" -> "rscA_migrate_to node1"
             * which were also created from pengine/native.c: MigrateRsc():
             *     order_actions(other, then, other_w->type);
             */
 
             /* For migrate_to ops, we care about where it has been
              * allocated to, not where the action will be executed
              */
             if (wrapper->action->node == NULL || action->rsc->allocated_to == NULL
                 || wrapper->action->node->details != action->rsc->allocated_to->details) {
                 /* Check if the actions are for the same node, ignore otherwise */
                 crm_trace("load filter - migrate");
                 wrapper->type = pe_order_none;
                 return FALSE;
             }
 
         } else if (wrapper->action->node == NULL || action->node == NULL
                    || wrapper->action->node->details != action->node->details) {
             /* Check if the actions are for the same node, ignore otherwise */
             crm_trace("load filter - node");
             wrapper->type = pe_order_none;
             return FALSE;
 
         } else if (is_set(wrapper->action->flags, pe_action_optional)) {
             /* Check if the pre-req is optional, ignore if so */
             crm_trace("load filter - optional");
             wrapper->type = pe_order_none;
             return FALSE;
         }
 
     } else if (wrapper->type == pe_order_anti_colocation) {
         crm_trace("check anti-colocation filter %s.%s -> %s.%s",
                   wrapper->action->uuid,
                   wrapper->action->node ? wrapper->action->node->details->uname : "",
                   action->uuid,
                   action->node ? action->node->details->uname : "");
 
         if (wrapper->action->node && action->node
             && wrapper->action->node->details != action->node->details) {
             /* Check if the actions are for the same node, ignore otherwise */
             crm_trace("anti-colocation filter - node");
             wrapper->type = pe_order_none;
             return FALSE;
 
         } else if (is_set(wrapper->action->flags, pe_action_optional)) {
             /* Check if the pre-req is optional, ignore if so */
             crm_trace("anti-colocation filter - optional");
             wrapper->type = pe_order_none;
             return FALSE;
         }
 
     } else if (wrapper->action->rsc
                && wrapper->action->rsc != action->rsc
                && is_set(wrapper->action->rsc->flags, pe_rsc_failed)
                && is_not_set(wrapper->action->rsc->flags, pe_rsc_managed)
                && crm_ends_with(wrapper->action->uuid, "_stop_0")
                && action->rsc && pe_rsc_is_clone(action->rsc)) {
         crm_warn("Ignoring requirement that %s complete before %s:"
                  " unmanaged failed resources cannot prevent clone shutdown",
                  wrapper->action->uuid, action->uuid);
         return FALSE;
 
     } else if (is_set(wrapper->action->flags, pe_action_dumped)
                || should_dump_action(wrapper->action)) {
         crm_trace("Input (%d) %s should be dumped for %s", wrapper->action->id,
                   wrapper->action->uuid, action->uuid);
         goto dump;
 
 #if 0
     } else if (is_set(wrapper->action->flags, pe_action_runnable)
                && is_set(wrapper->action->flags, pe_action_pseudo)
                && wrapper->action->rsc->variant != pe_native) {
         crm_crit("Input (%d) %s should be dumped for %s",
                  wrapper->action->id, wrapper->action->uuid, action->uuid);
         goto dump;
 #endif
     } else if (is_set(wrapper->action->flags, pe_action_optional) == TRUE
                && is_set(wrapper->action->flags, pe_action_print_always) == FALSE) {
         crm_trace("Input (%d) %s optional for %s", wrapper->action->id,
                   wrapper->action->uuid, action->uuid);
         crm_trace("Input (%d) %s n=%p p=%d r=%d o=%d a=%d f=0x%.6x",
                   wrapper->action->id, wrapper->action->uuid, wrapper->action->node,
                   is_set(wrapper->action->flags, pe_action_pseudo),
                   is_set(wrapper->action->flags, pe_action_runnable),
                   is_set(wrapper->action->flags, pe_action_optional),
                   is_set(wrapper->action->flags, pe_action_print_always), wrapper->type);
         return FALSE;
 
     }
 
   dump:
     return TRUE;
 }
 
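/*!
 * \internal
 * \brief Check whether an ordering introduces a loop in the graph
 *
 * Recursively walk the inputs of wrapper's action, using the
 * pe_action_tracking flag to avoid revisiting actions, and return TRUE
 * if init_action can be reached again.
 */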
 static gboolean
 graph_has_loop(action_t * init_action, action_t * action, action_wrapper_t * wrapper)
 {
     GListPtr lpc = NULL;
     gboolean has_loop = FALSE;
 
     if (is_set(wrapper->action->flags, pe_action_tracking)) {
         crm_trace("Breaking tracking loop: %s.%s -> %s.%s (0x%.6x)",
                   wrapper->action->uuid,
                   wrapper->action->node ? wrapper->action->node->details->uname : "",
                   action->uuid,
                   action->node ? action->node->details->uname : "",
                   wrapper->type);
         return FALSE;
     }
 
     if (check_dump_input(-1, action, wrapper) == FALSE) {
         return FALSE;
     }
 
    /* If there is any ordering like:
     *     "rscB_stop node2" -> "load_stopped_node2" -> "rscA_migrate_to node1"
     * where rscA is being migrated from node1 to node2
     * while rscB is being migrated from node2 to node1,
     * there is a potential graph loop.
     * Break the order "load_stopped_node2" -> "rscA_migrate_to node1".
     */
 
     crm_trace("Checking graph loop: %s.%s -> %s.%s (0x%.6x)",
               wrapper->action->uuid,
               wrapper->action->node ? wrapper->action->node->details->uname : "",
               action->uuid,
               action->node ? action->node->details->uname : "",
               wrapper->type);
 
     if (wrapper->action == init_action) {
         crm_debug("Found graph loop: %s.%s ->...-> %s.%s",
                   action->uuid,
                   action->node ? action->node->details->uname : "",
                   init_action->uuid,
                   init_action->node ? init_action->node->details->uname : "");
 
         return TRUE;
     }
 
     set_bit(wrapper->action->flags, pe_action_tracking);
 
     for (lpc = wrapper->action->actions_before; lpc != NULL; lpc = lpc->next) {
         action_wrapper_t *wrapper_before = (action_wrapper_t *) lpc->data;
 
         if (graph_has_loop(init_action, wrapper->action, wrapper_before)) {
             has_loop = TRUE;
             goto done;
         }
     }
 
 done:
     pe_clear_action_bit(wrapper->action, pe_action_tracking);
 
     return has_loop;
 }
 
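/*!
 * \internal
 * \brief Check whether an input should be dumped, breaking loops if needed
 *
 * Like check_dump_input(), but for load orderings on migrate_to actions,
 * additionally drop orderings that would introduce a graph loop (cl#5235).
 */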
 static gboolean
 should_dump_input(int last_action, action_t * action, action_wrapper_t * wrapper)
 {
     wrapper->state = pe_link_not_dumped;
 
     if (check_dump_input(last_action, action, wrapper) == FALSE) {
         return FALSE;
     }
 
     if (wrapper->type == pe_order_load
         && action->rsc
         && safe_str_eq(action->task, RSC_MIGRATE)) {
         crm_trace("Checking graph loop - load migrate: %s.%s -> %s.%s",
                   wrapper->action->uuid,
                   wrapper->action->node ? wrapper->action->node->details->uname : "",
                   action->uuid,
                   action->node ? action->node->details->uname : "");
 
         if (graph_has_loop(action, action, wrapper)) {
            /* Remove orderings like the following if they introduce any
             * graph loops:
             *     "load_stopped_node2" -> "rscA_migrate_to node1"
             * which were also created from pengine/native.c: MigrateRsc():
             *     order_actions(other, then, other_w->type);
             */
             crm_debug("Breaking graph loop - load migrate: %s.%s -> %s.%s",
                       wrapper->action->uuid,
                       wrapper->action->node ? wrapper->action->node->details->uname : "",
                       action->uuid,
                       action->node ? action->node->details->uname : "");
 
             wrapper->type = pe_order_none;
             return FALSE;
         }
     }
 
     crm_trace("Input (%d) %s n=%p p=%d r=%d o=%d a=%d f=0x%.6x dumped for %s",
               wrapper->action->id,
               wrapper->action->uuid,
               wrapper->action->node,
               is_set(wrapper->action->flags, pe_action_pseudo),
               is_set(wrapper->action->flags, pe_action_runnable),
               is_set(wrapper->action->flags, pe_action_optional),
               is_set(wrapper->action->flags, pe_action_print_always), wrapper->type, action->uuid);
     return TRUE;
 }
 
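/*!
 * \internal
 * \brief Add an action to the transition graph as a new synapse
 *
 * Create a <synapse> element containing an <action_set> with the action
 * itself and an <inputs> list with one <trigger> per ordering dependency
 * that survives should_dump_input().
 */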
 void
 graph_element_from_action(action_t * action, pe_working_set_t * data_set)
 {
     GListPtr lpc = NULL;
     int last_action = -1;
     int synapse_priority = 0;
     xmlNode *syn = NULL;
     xmlNode *set = NULL;
     xmlNode *in = NULL;
     xmlNode *input = NULL;
     xmlNode *xml_action = NULL;
 
     if (should_dump_action(action) == FALSE) {
         return;
     }
 
     set_bit(action->flags, pe_action_dumped);
 
     syn = create_xml_node(data_set->graph, "synapse");
     set = create_xml_node(syn, "action_set");
     in = create_xml_node(syn, "inputs");
 
     crm_xml_add_int(syn, XML_ATTR_ID, data_set->num_synapse);
     data_set->num_synapse++;
 
     if (action->rsc != NULL) {
         synapse_priority = action->rsc->priority;
     }
     if (action->priority > synapse_priority) {
         synapse_priority = action->priority;
     }
     if (synapse_priority > 0) {
         crm_xml_add_int(syn, XML_CIB_ATTR_PRIORITY, synapse_priority);
     }
 
     xml_action = action2xml(action, FALSE, data_set);
     add_node_nocopy(set, crm_element_name(xml_action), xml_action);
 
     action->actions_before = g_list_sort(action->actions_before, sort_action_id);
 
     for (lpc = action->actions_before; lpc != NULL; lpc = lpc->next) {
         action_wrapper_t *wrapper = (action_wrapper_t *) lpc->data;
 
         if (should_dump_input(last_action, action, wrapper) == FALSE) {
             continue;
         }
 
         wrapper->state = pe_link_dumped;
        CRM_CHECK(last_action < wrapper->action->id, ;);
         last_action = wrapper->action->id;
         input = create_xml_node(in, "trigger");
 
         xml_action = action2xml(wrapper->action, TRUE, data_set);
         add_node_nocopy(input, crm_element_name(xml_action), xml_action);
     }
 }
diff --git a/pengine/regression.sh b/pengine/regression.sh
index aeff13a26e..3ca17aa4de 100755
--- a/pengine/regression.sh
+++ b/pengine/regression.sh
@@ -1,893 +1,894 @@
 #!/bin/bash
 
  # Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  #
  # This program is free software; you can redistribute it and/or
  # modify it under the terms of the GNU General Public
  # License as published by the Free Software Foundation; either
  # version 2 of the License, or (at your option) any later version.
  #
  # This software is distributed in the hope that it will be useful,
  # but WITHOUT ANY WARRANTY; without even the implied warranty of
  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  # General Public License for more details.
  #
  # You should have received a copy of the GNU General Public
  # License along with this library; if not, write to the Free Software
  # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  #
 
 core=`dirname $0`
 . $core/regression.core.sh || exit 1
 
 DO_VERSIONED_TESTS=0
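# Set DO_VERSIONED_TESTS=1 to also run the tests for versioned resource
# attributes (e.g. migrate-versioned below)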
 
 create_mode="true"
 info Generating test outputs for these tests...
 # do_test file description
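# (some tests pass additional do_test arguments, e.g. -t to supply a fixed
#  date/time; see the date-* tests below)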
 info Done.
 echo ""
 
 info Performing the following tests from $io_dir
 create_mode="false"
 echo ""
 
 do_test simple1 "Offline     "
 do_test simple2 "Start       "
 do_test simple3 "Start 2     "
 do_test simple4 "Start Failed"
 do_test simple6 "Stop Start  "
 do_test simple7 "Shutdown    "
 #do_test simple8 "Stonith	"
 #do_test simple9 "Lower version"
 #do_test simple10 "Higher version"
 do_test simple11 "Priority (ne)"
 do_test simple12 "Priority (eq)"
 do_test simple8 "Stickiness"
 
 echo ""
 do_test group1 "Group		"
 do_test group2 "Group + Native	"
 do_test group3 "Group + Group	"
 do_test group4 "Group + Native (nothing)"
 do_test group5 "Group + Native (move)   "
 do_test group6 "Group + Group (move)    "
 do_test group7 "Group colocation"
do_test group13 "Group colocation (can't run)"
 do_test group8 "Group anti-colocation"
 do_test group9 "Group recovery"
 do_test group10 "Group partial recovery"
 do_test group11 "Group target_role"
 do_test group14 "Group stop (graph terminated)"
 do_test group15 "-ve group colocation"
 do_test bug-1573 "Partial stop of a group with two children"
 do_test bug-1718 "Mandatory group ordering - Stop group_FUN"
 do_test bug-lf-2613 "Move group on failure"
 do_test bug-lf-2619 "Move group on clone failure"
 do_test group-fail "Ensure stop order is preserved for partially active groups"
 do_test group-unmanaged "No need to restart r115 because r114 is unmanaged"
 do_test group-unmanaged-stopped "Make sure r115 is stopped when r114 fails"
 do_test group-dependents "Account for the location preferences of things colocated with a group"
 
 echo ""
 do_test rsc_dep1 "Must not     "
 do_test rsc_dep3 "Must         "
 do_test rsc_dep5 "Must not 3   "
 do_test rsc_dep7 "Must 3       "
 do_test rsc_dep10 "Must (but cant)"
 do_test rsc_dep2  "Must (running) "
 do_test rsc_dep8  "Must (running : alt) "
 do_test rsc_dep4  "Must (running + move)"
 do_test asymmetric "Asymmetric - require explicit location constraints"
 
 echo ""
 do_test orphan-0 "Orphan ignore"
 do_test orphan-1 "Orphan stop"
 do_test orphan-2 "Orphan stop, remove failcount"
 
 echo ""
 do_test params-0 "Params: No change"
 do_test params-1 "Params: Changed"
 do_test params-2 "Params: Resource definition"
 do_test params-4 "Params: Reload"
 do_test params-5 "Params: Restart based on probe digest"
 do_test novell-251689 "Resource definition change + target_role=stopped"
 do_test bug-lf-2106 "Restart all anonymous clone instances after config change"
 do_test params-6 "Params: Detect reload in previously migrated resource"
 do_test nvpair-id-ref "Support id-ref in nvpair with optional name"
 do_test not-reschedule-unneeded-monitor "Do not reschedule unneeded monitors while resource definitions have changed"
 
 echo ""
 do_test target-0 "Target Role : baseline"
 do_test target-1 "Target Role : master"
 do_test target-2 "Target Role : invalid"
 
 echo ""
 do_test base-score "Set a node's default score for all nodes"
 
 echo ""
 do_test date-1 "Dates" -t "2005-020"
 do_test date-2 "Date Spec - Pass" -t "2005-020T12:30"
 do_test date-3 "Date Spec - Fail" -t "2005-020T11:30"
 do_test origin "Timing of recurring operations" -t "2014-05-07 00:28:00" 
 do_test probe-0 "Probe (anon clone)"
 do_test probe-1 "Pending Probe"
 do_test probe-2 "Correctly re-probe cloned groups"
 do_test probe-3 "Probe (pending node)"
 do_test probe-4 "Probe (pending node + stopped resource)"
 do_test standby "Standby"
 do_test comments "Comments"
 
 echo ""
 do_test one-or-more-0 "Everything starts"
 do_test one-or-more-1 "Nothing starts because of A"
 do_test one-or-more-2 "D can start because of C"
 do_test one-or-more-3 "D cannot start because of B and C"
 do_test one-or-more-4 "D cannot start because of target-role"
 do_test one-or-more-5 "Start A and F even though C and D are stopped"
 do_test one-or-more-6 "Leave A running even though B is stopped"
 do_test one-or-more-7 "Leave A running even though C is stopped"
 do_test bug-5140-require-all-false "Allow basegrp:0 to stop"
do_test clone-require-all-1 "clone B starts on nodes 3 and 4"
do_test clone-require-all-2 "clone B remains stopped everywhere"
do_test clone-require-all-3 "clone B stops everywhere because A stops everywhere"
do_test clone-require-all-4 "clone B remains on nodes 3 and 4 with only one instance of A remaining"
do_test clone-require-all-5 "clone B starts on nodes 1, 3 and 4"
do_test clone-require-all-6 "clone B remains active after shutting down instances of A"
do_test clone-require-all-7 "clone A and B both start at the same time; all instances of A start before B"
 do_test clone-require-all-no-interleave-1 "C starts everywhere after A and B"
 do_test clone-require-all-no-interleave-2 "C starts on nodes 1, 2, and 4 with only one active instance of B"
 do_test clone-require-all-no-interleave-3 "C remains active when instance of B is stopped on one node and started on another."
 do_test one-or-more-unrunnnable-instances "Avoid dependencies on instances that won't ever be started"
 
 echo ""
 do_test order1 "Order start 1     "
 do_test order2 "Order start 2     "
 do_test order3 "Order stop	  "
 do_test order4 "Order (multiple)  "
 do_test order5 "Order (move)  "
 do_test order6 "Order (move w/ restart)  "
 do_test order7 "Order (mandatory)  "
 do_test order-optional "Order (score=0)  "
 do_test order-required "Order (score=INFINITY)  "
 do_test bug-lf-2171 "Prevent group start when clone is stopped"
 do_test order-clone "Clone ordering should be able to prevent startup of dependent clones"
 do_test order-sets "Ordering for resource sets"
 do_test order-serialize "Serialize resources without inhibiting migration"
 do_test order-serialize-set "Serialize a set of resources without inhibiting migration"
 do_test clone-order-primitive "Order clone start after a primitive"
 do_test clone-order-16instances "Verify ordering of 16 cloned resources"
 do_test order-optional-keyword "Order (optional keyword)"
 do_test order-mandatory "Order (mandatory keyword)"
 do_test bug-lf-2493 "Don't imply colocation requirements when applying ordering constraints with clones"
 do_test ordered-set-basic-startup "Constraint set with default order settings."
 do_test ordered-set-natural "Allow natural set ordering"
 do_test order-wrong-kind "Order (error)"
 
 echo ""
 do_test coloc-loop "Colocation - loop"
 do_test coloc-many-one "Colocation - many-to-one"
 do_test coloc-list "Colocation - many-to-one with list"
 do_test coloc-group "Colocation - groups"
 do_test coloc-slave-anti "Anti-colocation with slave shouldn't prevent master colocation"
 do_test coloc-attr "Colocation based on node attributes"
 do_test coloc-negative-group "Negative colocation with a group"
 do_test coloc-intra-set "Intra-set colocation"
 do_test bug-lf-2435 "Colocation sets with a negative score"
 do_test coloc-clone-stays-active "Ensure clones don't get stopped/demoted because a dependent must stop"
 do_test coloc_fp_logic "Verify floating point calculations in colocation are working"
 do_test colo_master_w_native "cl#5070 - Verify promotion order is affected when colocating master to native rsc."
 do_test colo_slave_w_native  "cl#5070 - Verify promotion order is affected when colocating slave to native rsc."
 do_test anti-colocation-order "cl#5187 - Prevent resources in an anti-colocation from even temporarily running on a same node"
 do_test anti-colocation-master "Organize order of actions for master resources in anti-colocations"
 do_test anti-colocation-slave "Organize order of actions for slave resources in anti-colocations"
 do_test enforce-colo1 "Always enforce B with A INFINITY."
 do_test complex_enforce_colo "Always enforce B with A INFINITY. (make sure heat-engine stops)"
 
 echo ""
 do_test rsc-sets-seq-true "Resource Sets - sequential=false"
 do_test rsc-sets-seq-false "Resource Sets - sequential=true"
 do_test rsc-sets-clone "Resource Sets - Clone"
 do_test rsc-sets-master "Resource Sets - Master"
 do_test rsc-sets-clone-1 "Resource Sets - Clone (lf#2404)"
 
 #echo ""
 #do_test agent1 "version: lt (empty)"
 #do_test agent2 "version: eq	"
 #do_test agent3 "version: gt	"
 
 echo ""
 do_test attrs1 "string: eq (and)     "
 do_test attrs2 "string: lt / gt (and)"
 do_test attrs3 "string: ne (or)      "
 do_test attrs4 "string: exists       "
 do_test attrs5 "string: not_exists   "
 do_test attrs6 "is_dc: true          "
 do_test attrs7 "is_dc: false         "
 do_test attrs8 "score_attribute      "
 do_test per-node-attrs "Per node resource parameters"
 
 echo ""
 do_test mon-rsc-1 "Schedule Monitor - start"
 do_test mon-rsc-2 "Schedule Monitor - move "
 do_test mon-rsc-3 "Schedule Monitor - pending start     "
 do_test mon-rsc-4 "Schedule Monitor - move/pending start"
 
 echo ""
 do_test rec-rsc-0 "Resource Recover - no start     "
 do_test rec-rsc-1 "Resource Recover - start        "
 do_test rec-rsc-2 "Resource Recover - monitor      "
 do_test rec-rsc-3 "Resource Recover - stop - ignore"
 do_test rec-rsc-4 "Resource Recover - stop - block "
 do_test rec-rsc-5 "Resource Recover - stop - fence "
 do_test rec-rsc-6 "Resource Recover - multiple - restart"
 do_test rec-rsc-7 "Resource Recover - multiple - stop   "
 do_test rec-rsc-8 "Resource Recover - multiple - block  "
 do_test rec-rsc-9 "Resource Recover - group/group"
 do_test monitor-recovery "on-fail=block + resource recovery detected by recurring monitor"
 do_test stop-failure-no-quorum "Stop failure without quorum"
 do_test stop-failure-no-fencing "Stop failure without fencing available"
 do_test stop-failure-with-fencing "Stop failure with fencing available"
 do_test multiple-active-block-group "Support of multiple-active=block for resource groups"
 do_test multiple-monitor-one-failed "Consider resource failed if any of the configured monitor operations failed"
 
 echo ""
 do_test quorum-1 "No quorum - ignore"
 do_test quorum-2 "No quorum - freeze"
 do_test quorum-3 "No quorum - stop  "
 do_test quorum-4 "No quorum - start anyway"
 do_test quorum-5 "No quorum - start anyway (group)"
 do_test quorum-6 "No quorum - start anyway (clone)"
 do_test bug-cl-5212 "No promotion with no-quorum-policy=freeze"
 do_test suicide-needed-inquorate "no-quorum-policy=suicide: suicide necessary"
 do_test suicide-not-needed-initial-quorum "no-quorum-policy=suicide: suicide not necessary at initial quorum"
 do_test suicide-not-needed-never-quorate "no-quorum-policy=suicide: suicide not necessary if never quorate"
do_test suicide-not-needed-quorate "no-quorum-policy=suicide: suicide not necessary if quorate"
 
 echo ""
 do_test rec-node-1 "Node Recover - Startup   - no fence"
 do_test rec-node-2 "Node Recover - Startup   - fence   "
 do_test rec-node-3 "Node Recover - HA down   - no fence"
 do_test rec-node-4 "Node Recover - HA down   - fence   "
 do_test rec-node-5 "Node Recover - CRM down  - no fence"
 do_test rec-node-6 "Node Recover - CRM down  - fence   "
 do_test rec-node-7 "Node Recover - no quorum - ignore  "
 do_test rec-node-8 "Node Recover - no quorum - freeze  "
 do_test rec-node-9 "Node Recover - no quorum - stop    "
 do_test rec-node-10 "Node Recover - no quorum - stop w/fence"
 do_test rec-node-11 "Node Recover - CRM down w/ group - fence   "
 do_test rec-node-12 "Node Recover - nothing active - fence   "
 do_test rec-node-13 "Node Recover - failed resource + shutdown - fence   "
 do_test rec-node-15 "Node Recover - unknown lrm section"
 do_test rec-node-14 "Serialize all stonith's"
 
 echo ""
 do_test multi1 "Multiple Active (stop/start)"
 
 echo ""
 do_test migrate-begin     "Normal migration"
 do_test migrate-success   "Completed migration"
 do_test migrate-partial-1 "Completed migration, missing stop on source"
 do_test migrate-partial-2 "Successful migrate_to only"
 do_test migrate-partial-3 "Successful migrate_to only, target down"
 do_test migrate-partial-4 "Migrate from the correct host after migrate_to+migrate_from"
 do_test bug-5186-partial-migrate "Handle partial migration when src node loses membership"
 
 do_test migrate-fail-2 "Failed migrate_from"
 do_test migrate-fail-3 "Failed migrate_from + stop on source"
 do_test migrate-fail-4 "Failed migrate_from + stop on target - ideally we wouldn't need to re-stop on target"
 do_test migrate-fail-5 "Failed migrate_from + stop on source and target"
 
 do_test migrate-fail-6 "Failed migrate_to"
 do_test migrate-fail-7 "Failed migrate_to + stop on source"
 do_test migrate-fail-8 "Failed migrate_to + stop on target - ideally we wouldn't need to re-stop on target"
 do_test migrate-fail-9 "Failed migrate_to + stop on source and target"
 
 do_test migrate-stop "Migration in a stopping stack"
 do_test migrate-start "Migration in a starting stack"
 do_test migrate-stop_start "Migration in a restarting stack"
 do_test migrate-stop-complex "Migration in a complex stopping stack"
 do_test migrate-start-complex "Migration in a complex starting stack"
 do_test migrate-stop-start-complex "Migration in a complex moving stack"
 do_test migrate-shutdown "Order the post-migration 'stop' before node shutdown"
 
 do_test migrate-1 "Migrate (migrate)"
 do_test migrate-2 "Migrate (stable)"
 do_test migrate-3 "Migrate (failed migrate_to)"
 do_test migrate-4 "Migrate (failed migrate_from)"
 do_test novell-252693 "Migration in a stopping stack"
 do_test novell-252693-2 "Migration in a starting stack"
 do_test novell-252693-3 "Non-Migration in a starting and stopping stack"
 do_test bug-1820 "Migration in a group"
 do_test bug-1820-1 "Non-migration in a group"
 do_test migrate-5 "Primitive migration with a clone"
 do_test migrate-fencing "Migration after Fencing"
 do_test migrate-both-vms "Migrate two VMs that have no colocation"
 do_test migration-behind-migrating-remote "Migrate resource behind migrating remote connection"
 
do_test 1-a-then-bm-move-b "Advanced migrate logic, A then B, migrate B"
do_test 2-am-then-b-move-a "Advanced migrate logic, A then B, migrate A without stopping B"
do_test 3-am-then-bm-both-migrate "Advanced migrate logic, A then B, migrate both"
do_test 4-am-then-bm-b-not-migratable "Advanced migrate logic, A then B, B not migratable"
do_test 5-am-then-bm-a-not-migratable "Advanced migrate logic, A then B, move both, A not migratable"
 do_test 6-migrate-group "Advanced migrate logic, migrate a group"
 do_test 7-migrate-group-one-unmigratable "Advanced migrate logic, migrate group mixed with allow-migrate true/false"
 do_test 8-am-then-bm-a-migrating-b-stopping "Advanced migrate logic, A then B, A migrating, B stopping"
 do_test 9-am-then-bm-b-migrating-a-stopping "Advanced migrate logic, A then B, B migrate, A stopping"
 do_test 10-a-then-bm-b-move-a-clone "Advanced migrate logic, A clone then B, migrate B while stopping A"
 do_test 11-a-then-bm-b-move-a-clone-starting "Advanced migrate logic, A clone then B, B moving while A is start/stopping"
 
do_test a-promote-then-b-migrate "A promote then B start, migrate B"
do_test a-demote-then-b-migrate "A demote then B stop, migrate B"
 
 if [ $DO_VERSIONED_TESTS -eq 1 ]; then
 	do_test migrate-versioned "Disable migration for versioned resources"
 fi
 
 #echo ""
 #do_test complex1 "Complex	"
 
 do_test bug-lf-2422 "Dependency on partially active group - stop ocfs:*"
 
 echo ""
 do_test clone-anon-probe-1 "Probe the correct (anonymous) clone instance for each node"
 do_test clone-anon-probe-2 "Avoid needless re-probing of anonymous clones"
 do_test clone-anon-failcount "Merge failcounts for anonymous clones"
 do_test inc0 "Incarnation start"
 do_test inc1 "Incarnation start order"
 do_test inc2 "Incarnation silent restart, stop, move"
 do_test inc3 "Inter-incarnation ordering, silent restart, stop, move"
 do_test inc4 "Inter-incarnation ordering, silent restart, stop, move (ordered)"
 do_test inc5 "Inter-incarnation ordering, silent restart, stop, move (restart 1)"
 do_test inc6 "Inter-incarnation ordering, silent restart, stop, move (restart 2)"
 do_test inc7 "Clone colocation"
 do_test inc8 "Clone anti-colocation"
 do_test inc9 "Non-unique clone"
 do_test inc10 "Non-unique clone (stop)"
 do_test inc11 "Primitive colocation with clones"
 do_test inc12 "Clone shutdown"
 do_test cloned-group "Make sure only the correct number of cloned groups are started"
 do_test cloned-group-stop "Ensure stopping qpidd also stops glance and cinder"
 do_test clone-no-shuffle "Don't prioritize allocation of instances that must be moved"
 do_test clone-max-zero "Orphan processing with clone-max=0"
 do_test clone-anon-dup "Bug LF#2087 - Correctly parse the state of anonymous clones that are active more than once per node"
 do_test bug-lf-2160 "Don't shuffle clones due to colocation"
 do_test bug-lf-2213 "clone-node-max enforcement for cloned groups"
 do_test bug-lf-2153 "Clone ordering constraints"
 do_test bug-lf-2361 "Ensure clones observe mandatory ordering constraints if the LHS is unrunnable"
 do_test bug-lf-2317 "Avoid needless restart of primitive depending on a clone"
 do_test clone-colocate-instance-1 "Colocation with a specific clone instance (negative example)"
 do_test clone-colocate-instance-2 "Colocation with a specific clone instance"
 do_test clone-order-instance "Ordering with specific clone instances"
 do_test bug-lf-2453 "Enforce mandatory clone ordering without colocation"
 do_test bug-lf-2508 "Correctly reconstruct the status of anonymous cloned groups"
 do_test bug-lf-2544 "Balanced clone placement"
 do_test bug-lf-2445 "Redistribute clones with node-max > 1 and stickiness = 0"
 do_test bug-lf-2574 "Avoid clone shuffle"
 do_test bug-lf-2581 "Avoid group restart due to unrelated clone (re)start"
 do_test bug-cl-5168 "Don't shuffle clones"
 do_test bug-cl-5170 "Prevent clone from starting with on-fail=block"
 do_test clone-fail-block-colocation "Move colocated group when failed clone has on-fail=block"
 do_test clone-interleave-1 "Clone-3 cannot start on pcmk-1 due to interleaved ordering (no colocation)"
 do_test clone-interleave-2 "Clone-3 must stop on pcmk-1 due to interleaved ordering (no colocation)"
 do_test clone-interleave-3 "Clone-3 must be recovered on pcmk-1 due to interleaved ordering (no colocation)"
 
 echo ""
 do_test cloned_start_one  "order first clone then clone... first clone_min=2"
 do_test cloned_start_two  "order first clone then clone... first clone_min=2"
 do_test cloned_stop_one   "order first clone then clone... first clone_min=2"
 do_test cloned_stop_two   "order first clone then clone... first clone_min=2"
 do_test clone_min_interleave_start_one "order first clone then clone... first clone_min=2 and then has interleave=true"
 do_test clone_min_interleave_start_two "order first clone then clone... first clone_min=2 and then has interleave=true"
 do_test clone_min_interleave_stop_one  "order first clone then clone... first clone_min=2 and then has interleave=true"
 do_test clone_min_interleave_stop_two  "order first clone then clone... first clone_min=2 and then has interleave=true"
 do_test clone_min_start_one "order first clone then primitive... first clone_min=2"
 do_test clone_min_start_two "order first clone then primitive... first clone_min=2"
 do_test clone_min_stop_all  "order first clone then primitive... first clone_min=2"
 do_test clone_min_stop_one  "order first clone then primitive... first clone_min=2"
 do_test clone_min_stop_two  "order first clone then primitive... first clone_min=2"
 
 echo ""
 do_test unfence-startup "Clean unfencing"
 do_test unfence-definition "Unfencing when the agent changes"
do_test unfence-parameters "Unfencing when the agent parameters change"
 
 echo ""
 do_test master-0 "Stopped -> Slave"
 do_test master-1 "Stopped -> Promote"
 do_test master-2 "Stopped -> Promote : notify"
 do_test master-3 "Stopped -> Promote : master location"
 do_test master-4 "Started -> Promote : master location"
 do_test master-5 "Promoted -> Promoted"
 do_test master-6 "Promoted -> Promoted (2)"
 do_test master-7 "Promoted -> Fenced"
 do_test master-8 "Promoted -> Fenced -> Moved"
 do_test master-9 "Stopped + Promotable + No quorum"
 do_test master-10 "Stopped -> Promotable : notify with monitor"
 do_test master-11 "Stopped -> Promote : colocation"
 do_test novell-239082 "Demote/Promote ordering"
 do_test novell-239087 "Stable master placement"
 do_test master-12 "Promotion based solely on rsc_location constraints"
 do_test master-13 "Include preferences of colocated resources when placing master"
do_test master-demote "Ordering when actions depend on demoting a slave resource"
 do_test master-ordering "Prevent resources from starting that need a master"
do_test bug-1765 "Master-Master Colocation (don't stop the slaves)"
 do_test master-group "Promotion of cloned groups"
 do_test bug-lf-1852 "Don't shuffle master/slave instances unnecessarily"
 do_test master-failed-demote "Don't retry failed demote actions"
 do_test master-failed-demote-2 "Don't retry failed demote actions (notify=false)"
 do_test master-depend "Ensure resources that depend on the master don't get allocated until the master does"
 do_test master-reattach "Re-attach to a running master"
 do_test master-allow-start "Don't include master score if it would prevent allocation"
do_test master-colocation "Allow master instance placement to be influenced by colocation constraints"
 do_test master-pseudo "Make sure promote/demote pseudo actions are created correctly"
 do_test master-role "Prevent target-role from promoting more than master-max instances"
 do_test bug-lf-2358 "Master-Master anti-colocation"
 do_test master-promotion-constraint "Mandatory master colocation constraints"
 do_test unmanaged-master "Ensure role is preserved for unmanaged resources"
 do_test master-unmanaged-monitor "Start the correct monitor operation for unmanaged masters"
 do_test master-demote-2 "Demote does not clear past failure"
 do_test master-move "Move master based on failure of colocated group"
 do_test master-probed-score "Observe the promotion score of probed resources"
 do_test colocation_constraint_stops_master "cl#5054 - Ensure master is demoted when stopped by colocation constraint"
 do_test colocation_constraint_stops_slave  "cl#5054 - Ensure slave is not demoted when stopped by colocation constraint"
 do_test order_constraint_stops_master      "cl#5054 - Ensure master is demoted when stopped by order constraint"
 do_test order_constraint_stops_slave       "cl#5054 - Ensure slave is not demoted when stopped by order constraint"
 do_test master_monitor_restart "cl#5072 - Ensure master monitor operation will start after promotion."
 do_test bug-rh-880249 "Handle replacement of an m/s resource with a primitive"
 do_test bug-5143-ms-shuffle "Prevent master shuffling due to promotion score"
 do_test master-demote-block "Block promotion if demote fails with on-fail=block"
 do_test master-dependent-ban "Don't stop instances from being active because a dependent is banned from that host"
 do_test master-stop "Stop instances due to location constraint with role=Started"
 do_test master-partially-demoted-group "Allow partially demoted group to finish demoting"
 do_test bug-cl-5213 "Ensure role colocation with -INFINITY is enforced"
 do_test bug-cl-5219 "Allow unrelated resources with a common colocation target to remain promoted"
 do_test master-asymmetrical-order "Fix the behaviors of multi-state resources with asymmetrical ordering"
 do_test master-notify "Master promotion with notifies"
 do_test master-score-startup "Use permanent master scores without LRM history"
 
 echo ""
 do_test history-1 "Correctly parse stateful-1 resource state"
 
 echo ""
 do_test managed-0 "Managed (reference)"
 do_test managed-1 "Not managed - down "
 do_test managed-2 "Not managed - up   "
 do_test bug-5028 "Shutdown should block if anything depends on an unmanaged resource"
 do_test bug-5028-detach "Ensure detach still works"
 do_test bug-5028-bottom "Ensure shutdown still blocks if the blocked resource is at the bottom of the stack"
do_test unmanaged-stop-1 "cl#5155 - Block the stop of resources if any dependent resource is unmanaged"
do_test unmanaged-stop-2 "cl#5155 - Block the stop of resources if the first resource in a mandatory stop order is unmanaged"
do_test unmanaged-stop-3 "cl#5155 - Block the stop of resources if any dependent resource in a group is unmanaged"
do_test unmanaged-stop-4 "cl#5155 - Block the stop of resources if any dependent resource in the middle of a group is unmanaged"
 do_test unmanaged-block-restart "Block restart of resources if any dependent resource in a group is unmanaged"
 
 echo ""
 do_test interleave-0 "Interleave (reference)"
 do_test interleave-1 "coloc - not interleaved"
 do_test interleave-2 "coloc - interleaved   "
 do_test interleave-3 "coloc - interleaved (2)"
 do_test interleave-pseudo-stop "Interleaved clone during stonith"
 do_test interleave-stop "Interleaved clone during stop"
 do_test interleave-restart "Interleaved clone during dependency restart"
 
 echo ""
 do_test notify-0 "Notify reference"
 do_test notify-1 "Notify simple"
 do_test notify-2 "Notify simple, confirm"
 do_test notify-3 "Notify move, confirm"
 do_test novell-239079 "Notification priority"
 #do_test notify-2 "Notify - 764"
 
 echo ""
 do_test 594 "OSDL #594 - Unrunnable actions scheduled in transition"
 do_test 662 "OSDL #662 - Two resources start on one node when incarnation_node_max = 1"
 do_test 696 "OSDL #696 - CRM starts stonith RA without monitor"
 do_test 726 "OSDL #726 - Attempting to schedule rsc_posic041_monitor_5000 _after_ a stop"
 do_test 735 "OSDL #735 - Correctly detect that rsc_hadev1 is stopped on hadev3"
 do_test 764 "OSDL #764 - Missing monitor op for DoFencing:child_DoFencing:1"
 do_test 797 "OSDL #797 - Assert triggered: task_id_i > max_call_id"
 do_test 829 "OSDL #829"
 do_test 994 "OSDL #994 - Stopping the last resource in a resource group causes the entire group to be restarted"
 do_test 994-2 "OSDL #994 - with a dependent resource"
 do_test 1360 "OSDL #1360 - Clone stickiness"
 do_test 1484 "OSDL #1484 - on_fail=stop"
 do_test 1494 "OSDL #1494 - Clone stability"
 do_test unrunnable-1 "Unrunnable"
 do_test unrunnable-2 "Unrunnable 2"
 do_test stonith-0 "Stonith loop - 1"
 do_test stonith-1 "Stonith loop - 2"
 do_test stonith-2 "Stonith loop - 3"
 do_test stonith-3 "Stonith startup"
 do_test stonith-4 "Stonith node state"
 do_test bug-1572-1 "Recovery of groups depending on master/slave"
 do_test bug-1572-2 "Recovery of groups depending on master/slave when the master is never re-promoted"
 do_test bug-1685 "Depends-on-master ordering"
 do_test bug-1822 "Don't promote partially active groups"
 do_test bug-pm-11 "New resource added to a m/s group"
 do_test bug-pm-12 "Recover only the failed portion of a cloned group"
 do_test bug-n-387749 "Don't shuffle clone instances"
 do_test bug-n-385265 "Don't ignore the failure stickiness of group children - resource_idvscommon should stay stopped"
 do_test bug-n-385265-2 "Ensure groups are migrated instead of remaining partially active on the current node"
 do_test bug-lf-1920 "Correctly handle probes that find active resources"
 do_test bnc-515172 "Location constraint with multiple expressions"
 do_test colocate-primitive-with-clone "Optional colocation with a clone"
 do_test use-after-free-merge "Use-after-free in native_merge_weights"
 do_test bug-lf-2551 "STONITH ordering for stop"
 do_test bug-lf-2606 "Stonith implies demote"
 do_test bug-lf-2474 "Ensure resource op timeout takes precedence over op_defaults"
 do_test bug-suse-707150 "Prevent vm-01 from starting due to colocation/ordering"
 do_test bug-5014-A-start-B-start "Verify when A starts B starts using symmetrical=false"
do_test bug-5014-A-stop-B-started "Verify when A stops B does not stop if it has already started using symmetrical=false"
do_test bug-5014-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using symmetrical=false"
do_test bug-5014-CthenAthenB-C-stopped "Verify when C then A is symmetrical=true, A then B is symmetrical=false, and C is stopped that nothing starts."
do_test bug-5014-CLONE-A-start-B-start "Verify when A starts B starts using clone resources with symmetrical=false"
do_test bug-5014-CLONE-A-stop-B-started "Verify when A stops B does not stop if it has already started using clone resources with symmetrical=false."
do_test bug-5014-GROUP-A-start-B-start "Verify when A starts B starts when using group resources with symmetrical=false."
do_test bug-5014-GROUP-A-stopped-B-started "Verify when A stops B does not stop if it has already started using group resources with symmetrical=false."
do_test bug-5014-GROUP-A-stopped-B-stopped "Verify when A is stopped and B has not started, B does not start before A using group resources with symmetrical=false."
 do_test bug-5014-ordered-set-symmetrical-false "Verify ordered sets work with symmetrical=false"
 do_test bug-5014-ordered-set-symmetrical-true "Verify ordered sets work with symmetrical=true"
 do_test bug-5007-masterslave_colocation "Verify use of colocation scores other than INFINITY and -INFINITY work on multi-state resources."
 do_test bug-5038 "Prevent restart of anonymous clones when clone-max decreases"
 do_test bug-5025-1 "Automatically clean up failcount after resource config change with reload"
 do_test bug-5025-2 "Make sure clear failcount action isn't set when config does not change."
 do_test bug-5025-3 "Automatically clean up failcount after resource config change with restart"
 do_test bug-5025-4 "Clear failcount when last failure is a start op and rsc attributes changed."
 do_test failcount "Ensure failcounts are correctly expired"
 do_test failcount-block "Ensure failcounts are not expired when on-fail=block is present"
 do_test per-op-failcount "Ensure per-operation failcount is handled and not passed to fence agent"
 do_test monitor-onfail-restart "bug-5058 - Monitor failure with on-fail set to restart"
do_test monitor-onfail-stop    "bug-5058 - Monitor failure with on-fail set to stop"
 do_test bug-5059 "No need to restart p_stateful1:*"
 do_test bug-5069-op-enabled  "Test on-fail=ignore with failure when monitor is enabled."
do_test bug-5069-op-disabled "Test on-fail=ignore with failure when monitor is disabled."
 do_test obsolete-lrm-resource "cl#5115 - Do not use obsolete lrm_resource sections"
 do_test expire-non-blocked-failure "Ignore failure-timeout only if the failed operation has on-fail=block"
 do_test asymmetrical-order-move "Respect asymmetrical ordering when trying to move resources"
 do_test start-then-stop-with-unfence "Avoid graph loop with start-then-stop constraint plus unfencing"
 
 do_test ignore_stonith_rsc_order1 "cl#5056- Ignore order constraint between stonith and non-stonith rsc."
 do_test ignore_stonith_rsc_order2 "cl#5056- Ignore order constraint with group rsc containing mixed stonith and non-stonith."
 do_test ignore_stonith_rsc_order3 "cl#5056- Ignore order constraint, stonith clone and mixed group"
 do_test ignore_stonith_rsc_order4 "cl#5056- Ignore order constraint, stonith clone and clone with nested mixed group"
 do_test honor_stonith_rsc_order1 "cl#5056- Honor order constraint, stonith clone and pure stonith group(single rsc)."
 do_test honor_stonith_rsc_order2 "cl#5056- Honor order constraint, stonith clone and pure stonith group(multiple rsc)"
 do_test honor_stonith_rsc_order3 "cl#5056- Honor order constraint, stonith clones with nested pure stonith group."
 do_test honor_stonith_rsc_order4 "cl#5056- Honor order constraint, between two native stonith rscs."
 do_test probe-timeout "cl#5099 - Default probe timeout"
 
 do_test concurrent-fencing "Allow performing fencing operations in parallel"
 
 echo ""
 do_test systemhealth1  "System Health ()               #1"
 do_test systemhealth2  "System Health ()               #2"
 do_test systemhealth3  "System Health ()               #3"
 do_test systemhealthn1 "System Health (None)           #1"
 do_test systemhealthn2 "System Health (None)           #2"
 do_test systemhealthn3 "System Health (None)           #3"
 do_test systemhealthm1 "System Health (Migrate On Red) #1"
 do_test systemhealthm2 "System Health (Migrate On Red) #2"
 do_test systemhealthm3 "System Health (Migrate On Red) #3"
 do_test systemhealtho1 "System Health (Only Green)     #1"
 do_test systemhealtho2 "System Health (Only Green)     #2"
 do_test systemhealtho3 "System Health (Only Green)     #3"
do_test systemhealthp1 "System Health (Progressive)    #1"
do_test systemhealthp2 "System Health (Progressive)    #2"
do_test systemhealthp3 "System Health (Progressive)    #3"
 
 echo ""
 do_test utilization "Placement Strategy - utilization"
 do_test minimal     "Placement Strategy - minimal"
 do_test balanced    "Placement Strategy - balanced"
 
 echo ""
 do_test placement-stickiness "Optimized Placement Strategy - stickiness"
 do_test placement-priority   "Optimized Placement Strategy - priority"
 do_test placement-location   "Optimized Placement Strategy - location"
 do_test placement-capacity   "Optimized Placement Strategy - capacity"
 
 echo ""
 do_test utilization-order1 "Utilization Order - Simple"
 do_test utilization-order2 "Utilization Order - Complex"
 do_test utilization-order3 "Utilization Order - Migrate"
do_test utilization-order4 "Utilization Order - Live Migration (bnc#695440)"
 do_test utilization-shuffle "Don't displace prmExPostgreSQLDB2 on act2, Start prmExPostgreSQLDB1 on act3"
 do_test load-stopped-loop "Avoid transition loop due to load_stopped (cl#5044)"
 do_test load-stopped-loop-2 "cl#5235 - Prevent graph loops that can be introduced by load_stopped -> migrate_to ordering"
 
 echo ""
 do_test colocated-utilization-primitive-1 "Colocated Utilization - Primitive"
 do_test colocated-utilization-primitive-2 "Colocated Utilization - Choose the most capable node"
 do_test colocated-utilization-group "Colocated Utilization - Group"
 do_test colocated-utilization-clone "Colocated Utilization - Clone"
 
 do_test utilization-check-allowed-nodes "Only check the capacities of the nodes that can run the resource"
 
 echo ""
 do_test reprobe-target_rc "Ensure correct target_rc for reprobe of inactive resources"
 do_test node-maintenance-1 "cl#5128 - Node maintenance"
 do_test node-maintenance-2 "cl#5128 - Node maintenance (coming out of maintenance mode)"
 do_test shutdown-maintenance-node "Do not fence a maintenance node if it shuts down cleanly"
 
 do_test rsc-maintenance "Per-resource maintenance"
 
 echo ""
 do_test not-installed-agent "The resource agent is missing"
 do_test not-installed-tools "Something the resource agent needs is missing"
 
 echo ""
 do_test stopped-monitor-00	"Stopped Monitor - initial start"
 do_test stopped-monitor-01	"Stopped Monitor - failed started"
 do_test stopped-monitor-02	"Stopped Monitor - started multi-up"
 do_test stopped-monitor-03	"Stopped Monitor - stop started"
 do_test stopped-monitor-04	"Stopped Monitor - failed stop"
 do_test stopped-monitor-05	"Stopped Monitor - start unmanaged"
 do_test stopped-monitor-06	"Stopped Monitor - unmanaged multi-up"
 do_test stopped-monitor-07	"Stopped Monitor - start unmanaged multi-up"
 do_test stopped-monitor-08	"Stopped Monitor - migrate"
 do_test stopped-monitor-09	"Stopped Monitor - unmanage started"
 do_test stopped-monitor-10	"Stopped Monitor - unmanaged started multi-up"
 do_test stopped-monitor-11	"Stopped Monitor - stop unmanaged started"
 do_test stopped-monitor-12	"Stopped Monitor - unmanaged started multi-up (target-role="Stopped")"
 do_test stopped-monitor-20	"Stopped Monitor - initial stop"
 do_test stopped-monitor-21	"Stopped Monitor - stopped single-up"
 do_test stopped-monitor-22	"Stopped Monitor - stopped multi-up"
 do_test stopped-monitor-23	"Stopped Monitor - start stopped"
 do_test stopped-monitor-24	"Stopped Monitor - unmanage stopped"
 do_test stopped-monitor-25	"Stopped Monitor - unmanaged stopped multi-up"
 do_test stopped-monitor-26	"Stopped Monitor - start unmanaged stopped"
 do_test stopped-monitor-27	"Stopped Monitor - unmanaged stopped multi-up (target-role="Started")"
 do_test stopped-monitor-30	"Stopped Monitor - new node started"
 do_test stopped-monitor-31	"Stopped Monitor - new node stopped"
 
 echo""
 do_test ticket-primitive-1 "Ticket - Primitive (loss-policy=stop, initial)"
 do_test ticket-primitive-2 "Ticket - Primitive (loss-policy=stop, granted)"
 do_test ticket-primitive-3 "Ticket - Primitive (loss-policy=stop, revoked)"
 do_test ticket-primitive-4 "Ticket - Primitive (loss-policy=demote, initial)"
 do_test ticket-primitive-5 "Ticket - Primitive (loss-policy=demote, granted)"
 do_test ticket-primitive-6 "Ticket - Primitive (loss-policy=demote, revoked)"
 do_test ticket-primitive-7 "Ticket - Primitive (loss-policy=fence, initial)"
 do_test ticket-primitive-8 "Ticket - Primitive (loss-policy=fence, granted)"
 do_test ticket-primitive-9 "Ticket - Primitive (loss-policy=fence, revoked)"
 do_test ticket-primitive-10 "Ticket - Primitive (loss-policy=freeze, initial)"
 do_test ticket-primitive-11 "Ticket - Primitive (loss-policy=freeze, granted)"
 do_test ticket-primitive-12 "Ticket - Primitive (loss-policy=freeze, revoked)"
 
 do_test ticket-primitive-13 "Ticket - Primitive (loss-policy=stop, standby, granted)"
 do_test ticket-primitive-14 "Ticket - Primitive (loss-policy=stop, granted, standby)"
 do_test ticket-primitive-15 "Ticket - Primitive (loss-policy=stop, standby, revoked)"
 do_test ticket-primitive-16 "Ticket - Primitive (loss-policy=demote, standby, granted)"
 do_test ticket-primitive-17 "Ticket - Primitive (loss-policy=demote, granted, standby)"
 do_test ticket-primitive-18 "Ticket - Primitive (loss-policy=demote, standby, revoked)"
 do_test ticket-primitive-19 "Ticket - Primitive (loss-policy=fence, standby, granted)"
 do_test ticket-primitive-20 "Ticket - Primitive (loss-policy=fence, granted, standby)"
 do_test ticket-primitive-21 "Ticket - Primitive (loss-policy=fence, standby, revoked)"
 do_test ticket-primitive-22 "Ticket - Primitive (loss-policy=freeze, standby, granted)"
 do_test ticket-primitive-23 "Ticket - Primitive (loss-policy=freeze, granted, standby)"
 do_test ticket-primitive-24 "Ticket - Primitive (loss-policy=freeze, standby, revoked)"
 
 echo""
 do_test ticket-group-1 "Ticket - Group (loss-policy=stop, initial)"
 do_test ticket-group-2 "Ticket - Group (loss-policy=stop, granted)"
 do_test ticket-group-3 "Ticket - Group (loss-policy=stop, revoked)"
 do_test ticket-group-4 "Ticket - Group (loss-policy=demote, initial)"
 do_test ticket-group-5 "Ticket - Group (loss-policy=demote, granted)"
 do_test ticket-group-6 "Ticket - Group (loss-policy=demote, revoked)"
 do_test ticket-group-7 "Ticket - Group (loss-policy=fence, initial)"
 do_test ticket-group-8 "Ticket - Group (loss-policy=fence, granted)"
 do_test ticket-group-9 "Ticket - Group (loss-policy=fence, revoked)"
 do_test ticket-group-10 "Ticket - Group (loss-policy=freeze, initial)"
 do_test ticket-group-11 "Ticket - Group (loss-policy=freeze, granted)"
 do_test ticket-group-12 "Ticket - Group (loss-policy=freeze, revoked)"
 
 do_test ticket-group-13 "Ticket - Group (loss-policy=stop, standby, granted)"
 do_test ticket-group-14 "Ticket - Group (loss-policy=stop, granted, standby)"
 do_test ticket-group-15 "Ticket - Group (loss-policy=stop, standby, revoked)"
 do_test ticket-group-16 "Ticket - Group (loss-policy=demote, standby, granted)"
 do_test ticket-group-17 "Ticket - Group (loss-policy=demote, granted, standby)"
 do_test ticket-group-18 "Ticket - Group (loss-policy=demote, standby, revoked)"
 do_test ticket-group-19 "Ticket - Group (loss-policy=fence, standby, granted)"
 do_test ticket-group-20 "Ticket - Group (loss-policy=fence, granted, standby)"
 do_test ticket-group-21 "Ticket - Group (loss-policy=fence, standby, revoked)"
 do_test ticket-group-22 "Ticket - Group (loss-policy=freeze, standby, granted)"
 do_test ticket-group-23 "Ticket - Group (loss-policy=freeze, granted, standby)"
 do_test ticket-group-24 "Ticket - Group (loss-policy=freeze, standby, revoked)"
 
 echo""
 do_test ticket-clone-1 "Ticket - Clone (loss-policy=stop, initial)"
 do_test ticket-clone-2 "Ticket - Clone (loss-policy=stop, granted)"
 do_test ticket-clone-3 "Ticket - Clone (loss-policy=stop, revoked)"
 do_test ticket-clone-4 "Ticket - Clone (loss-policy=demote, initial)"
 do_test ticket-clone-5 "Ticket - Clone (loss-policy=demote, granted)"
 do_test ticket-clone-6 "Ticket - Clone (loss-policy=demote, revoked)"
 do_test ticket-clone-7 "Ticket - Clone (loss-policy=fence, initial)"
 do_test ticket-clone-8 "Ticket - Clone (loss-policy=fence, granted)"
 do_test ticket-clone-9 "Ticket - Clone (loss-policy=fence, revoked)"
 do_test ticket-clone-10 "Ticket - Clone (loss-policy=freeze, initial)"
 do_test ticket-clone-11 "Ticket - Clone (loss-policy=freeze, granted)"
 do_test ticket-clone-12 "Ticket - Clone (loss-policy=freeze, revoked)"
 
 do_test ticket-clone-13 "Ticket - Clone (loss-policy=stop, standby, granted)"
 do_test ticket-clone-14 "Ticket - Clone (loss-policy=stop, granted, standby)"
 do_test ticket-clone-15 "Ticket - Clone (loss-policy=stop, standby, revoked)"
 do_test ticket-clone-16 "Ticket - Clone (loss-policy=demote, standby, granted)"
 do_test ticket-clone-17 "Ticket - Clone (loss-policy=demote, granted, standby)"
 do_test ticket-clone-18 "Ticket - Clone (loss-policy=demote, standby, revoked)"
 do_test ticket-clone-19 "Ticket - Clone (loss-policy=fence, standby, granted)"
 do_test ticket-clone-20 "Ticket - Clone (loss-policy=fence, granted, standby)"
 do_test ticket-clone-21 "Ticket - Clone (loss-policy=fence, standby, revoked)"
 do_test ticket-clone-22 "Ticket - Clone (loss-policy=freeze, standby, granted)"
 do_test ticket-clone-23 "Ticket - Clone (loss-policy=freeze, granted, standby)"
 do_test ticket-clone-24 "Ticket - Clone (loss-policy=freeze, standby, revoked)"
 
 echo""
 do_test ticket-master-1 "Ticket - Master (loss-policy=stop, initial)"
 do_test ticket-master-2 "Ticket - Master (loss-policy=stop, granted)"
 do_test ticket-master-3 "Ticket - Master (loss-policy=stop, revoked)"
 do_test ticket-master-4 "Ticket - Master (loss-policy=demote, initial)"
 do_test ticket-master-5 "Ticket - Master (loss-policy=demote, granted)"
 do_test ticket-master-6 "Ticket - Master (loss-policy=demote, revoked)"
 do_test ticket-master-7 "Ticket - Master (loss-policy=fence, initial)"
 do_test ticket-master-8 "Ticket - Master (loss-policy=fence, granted)"
 do_test ticket-master-9 "Ticket - Master (loss-policy=fence, revoked)"
 do_test ticket-master-10 "Ticket - Master (loss-policy=freeze, initial)"
 do_test ticket-master-11 "Ticket - Master (loss-policy=freeze, granted)"
 do_test ticket-master-12 "Ticket - Master (loss-policy=freeze, revoked)"
 
 do_test ticket-master-13 "Ticket - Master (loss-policy=stop, standby, granted)"
 do_test ticket-master-14 "Ticket - Master (loss-policy=stop, granted, standby)"
 do_test ticket-master-15 "Ticket - Master (loss-policy=stop, standby, revoked)"
 do_test ticket-master-16 "Ticket - Master (loss-policy=demote, standby, granted)"
 do_test ticket-master-17 "Ticket - Master (loss-policy=demote, granted, standby)"
 do_test ticket-master-18 "Ticket - Master (loss-policy=demote, standby, revoked)"
 do_test ticket-master-19 "Ticket - Master (loss-policy=fence, standby, granted)"
 do_test ticket-master-20 "Ticket - Master (loss-policy=fence, granted, standby)"
 do_test ticket-master-21 "Ticket - Master (loss-policy=fence, standby, revoked)"
 do_test ticket-master-22 "Ticket - Master (loss-policy=freeze, standby, granted)"
 do_test ticket-master-23 "Ticket - Master (loss-policy=freeze, granted, standby)"
 do_test ticket-master-24 "Ticket - Master (loss-policy=freeze, standby, revoked)"
 
 echo ""
 do_test ticket-rsc-sets-1 "Ticket - Resource sets (1 ticket, initial)"
 do_test ticket-rsc-sets-2 "Ticket - Resource sets (1 ticket, granted)"
 do_test ticket-rsc-sets-3 "Ticket - Resource sets (1 ticket, revoked)"
 do_test ticket-rsc-sets-4 "Ticket - Resource sets (2 tickets, initial)"
 do_test ticket-rsc-sets-5 "Ticket - Resource sets (2 tickets, granted)"
 do_test ticket-rsc-sets-6 "Ticket - Resource sets (2 tickets, granted)"
 do_test ticket-rsc-sets-7 "Ticket - Resource sets (2 tickets, revoked)"
 
 do_test ticket-rsc-sets-8 "Ticket - Resource sets (1 ticket, standby, granted)"
 do_test ticket-rsc-sets-9 "Ticket - Resource sets (1 ticket, granted, standby)"
 do_test ticket-rsc-sets-10 "Ticket - Resource sets (1 ticket, standby, revoked)"
 do_test ticket-rsc-sets-11 "Ticket - Resource sets (2 tickets, standby, granted)"
 do_test ticket-rsc-sets-12 "Ticket - Resource sets (2 tickets, standby, granted)"
 do_test ticket-rsc-sets-13 "Ticket - Resource sets (2 tickets, granted, standby)"
 do_test ticket-rsc-sets-14 "Ticket - Resource sets (2 tickets, standby, revoked)"
 
 do_test cluster-specific-params "Cluster-specific instance attributes based on rules"
 do_test site-specific-params "Site-specific instance attributes based on rules"
 
 echo ""
 do_test template-1 "Template - 1"
 do_test template-2 "Template - 2"
 do_test template-3 "Template - 3 (merge operations)"
 
 do_test template-coloc-1 "Template - Colocation 1"
 do_test template-coloc-2 "Template - Colocation 2"
 do_test template-coloc-3 "Template - Colocation 3"
 do_test template-order-1 "Template - Order 1"
 do_test template-order-2 "Template - Order 2"
 do_test template-order-3 "Template - Order 3"
 do_test template-ticket  "Template - Ticket"
 
 do_test template-rsc-sets-1  "Template - Resource Sets 1"
 do_test template-rsc-sets-2  "Template - Resource Sets 2"
 do_test template-rsc-sets-3  "Template - Resource Sets 3"
 do_test template-rsc-sets-4  "Template - Resource Sets 4"
 
 do_test template-clone-primitive "Cloned primitive from template"
 do_test template-clone-group     "Cloned group from template"
 
 do_test location-sets-templates "Resource sets and templates - Location"
 
 do_test tags-coloc-order-1 "Tags - Colocation and Order (Simple)"
 do_test tags-coloc-order-2 "Tags - Colocation and Order (Resource Sets with Templates)"
 do_test tags-location      "Tags - Location"
 do_test tags-ticket        "Tags - Ticket"
 
 echo ""
 do_test container-1 "Container - initial"
 do_test container-2 "Container - monitor failed"
 do_test container-3 "Container - stop failed"
 do_test container-4 "Container - reached migration-threshold"
 do_test container-group-1 "Container in group - initial"
 do_test container-group-2 "Container in group - monitor failed"
 do_test container-group-3 "Container in group - stop failed"
 do_test container-group-4 "Container in group - reached migration-threshold"
 do_test container-is-remote-node "Place resource within container when container is remote-node"
 do_test bug-rh-1097457 "Kill user defined container/contents ordering"
 do_test bug-cl-5247 "Graph loop when recovering m/s resource in a container"
 
 do_test bundle-order-startup "Bundle startup ordering"
 do_test bundle-order-partial-start "Bundle startup ordering when some dependencies are already running"
 do_test bundle-order-partial-start-2 "Bundle startup ordering when some dependencies and the container are already running"
 do_test bundle-order-stop    "Bundle stop ordering"
 do_test bundle-order-partial-stop "Bundle stop ordering when some dependencies are already stopped"
 
 do_test bundle-order-startup-clone "Prevent startup because bundle isn't promoted"
 do_test bundle-order-startup-clone-2 "Bundle startup with clones"
 do_test bundle-order-stop-clone "Stop bundle because clone is stopping"
 do_test bundle-nested-colocation "Colocation of nested connection resources"
 
 do_test bundle-order-fencing "Order pseudo bundle fencing after parent node fencing if both are happening"
 
 echo ""
 do_test whitebox-fail1 "Fail whitebox container rsc."
 do_test whitebox-fail2 "Fail whitebox container rsc lrmd connection."
 do_test whitebox-fail3 "Failed containers should not run nested on remote nodes."
 do_test whitebox-start "Start whitebox container with resources assigned to it"
 do_test whitebox-stop "Stop whitebox container with resources assigned to it"
 do_test whitebox-move "Move whitebox container with resources assigned to it"
 do_test whitebox-asymmetric "Verify connection rsc opts-in based on container resource"
 do_test whitebox-ms-ordering "Verify promote/demote can not occur before connection is established"
 do_test whitebox-ms-ordering-move "Stop/Start cycle within a moving container"
 do_test whitebox-orphaned    "Properly shutdown orphaned whitebox container"
 do_test whitebox-orphan-ms   "Properly tear down orphan ms resources on remote-nodes"
 do_test whitebox-unexpectedly-running "Recover container nodes the cluster did not start."
 do_test whitebox-migrate1 "Migrate both container and connection resource"
 do_test whitebox-imply-stop-on-fence "Imply stop action on container node rsc when host node is fenced"
 do_test whitebox-nested-group "Verify guest remote-node works nested in a group"
 do_test guest-node-host-dies "Verify guest node is recovered if host goes away"
 
 echo ""
 do_test remote-startup-probes  "Baremetal remote-node startup probes"
 do_test remote-startup         "Start a newly discovered remote-node with no status."
 do_test remote-fence-unclean   "Fence unclean baremetal remote-node"
 do_test remote-fence-unclean2  "Fence baremetal remote-node after cluster node fails and connection cannot be recovered"
 do_test remote-fence-unclean-3 "Probe failed remote nodes (triggers fencing)"
 do_test remote-move            "Move remote-node connection resource"
 do_test remote-disable         "Disable a baremetal remote-node"
 do_test remote-probe-disable   "Probe then stop a baremetal remote-node"
 do_test remote-orphaned        "Properly shutdown orphaned connection resource"
 do_test remote-orphaned2       "Verify we can handle orphaned remote connections with active resources on the remote"
 do_test remote-recover         "Recover connection resource after cluster-node fails."
 do_test remote-stale-node-entry "Make sure we properly handle leftover remote-node entries in the node section"
 do_test remote-partial-migrate  "Make sure partial migrations are handled before ops on the remote node."
 do_test remote-partial-migrate2 "Make sure partial migration target is preferred for remote connection."
 do_test remote-recover-fail     "Make sure start failure causes fencing if rsc are active on remote."
 do_test remote-start-fail       "Make sure a start failure does not result in fencing if no active resources are on remote."
 do_test remote-unclean2         "Make sure monitor failure always results in fencing, even if no rsc are active on remote."
 do_test remote-fence-before-reconnect "Fence before clearing recurring monitor failure"
 do_test remote-recovery		"Recover remote connections before attempting demotion"
 do_test remote-recover-connection "Optimistically recover only the connection"
 do_test remote-recover-all        "Fencing when the connection has no home"
 do_test remote-recover-no-resources   "Fencing when the connection has no home and no active resources"
 do_test remote-recover-unknown        "Fencing when the connection has no home and the remote has no operation history"
 do_test remote-reconnect-delay        "Waiting for remote reconnect interval to expire"
+do_test remote-connection-unrecoverable  "Remote connection host must be fenced, with connection unrecoverable"
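+# Editorial note: a pengine regression test named T is defined by the input
+# CIB pengine/test10/T.xml together with the expected outputs T.scores,
+# T.dot, T.exp, and T.summary that the driver compares against. As a rough
+# sketch (not necessarily this driver's exact invocation), a case such as
+# the new remote-connection-unrecoverable test can be replayed by hand with:
+#
+#   crm_simulate --xml-file test10/remote-connection-unrecoverable.xml \
+#       --simulate --show-scores \
+#       --save-dotfile remote-connection-unrecoverable.dot \
+#       --save-graph remote-connection-unrecoverable.exp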
 
 echo ""
 do_test resource-discovery      "Exercises resource-discovery location constraint option."
 do_test rsc-discovery-per-node  "Disable resource discovery per node"
 
 echo ""
 do_test isolation-start-all   "Start docker isolated resources."
 do_test isolation-restart-all "Restart docker isolated resources."
 do_test isolation-clone       "Cloned isolated primitive."
 
 if [ $DO_VERSIONED_TESTS -eq 1 ]; then
 	echo ""
 	do_test versioned-resources     "Start resources with #ra-version rules"
 	do_test restart-versioned       "Restart resources on #ra-version change"
 	do_test reload-versioned        "Reload resources on #ra-version change"
 
 	echo ""
 	do_test versioned-operations-1  "Use #ra-version to configure operations of native resources"
 	do_test versioned-operations-2  "Use #ra-version to configure operations of stonith resources"
 	do_test versioned-operations-3  "Use #ra-version to configure operations of master/slave resources"
 	do_test versioned-operations-4  "Use #ra-version to configure operations of groups of resources"
 fi
 
 echo ""
 test_results
diff --git a/pengine/test10/remote-connection-unrecoverable.dot b/pengine/test10/remote-connection-unrecoverable.dot
new file mode 100644
index 0000000000..6bfda89e28
--- /dev/null
+++ b/pengine/test10/remote-connection-unrecoverable.dot
@@ -0,0 +1,53 @@
+digraph "g" {
+"all_stopped" -> "killer_start_0 node2" [ style = bold]
+"all_stopped" [ style=bold color="green" fontcolor="orange"]
+"killer_monitor_60000 node2" [ style=bold color="green" fontcolor="black"]
+"killer_start_0 node2" -> "killer_monitor_60000 node2" [ style = bold]
+"killer_start_0 node2" [ style=bold color="green" fontcolor="black"]
+"killer_stop_0 node2" -> "all_stopped" [ style = bold]
+"killer_stop_0 node2" -> "killer_start_0 node2" [ style = bold]
+"killer_stop_0 node2" [ style=bold color="green" fontcolor="black"]
+"remote1_stop_0 node1" -> "all_stopped" [ style = bold]
+"remote1_stop_0 node1" [ style=bold color="green" fontcolor="orange"]
+"rsc1_delete_0 remote1" -> "rsc1_start_0 node2" [ style = dashed]
+"rsc1_delete_0 remote1" [ style=dashed color="red" fontcolor="black"]
+"rsc1_monitor_0 node2" -> "rsc1_start_0 node2" [ style = bold]
+"rsc1_monitor_0 node2" -> "rsc1_stop_0 remote1" [ style = bold]
+"rsc1_monitor_0 node2" [ style=bold color="green" fontcolor="black"]
+"rsc1_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+"rsc1_start_0 node2" -> "rsc1_monitor_10000 node2" [ style = bold]
+"rsc1_start_0 node2" [ style=bold color="green" fontcolor="black"]
+"rsc1_stop_0 remote1" -> "all_stopped" [ style = bold]
+"rsc1_stop_0 remote1" -> "remote1_stop_0 node1" [ style = bold]
+"rsc1_stop_0 remote1" -> "rsc1_delete_0 remote1" [ style = dashed]
+"rsc1_stop_0 remote1" -> "rsc1_start_0 node2" [ style = bold]
+"rsc1_stop_0 remote1" -> "rsc2-master_demote_0" [ style = bold]
+"rsc1_stop_0 remote1" [ style=bold color="green" fontcolor="orange"]
+"rsc2-master_demote_0" -> "rsc2-master_demoted_0" [ style = bold]
+"rsc2-master_demote_0" -> "rsc2_demote_0 node1" [ style = bold]
+"rsc2-master_demote_0" [ style=bold color="green" fontcolor="orange"]
+"rsc2-master_demoted_0" -> "rsc2-master_stop_0" [ style = bold]
+"rsc2-master_demoted_0" [ style=bold color="green" fontcolor="orange"]
+"rsc2-master_stop_0" -> "rsc2-master_stopped_0" [ style = bold]
+"rsc2-master_stop_0" -> "rsc2_stop_0 node1" [ style = bold]
+"rsc2-master_stop_0" [ style=bold color="green" fontcolor="orange"]
+"rsc2-master_stopped_0" [ style=bold color="green" fontcolor="orange"]
+"rsc2_demote_0 node1" -> "rsc2-master_demoted_0" [ style = bold]
+"rsc2_demote_0 node1" -> "rsc2_stop_0 node1" [ style = bold]
+"rsc2_demote_0 node1" [ style=bold color="green" fontcolor="orange"]
+"rsc2_stop_0 node1" -> "all_stopped" [ style = bold]
+"rsc2_stop_0 node1" -> "rsc2-master_stopped_0" [ style = bold]
+"rsc2_stop_0 node1" [ style=bold color="green" fontcolor="orange"]
+"stonith 'reboot' node1" -> "remote1_stop_0 node1" [ style = bold]
+"stonith 'reboot' node1" -> "rsc2-master_stop_0" [ style = bold]
+"stonith 'reboot' node1" -> "rsc2_demote_0 node1" [ style = bold]
+"stonith 'reboot' node1" -> "rsc2_stop_0 node1" [ style = bold]
+"stonith 'reboot' node1" -> "stonith 'reboot' remote1" [ style = bold]
+"stonith 'reboot' node1" [ style=bold color="green" fontcolor="black"]
+"stonith 'reboot' remote1" -> "rsc1_stop_0 remote1" [ style = bold]
+"stonith 'reboot' remote1" -> "stonith_complete" [ style = bold]
+"stonith 'reboot' remote1" [ style=bold color="green" fontcolor="black"]
+"stonith_complete" -> "all_stopped" [ style = bold]
+"stonith_complete" -> "rsc1_start_0 node2" [ style = bold]
+"stonith_complete" [ style=bold color="green" fontcolor="orange"]
+}
diff --git a/pengine/test10/remote-connection-unrecoverable.exp b/pengine/test10/remote-connection-unrecoverable.exp
new file mode 100644
index 0000000000..35e340957e
--- /dev/null
+++ b/pengine/test10/remote-connection-unrecoverable.exp
@@ -0,0 +1,263 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY"  transition_id="0">
+  <synapse id="0">
+    <action_set>
+      <pseudo_event id="10" operation="stop" operation_key="remote1_stop_0">
+        <attributes CRM_meta_timeout="20000"  reconnect_interval="60"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <crm_event id="1" operation="stonith" operation_key="stonith-node1-reboot" on_node="node1" on_node_uuid="1"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="7" operation="stop" operation_key="rsc1_stop_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="1">
+    <action_set>
+      <rsc_op id="11" operation="stop" operation_key="killer_stop_0" on_node="node2" on_node_uuid="2">
+        <primitive id="killer" class="stonith" type="fence_xvm"/>
+        <attributes CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="2">
+    <action_set>
+      <rsc_op id="6" operation="start" operation_key="killer_start_0" on_node="node2" on_node_uuid="2">
+        <primitive id="killer" class="stonith" type="fence_xvm"/>
+        <attributes CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="5" operation="all_stopped" operation_key="all_stopped"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="11" operation="stop" operation_key="killer_stop_0" on_node="node2" on_node_uuid="2"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="3">
+    <action_set>
+      <rsc_op id="3" operation="monitor" operation_key="killer_monitor_60000" on_node="node2" on_node_uuid="2">
+        <primitive id="killer" class="stonith" type="fence_xvm"/>
+        <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="6" operation="start" operation_key="killer_start_0" on_node="node2" on_node_uuid="2"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="4">
+    <action_set>
+      <rsc_op id="13" operation="monitor" operation_key="rsc1_monitor_10000" on_node="node2" on_node_uuid="2">
+        <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="12" operation="start" operation_key="rsc1_start_0" on_node="node2" on_node_uuid="2"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="5">
+    <action_set>
+      <rsc_op id="12" operation="start" operation_key="rsc1_start_0" on_node="node2" on_node_uuid="2">
+        <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="7" operation="stop" operation_key="rsc1_stop_0"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="9" operation="monitor" operation_key="rsc1_monitor_0" on_node="node2" on_node_uuid="2"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="28" operation="stonith_complete" operation_key="stonith_complete"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="6">
+    <action_set>
+      <rsc_op id="9" operation="monitor" operation_key="rsc1_monitor_0" on_node="node2" on_node_uuid="2">
+        <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy"/>
+        <attributes CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_op_target_rc="7" CRM_meta_timeout="20000" />
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="7">
+    <action_set>
+      <pseudo_event id="7" operation="stop" operation_key="rsc1_stop_0">
+        <attributes CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="9" operation="monitor" operation_key="rsc1_monitor_0" on_node="node2" on_node_uuid="2"/>
+      </trigger>
+      <trigger>
+        <crm_event id="29" operation="stonith" operation_key="stonith-remote1-reboot" on_node="remote1" on_node_uuid="remote1"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="8">
+    <action_set>
+      <pseudo_event id="15" operation="stop" operation_key="rsc2_stop_0" internal_operation_key="rsc2:0_stop_0">
+        <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <crm_event id="1" operation="stonith" operation_key="stonith-node1-reboot" on_node="node1" on_node_uuid="1"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="14" operation="demote" operation_key="rsc2_demote_0" internal_operation_key="rsc2:0_demote_0"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="22" operation="stop" operation_key="rsc2-master_stop_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="9">
+    <action_set>
+      <pseudo_event id="14" operation="demote" operation_key="rsc2_demote_0" internal_operation_key="rsc2:0_demote_0">
+        <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <crm_event id="1" operation="stonith" operation_key="stonith-node1-reboot" on_node="node1" on_node_uuid="1"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="26" operation="demote" operation_key="rsc2-master_demote_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="10" priority="1000000">
+    <action_set>
+      <pseudo_event id="27" operation="demoted" operation_key="rsc2-master_demoted_0">
+        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="14" operation="demote" operation_key="rsc2_demote_0" internal_operation_key="rsc2:0_demote_0"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="26" operation="demote" operation_key="rsc2-master_demote_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="11">
+    <action_set>
+      <pseudo_event id="26" operation="demote" operation_key="rsc2-master_demote_0">
+        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="7" operation="stop" operation_key="rsc1_stop_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="12" priority="1000000">
+    <action_set>
+      <pseudo_event id="23" operation="stopped" operation_key="rsc2-master_stopped_0">
+        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="15" operation="stop" operation_key="rsc2_stop_0" internal_operation_key="rsc2:0_stop_0"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="22" operation="stop" operation_key="rsc2-master_stop_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="13">
+    <action_set>
+      <pseudo_event id="22" operation="stop" operation_key="rsc2-master_stop_0">
+        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="2" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="20000" />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <crm_event id="1" operation="stonith" operation_key="stonith-node1-reboot" on_node="node1" on_node_uuid="1"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="27" operation="demoted" operation_key="rsc2-master_demoted_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="14">
+    <action_set>
+      <crm_event id="29" operation="stonith" operation_key="stonith-remote1-reboot" on_node="remote1" on_node_uuid="remote1">
+        <attributes CRM_meta_on_node="remote1" CRM_meta_on_node_uuid="remote1" CRM_meta_probe_complete="true" CRM_meta_stonith_action="reboot" />
+        <downed>
+          <node id="remote1"/>
+        </downed>
+      </crm_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <crm_event id="1" operation="stonith" operation_key="stonith-node1-reboot" on_node="node1" on_node_uuid="1"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="15">
+    <action_set>
+      <pseudo_event id="28" operation="stonith_complete" operation_key="stonith_complete">
+        <attributes />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <crm_event id="29" operation="stonith" operation_key="stonith-remote1-reboot" on_node="remote1" on_node_uuid="remote1"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="16">
+    <action_set>
+      <pseudo_event id="5" operation="all_stopped" operation_key="all_stopped">
+        <attributes />
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="7" operation="stop" operation_key="rsc1_stop_0"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="10" operation="stop" operation_key="remote1_stop_0"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="11" operation="stop" operation_key="killer_stop_0" on_node="node2" on_node_uuid="2"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="15" operation="stop" operation_key="rsc2_stop_0" internal_operation_key="rsc2:0_stop_0"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="28" operation="stonith_complete" operation_key="stonith_complete"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="17">
+    <action_set>
+      <crm_event id="1" operation="stonith" operation_key="stonith-node1-reboot" on_node="node1" on_node_uuid="1">
+        <attributes CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_stonith_action="reboot" />
+        <downed>
+          <node id="1"/>
+        </downed>
+      </crm_event>
+    </action_set>
+    <inputs/>
+  </synapse>
+</transition_graph>
diff --git a/pengine/test10/remote-connection-unrecoverable.scores b/pengine/test10/remote-connection-unrecoverable.scores
new file mode 100644
index 0000000000..5d95fb4d32
--- /dev/null
+++ b/pengine/test10/remote-connection-unrecoverable.scores
@@ -0,0 +1,34 @@
+Allocation scores:
+clone_color: rsc2-master allocation score on node1: 0
+clone_color: rsc2-master allocation score on node2: 0
+clone_color: rsc2-master allocation score on remote1: -INFINITY
+clone_color: rsc2:0 allocation score on node1: 1
+clone_color: rsc2:0 allocation score on node2: 0
+clone_color: rsc2:0 allocation score on remote1: -INFINITY
+clone_color: rsc2:1 allocation score on node1: 0
+clone_color: rsc2:1 allocation score on node2: 11
+clone_color: rsc2:1 allocation score on remote1: -INFINITY
+clone_color: rsc2:2 allocation score on node1: 0
+clone_color: rsc2:2 allocation score on node2: 10
+clone_color: rsc2:2 allocation score on remote1: -INFINITY
+native_color: killer allocation score on node1: 0
+native_color: killer allocation score on node2: 0
+native_color: killer allocation score on remote1: -INFINITY
+native_color: remote1 allocation score on node1: 0
+native_color: remote1 allocation score on node2: -INFINITY
+native_color: remote1 allocation score on remote1: -INFINITY
+native_color: rsc1 allocation score on node1: 0
+native_color: rsc1 allocation score on node2: 0
+native_color: rsc1 allocation score on remote1: 0
+native_color: rsc2:0 allocation score on node1: -INFINITY
+native_color: rsc2:0 allocation score on node2: -INFINITY
+native_color: rsc2:0 allocation score on remote1: -INFINITY
+native_color: rsc2:1 allocation score on node1: -INFINITY
+native_color: rsc2:1 allocation score on node2: 11
+native_color: rsc2:1 allocation score on remote1: -INFINITY
+native_color: rsc2:2 allocation score on node1: -INFINITY
+native_color: rsc2:2 allocation score on node2: -INFINITY
+native_color: rsc2:2 allocation score on remote1: -INFINITY
+rsc2:0 promotion score on none: 0
+rsc2:1 promotion score on node2: 10
+rsc2:2 promotion score on none: 0
diff --git a/pengine/test10/remote-connection-unrecoverable.summary b/pengine/test10/remote-connection-unrecoverable.summary
new file mode 100644
index 0000000000..9bfd7bce73
--- /dev/null
+++ b/pengine/test10/remote-connection-unrecoverable.summary
@@ -0,0 +1,54 @@
+
+Current cluster status:
+Node node1 (1): UNCLEAN (offline)
+Online: [ node2 ]
+RemoteOnline: [ remote1 ]
+
+ remote1	(ocf::pacemaker:remote):	Started node1 (UNCLEAN)
+ killer	(stonith:fence_xvm):	Started node2
+ rsc1	(ocf::pacemaker:Dummy):	Started remote1
+ Master/Slave Set: rsc2-master [rsc2]
+     rsc2	(ocf::pacemaker:Stateful):	Master node1 (UNCLEAN)
+     Masters: [ node2 ]
+     Stopped: [ remote1 ]
+
+Transition Summary:
+ * Fence (reboot) remote1 'resources are active and the connection is unrecoverable'
+ * Fence (reboot) node1 'peer is no longer part of the cluster'
+ * Stop       remote1     (            node1 )   due to node availability
+ * Restart    killer      (            node2 )  
+ * Move       rsc1        ( remote1 -> node2 )  
+ * Stop       rsc2:0      (     Master node1 )   due to node availability
+
+Executing cluster transition:
+ * Resource action: killer          stop on node2
+ * Resource action: rsc1            monitor on node2
+ * Fencing node1 (reboot)
+ * Fencing remote1 (reboot)
+ * Pseudo action:   stonith_complete
+ * Pseudo action:   rsc1_stop_0
+ * Pseudo action:   rsc2-master_demote_0
+ * Pseudo action:   remote1_stop_0
+ * Resource action: rsc1            start on node2
+ * Pseudo action:   rsc2_demote_0
+ * Pseudo action:   rsc2-master_demoted_0
+ * Pseudo action:   rsc2-master_stop_0
+ * Resource action: rsc1            monitor=10000 on node2
+ * Pseudo action:   rsc2_stop_0
+ * Pseudo action:   rsc2-master_stopped_0
+ * Pseudo action:   all_stopped
+ * Resource action: killer          start on node2
+ * Resource action: killer          monitor=60000 on node2
+
+Revised cluster status:
+Online: [ node2 ]
+OFFLINE: [ node1 ]
+RemoteOFFLINE: [ remote1 ]
+
+ remote1	(ocf::pacemaker:remote):	Stopped
+ killer	(stonith:fence_xvm):	Started node2
+ rsc1	(ocf::pacemaker:Dummy):	Started node2
+ Master/Slave Set: rsc2-master [rsc2]
+     Masters: [ node2 ]
+     Stopped: [ node1 remote1 ]
+
diff --git a/pengine/test10/remote-connection-unrecoverable.xml b/pengine/test10/remote-connection-unrecoverable.xml
new file mode 100644
index 0000000000..4dda833351
--- /dev/null
+++ b/pengine/test10/remote-connection-unrecoverable.xml
@@ -0,0 +1,125 @@
+<cib crm_feature_set="3.0.10" validate-with="pacemaker-2.3" epoch="34" num_updates="1" admin_epoch="0" cib-last-written="Mon Apr  4 11:58:30 2016" update-origin="node1" update-client="crmd" update-user="hacluster" have-quorum="1" dc-uuid="2">
+  <configuration>
+    <crm_config>
+      <cluster_property_set id="cib-bootstrap-options">
+        <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+        <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.16-1"/>
+        <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+        <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="mycluster"/>
+        <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1459735110"/>
+      </cluster_property_set>
+    </crm_config>
+    <nodes>
+      <node id="1" uname="node1"/>
+      <node id="2" uname="node2"/>
+      <node id="remote1" type="remote" uname="remote1"/>
+    </nodes>
+    <resources>
+      <primitive class="ocf" id="remote1" provider="pacemaker" type="remote">
+        <instance_attributes id="remote1-instance_attributes">
+          <nvpair id="remote1-instance_attributes-reconnect_interval" name="reconnect_interval" value="60"/>
+        </instance_attributes>
+        <operations>
+          <op id="remote1-monitor-interval-20" interval="20" name="monitor"/>
+        </operations>
+      </primitive>
+      <primitive class="stonith" id="killer" type="fence_xvm">
+	<instance_attributes id="killer-instance_attributes"/>
+        <operations>
+          <op id="killer-monitor-interval-60s" interval="60s" name="monitor"/>
+        </operations>
+      </primitive>
+      <primitive id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
+        <instance_attributes id="rsc1-instance_attributes"/>
+        <operations>
+          <op id="rsc1-monitor-interval-10" interval="10" name="monitor" timeout="20"/>
+        </operations>
+      </primitive>
+      <master id="rsc2-master">
+        <primitive id="rsc2" class="ocf" provider="pacemaker" type="Stateful">
+          <instance_attributes id="rsc2-instance_attributes"/>
+          <operations>
+            <op id="rsc2-monitor-interval-10" interval="10" name="monitor" role="Master" timeout="20"/>
+            <op id="rsc2-monitor-interval-11" interval="11" name="monitor" role="Slave" timeout="20"/>
+          </operations>
+        </primitive>
+        <meta_attributes id="rsc2-master-meta_attributes">
+          <nvpair id="rsc2-master-meta_attributes-master-max" name="master-max" value="2"/>
+        </meta_attributes>
+      </master>
+    </resources>
+    <constraints>
+      <rsc_location id="location-rsc2-master" resource-discovery="exclusive" rsc="rsc2-master">
+        <rule id="location-rsc2-master-rule" score="0">
+          <expression attribute="#kind" id="location-rsc2-master-rule-expr" operation="ne" value="remote"/>
+        </rule>
+      </rsc_location>
+      <rsc_order first="rsc2-master" first-action="promote" id="order-rsc2-master-rsc1-mandatory" then="rsc1" then-action="start"/>
+      <rsc_location id="cli-ban-remote1-on-node2" rsc="remote1" role="Started" node="node2" score="-INFINITY"/>
+    </constraints>
+  </configuration>
+  <status>
+    <node_state id="1" uname="node1" in_ccm="false" crmd="offline" crm-debug-origin="do_state_transition" join="down" expected="member">
+      <lrm id="1">
+        <lrm_resources>
+          <lrm_resource id="remote1" type="remote" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="remote1_last_0" operation_key="remote1_migrate_from_0" operation="migrate_from" crm-debug-origin="do_update_resource" crm_feature_set="3.0.10" transition-key="16:294:0:9dd31f23-ee0c-492f-83cb-026e8d9fede4" transition-magic="0:0;16:294:0:9dd31f23-ee0c-492f-83cb-026e8d9fede4" on_node="node1" call-id="2" rc-code="0" op-status="0" interval="0" last-run="1459733155" last-rc-change="1459733155" exec-time="0" queue-time="0" op-digest="a48beba1b11f09d6b1c15da8db5bb0a2" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" migrate_source="node2" migrate_target="node1"/>
+            <lrm_rsc_op id="remote1_monitor_20000" operation_key="remote1_monitor_20000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.10" transition-key="14:294:0:9dd31f23-ee0c-492f-83cb-026e8d9fede4" transition-magic="0:0;14:294:0:9dd31f23-ee0c-492f-83cb-026e8d9fede4" on_node="node1" call-id="3" rc-code="0" op-status="0" interval="20000" last-rc-change="1459733156" exec-time="0" queue-time="0" op-digest="6e5bb737f46c381d8a46fb4162afd9e0"/>
+          </lrm_resource>
+          <lrm_resource id="killer" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="killer_last_0" operation_key="killer_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.10" transition-key="17:294:0:9dd31f23-ee0c-492f-83cb-026e8d9fede4" transition-magic="0:0;17:294:0:9dd31f23-ee0c-492f-83cb-026e8d9fede4" on_node="node1" call-id="18" rc-code="0" op-status="0" interval="0" last-run="1459733155" last-rc-change="1459733155" exec-time="1" queue-time="0" op-digest="208febaab0d91bc529d468f4bec44d73"/>
+            <lrm_rsc_op id="killer_monitor_60000" operation_key="killer_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.10" transition-key="17:291:0:9dd31f23-ee0c-492f-83cb-026e8d9fede4" transition-magic="0:0;17:291:0:9dd31f23-ee0c-492f-83cb-026e8d9fede4" on_node="node1" call-id="13" rc-code="0" op-status="0" interval="60000" last-rc-change="1459732738" exec-time="21" queue-time="0" op-digest="26ce52d3653d32c5f235c6e0a33ea4ff"/>
+          </lrm_resource>
+          <lrm_resource id="rsc2" type="Stateful" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_promote_0" operation="promote" crm-debug-origin="do_update_resource" crm_feature_set="3.0.10" transition-key="19:292:0:9dd31f23-ee0c-492f-83cb-026e8d9fede4" transition-magic="0:0;19:292:0:9dd31f23-ee0c-492f-83cb-026e8d9fede4" on_node="node1" call-id="15" rc-code="0" op-status="0" interval="0" last-run="1459732738" last-rc-change="1459732738" exec-time="33" queue-time="1" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="rsc2_last_failure_0" operation_key="rsc2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.10" transition-key="10:291:7:9dd31f23-ee0c-492f-83cb-026e8d9fede4" transition-magic="0:0;10:291:7:9dd31f23-ee0c-492f-83cb-026e8d9fede4" on_node="node1" call-id="14" rc-code="0" op-status="0" interval="0" last-run="1459732738" last-rc-change="1459732738" exec-time="42" queue-time="33" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="rsc2_monitor_10000" operation_key="rsc2_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.10" transition-key="21:293:8:9dd31f23-ee0c-492f-83cb-026e8d9fede4" transition-magic="0:8;21:293:8:9dd31f23-ee0c-492f-83cb-026e8d9fede4" on_node="node1" call-id="16" rc-code="8" op-status="0" interval="10000" last-rc-change="1459732738" exec-time="10" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+      <transient_attributes id="1">
+        <instance_attributes id="status-1"/>
+      </transient_attributes>
+    </node_state>
+    <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <transient_attributes id="2">
+        <instance_attributes id="status-2">
+          <nvpair id="status-2-shutdown" name="shutdown" value="0"/>
+          <nvpair id="status-2-probe_complete" name="probe_complete" value="true"/>
+          <nvpair id="status-2-master-rsc2" name="master-rsc2" value="10"/>
+        </instance_attributes>
+      </transient_attributes>
+      <lrm id="2">
+        <lrm_resources>
+          <lrm_resource id="rsc2" type="Stateful" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_promote_0" operation="promote" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.10" transition-key="26:42:0:cf96e433-51e7-4bff-9a69-23538ab2fbe6" transition-magic="0:0;26:42:0:cf96e433-51e7-4bff-9a69-23538ab2fbe6" on_node="node2" call-id="15" rc-code="0" op-status="0" interval="0" last-run="1459489730" last-rc-change="1459489730" exec-time="57" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="rsc2_monitor_10000" operation_key="rsc2_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.10" transition-key="22:4:8:9dd31f23-ee0c-492f-83cb-026e8d9fede4" transition-magic="0:8;22:4:8:9dd31f23-ee0c-492f-83cb-026e8d9fede4" on_node="node2" call-id="16" rc-code="8" op-status="0" interval="10000" last-rc-change="1459489753" exec-time="32" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+          </lrm_resource>
+          <lrm_resource id="killer" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="killer_last_0" operation_key="killer_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.10" transition-key="18:294:0:9dd31f23-ee0c-492f-83cb-026e8d9fede4" transition-magic="0:0;18:294:0:9dd31f23-ee0c-492f-83cb-026e8d9fede4" on_node="node2" call-id="27" rc-code="0" op-status="0" interval="0" last-run="1459733155" last-rc-change="1459733155" exec-time="22" queue-time="0" op-digest="208febaab0d91bc529d468f4bec44d73"/>
+            <lrm_rsc_op id="killer_monitor_60000" operation_key="killer_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.10" transition-key="19:294:0:9dd31f23-ee0c-492f-83cb-026e8d9fede4" transition-magic="0:0;19:294:0:9dd31f23-ee0c-492f-83cb-026e8d9fede4" on_node="node2" call-id="28" rc-code="0" op-status="0" interval="60000" last-rc-change="1459733156" exec-time="22" queue-time="0" op-digest="26ce52d3653d32c5f235c6e0a33ea4ff"/>
+          </lrm_resource>
+          <lrm_resource id="remote1" type="remote" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="remote1_last_0" operation_key="remote1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.10" transition-key="8:299:7:9dd31f23-ee0c-492f-83cb-026e8d9fede4" transition-magic="0:7;8:299:7:9dd31f23-ee0c-492f-83cb-026e8d9fede4" on_node="node2" call-id="10" rc-code="7" op-status="0" interval="0" last-run="1459735111" last-rc-change="1459735111" exec-time="0" queue-time="0" op-digest="a48beba1b11f09d6b1c15da8db5bb0a2" op-force-restart=" server " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state remote_node="true" id="remote1" uname="remote1" crm-debug-origin="do_state_transition" node_fenced="0">
+      <lrm id="remote1">
+        <lrm_resources>
+          <lrm_resource id="rsc1" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="rsc1_last_failure_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.10" transition-key="8:15:7:9dd31f23-ee0c-492f-83cb-026e8d9fede4" transition-magic="0:0;8:15:7:9dd31f23-ee0c-492f-83cb-026e8d9fede4" on_node="node2" call-id="176" rc-code="0" op-status="0" interval="0" last-run="1459489996" last-rc-change="1459489996" exec-time="8" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.10" transition-key="8:15:7:9dd31f23-ee0c-492f-83cb-026e8d9fede4" transition-magic="0:0;8:15:7:9dd31f23-ee0c-492f-83cb-026e8d9fede4" on_node="node2" call-id="176" rc-code="0" op-status="0" interval="0" last-run="1459489996" last-rc-change="1459489996" exec-time="8" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart=" state " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="rsc1_monitor_10000" operation_key="rsc1_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.10" transition-key="15:16:0:9dd31f23-ee0c-492f-83cb-026e8d9fede4" transition-magic="0:0;15:16:0:9dd31f23-ee0c-492f-83cb-026e8d9fede4" on_node="node2" call-id="178" rc-code="0" op-status="0" interval="10000" op-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+      <transient_attributes id="remote1">
+        <instance_attributes id="status-remote1">
+          <nvpair id="status-remote1-probe_complete" name="probe_complete" value="true"/>
+        </instance_attributes>
+      </transient_attributes>
+    </node_state>
+  </status>
+</cib>
diff --git a/tools/crm_resource.c b/tools/crm_resource.c
index 00363b758d..d3ace74390 100644
--- a/tools/crm_resource.c
+++ b/tools/crm_resource.c
@@ -1,1144 +1,1196 @@
 
 /*
  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  *
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_resource.h>
 
 #include <sys/param.h>
 
 #include <crm/crm.h>
 
 #include <stdio.h>
 #include <sys/types.h>
 #include <unistd.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <libgen.h>
 #include <time.h>
 
 bool BE_QUIET = FALSE;
 bool scope_master = FALSE;
 int cib_options = cib_sync_call;
 
 GMainLoop *mainloop = NULL;
 
 #define message_timeout_ms 60*1000
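+/* Editorial note: the macro body is left unparenthesized, so expressions
+ * such as message_timeout_ms / 1000 only work because 60*1000/1000
+ * evaluates left to right; (60*1000) would be the defensive spelling. */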
 
 static gboolean
 resource_ipc_timeout(gpointer data)
 {
     fprintf(stderr, "No messages received in %d seconds.. aborting\n",
             (int)message_timeout_ms / 1000);
     crm_err("No messages received in %d seconds", (int)message_timeout_ms / 1000);
     return crm_exit(-1);
 }
 
 static void
 resource_ipc_connection_destroy(gpointer user_data)
 {
     crm_info("Connection to CRMd was terminated");
     crm_exit(1);
 }
 
 static void
 start_mainloop(void)
 {
     if (crmd_replies_needed == 0) {
         return;
     }
 
     mainloop = g_main_new(FALSE);
     fprintf(stderr, "Waiting for %d replies from the CRMd", crmd_replies_needed);
     crm_debug("Waiting for %d replies from the CRMd", crmd_replies_needed);
 
     g_timeout_add(message_timeout_ms, resource_ipc_timeout, NULL);
     g_main_run(mainloop);
 }
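+/* Editorial sketch of the reply-counting pattern above (crmd_replies_needed
+ * is declared elsewhere in crm_resource): each fire-and-forget request to
+ * the CRMd increments the counter, start_mainloop() then blocks until
+ * resource_ipc_callback() has decremented it to zero and calls
+ * crm_exit(pcmk_ok), or resource_ipc_timeout() fires after
+ * message_timeout_ms and aborts. */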
 
 static int
 resource_ipc_callback(const char *buffer, ssize_t length, gpointer userdata)
 {
     xmlNode *msg = string2xml(buffer);
 
     fprintf(stderr, ".");
     crm_log_xml_trace(msg, "[inbound]");
 
     crmd_replies_needed--;
     if (crmd_replies_needed == 0) {
         fprintf(stderr, " OK\n");
         crm_debug("Got all the replies we expected");
         return crm_exit(pcmk_ok);
     }
 
     free_xml(msg);
     return 0;
 }
 
 struct ipc_client_callbacks crm_callbacks = {
     .dispatch = resource_ipc_callback,
     .destroy = resource_ipc_connection_destroy,
 };
 
 
 /* short option letters still available: eEJkKXyYZ */
 
 /* *INDENT-OFF* */
 static struct crm_option long_options[] = {
     /* Top-level Options */
     {
-        "help", 0, 0, '?',
+        "help", no_argument, NULL, '?',
         "\t\tDisplay this text and exit"
     },
     {
-        "version", 0, 0, '$',
+        "version", no_argument, NULL, '$',
         "\t\tDisplay version information and exit"
     },
     {
-        "verbose", 0, 0, 'V',
+        "verbose", no_argument, NULL, 'V',
         "\t\tIncrease debug output (may be specified multiple times)"
     },
     {
-        "quiet", 0, 0, 'Q',
+        "quiet", no_argument, NULL, 'Q',
         "\t\tBe less descriptive in results"
     },
     {
-        "resource", 1, 0, 'r',
+        "resource", required_argument, NULL, 'r',
         "\tResource ID"
     },
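+    /* Editorial note: each entry mirrors getopt_long()'s struct option
+     * fields (name, has_arg, flag, val), followed by Pacemaker's help text
+     * and optional flags such as pcmk_option_hidden. Entries with val 0 are
+     * long-only options matched by name in main(); the "-spacer-" entries
+     * merely insert section headings into --help output. */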
 
-    {"-spacer-",1, 0, '-', "\nQueries:"},
-    {"list",       0, 0, 'L', "\t\tList all cluster resources with status"},
+    { "-spacer-", no_argument, NULL, '-', "\nQueries:" },
     {
-        "list-raw", 0, 0, 'l',
+        "list", no_argument, NULL, 'L',
+        "\t\tList all cluster resources with status"},
+    {
+        "list-raw", no_argument, NULL, 'l',
         "\t\tList IDs of all instantiated resources (individual members rather than groups etc.)"
     },
-    {"list-cts",   0, 0, 'c', NULL, pcmk_option_hidden},
     {
-        "list-operations", 0, 0, 'O',
+        "list-cts", no_argument, NULL, 'c',
+        NULL, pcmk_option_hidden
+    },
+    {
+        "list-operations", no_argument, NULL, 'O',
         "\tList active resource operations, optionally filtered by --resource and/or --node"
     },
     {
-        "list-all-operations", 0, 0, 'o',
+        "list-all-operations", no_argument, NULL, 'o',
         "List all resource operations, optionally filtered by --resource and/or --node"
     },
-    {"pending",    0, 0, 'j', "\t\tDisplay pending state if 'record-pending' is enabled", pcmk_option_hidden},
-
-    {"list-standards",        0, 0, 0, "\tList supported standards"},
-    {"list-ocf-providers",    0, 0, 0, "List all available OCF providers"},
-    {"list-agents",           1, 0, 0, "List all agents available for the named standard and/or provider."},
-    {"list-ocf-alternatives", 1, 0, 0, "List all available providers for the named OCF agent"},
-    {"show-metadata",         1, 0, 0, "Show the metadata for the named class:provider:agent"},
-
     {
-        "query-xml", 0, 0, 'q',
+        "pending", no_argument, NULL, 'j',
+        "\t\tDisplay pending state if 'record-pending' is enabled",
+        pcmk_option_hidden
+    },
+    {
+        "list-standards", no_argument, NULL, 0,
+        "\tList supported standards"
+    },
+    {
+        "list-ocf-providers", no_argument, NULL, 0,
+        "List all available OCF providers"
+    },
+    {
+        "list-agents", required_argument, NULL, 0,
+        "List all agents available for the named standard and/or provider."
+    },
+    {
+        "list-ocf-alternatives", required_argument, NULL, 0,
+        "List all available providers for the named OCF agent"
+    },
+    {
+        "show-metadata", required_argument, NULL, 0,
+        "Show the metadata for the named class:provider:agent"
+    },
+    {
+        "query-xml", no_argument, NULL, 'q',
         "\tShow XML configuration of resource (after any template expansion)"
     },
     {
-        "query-xml-raw", 0, 0, 'w',
+        "query-xml-raw", no_argument, NULL, 'w',
         "\tShow XML configuration of resource (before any template expansion)"
     },
     {
-        "get-parameter", 1, 0, 'g',
+        "get-parameter", required_argument, NULL, 'g',
         "Display named parameter for resource.\n"
         "\t\t\t\tUse instance attribute unless --meta or --utilization is specified"
     },
     {
-        "get-property", 1, 0, 'G',
+        "get-property", required_argument, NULL, 'G',
         "Display named property of resource ('class', 'type', or 'provider') (requires --resource)",
         pcmk_option_hidden
     },
     {
-        "locate", 0, 0, 'W',
+        "locate", no_argument, NULL, 'W',
         "\t\tShow node(s) currently running resource"
     },
-    {"stack",      0, 0, 'A', "\t\tDisplay the prerequisites and dependents of a resource"},
-    {"constraints",0, 0, 'a', "\tDisplay the (co)location constraints that apply to a resource"},
     {
-        "why", 0, 0, 'Y',
+        "stack", no_argument, NULL, 'A',
+        "\t\tDisplay the prerequisites and dependents of a resource"
+    },
+    {
+        "constraints", no_argument, NULL, 'a',
+        "\tDisplay the (co)location constraints that apply to a resource"
+    },
+    {
+        "why", no_argument, NULL, 'Y',
         "\t\tShow why resources are not running, optionally filtered by --resource and/or --node"
     },
-    {"-spacer-", 1, 0, '-', "\nCommands:"},
-    {"validate",   0, 0, 0, "\t\tCall the validate-all action of the local given resource"},
+
+    { "-spacer-", no_argument, NULL, '-', "\nCommands:" },
+    {
+        "validate", no_argument, NULL, 0,
+        "\t\tCall the validate-all action of the local given resource"
+    },
     {
-        "cleanup", 0, 0, 'C',
+        "cleanup", no_argument, NULL, 'C',
         "\t\tDelete resource's history (including failures) so its current state is rechecked.\n"
         "\t\t\t\tOptionally filtered by --resource, --node, --operation, and --interval (otherwise all).\n"
         "\t\t\t\tUnless --force is specified, resource's group or clone (if any) will also be cleaned"
     },
     {
-        "set-parameter", 1, 0, 'p',
+        "set-parameter", required_argument, NULL, 'p',
         "Set named parameter for resource (requires -v).\n"
         "\t\t\t\tUse instance attribute unless --meta or --utilization is specified."
     },
     {
-        "delete-parameter", 1, 0, 'd',
+        "delete-parameter", required_argument, NULL, 'd',
         "Delete named parameter for resource.\n"
         "\t\t\t\tUse instance attribute unless --meta or --utilization is specified."
     },
     {
-        "set-property", 1, 0, 'S',
+        "set-property", required_argument, NULL, 'S',
         "Set named property of resource ('class', 'type', or 'provider') (requires -r, -t, -v)",
         pcmk_option_hidden
     },
 
-    {"-spacer-", 1, 0, '-', "\nResource location:"},
+    { "-spacer-", no_argument, NULL, '-', "\nResource location:" },
     {
-        "move",    0, 0, 'M',
+        "move", no_argument, NULL, 'M',
         "\t\tCreate a constraint to move resource. If --node is specified, the constraint\n"
         "\t\t\t\twill be to move to that node, otherwise it will be to ban the current node.\n"
         "\t\t\t\tUnless --force is specified, this will return an error if the resource is\n"
         "\t\t\t\talready running on the specified node. If --force is specified, this will\n"
         "\t\t\t\talways ban the current node. Optional: --lifetime, --master.\n"
         "\t\t\t\tNOTE: This may prevent the resource from running on its previous location\n"
         "\t\t\t\tuntil the implicit constraint expires or is removed with --clear."
     },
     {
-        "ban",    0, 0, 'B',
+        "ban", no_argument, NULL, 'B',
         "\t\tCreate a constraint to keep resource off a node. Optional: --node, --lifetime, --master.\n"
         "\t\t\t\tNOTE: This will prevent the resource from running on the affected node\n"
         "\t\t\t\tuntil the implicit constraint expires or is removed with --clear.\n"
         "\t\t\t\tIf --node is not specified, it defaults to the node currently running the resource\n"
         "\t\t\t\tfor primitives and groups, or the master for master/slave clones with master-max=1\n"
         "\t\t\t\t(all other situations result in an error as there is no sane default).\n"
     },
     {
-        "clear", 0, 0, 'U',
+        "clear", no_argument, NULL, 'U',
         "\t\tRemove all constraints created by the --ban and/or --move commands.\n"
         "\t\t\t\tRequires: --resource. Optional: --node, --master.\n"
         "\t\t\t\tIf --node is not specified, all constraints created by --ban and --move\n"
         "\t\t\t\twill be removed for the named resource. If --node and --force are specified,\n"
         "\t\t\t\tany constraint created by --move will be cleared, even if it is not for the specified node."
     },
     {
-        "lifetime", 0, 0, 'u',
-        "\tLifespan (as ISO 8601 duration) of constraints created by the --ban and --move commands\n"
+        "lifetime", required_argument, NULL, 'u',
+        "\tLifespan (as ISO 8601 duration) of created constraints (with -B, -M)\n"
         "\t\t\t\t(see https://en.wikipedia.org/wiki/ISO_8601#Durations)"
     },
     {
-        "master",  0, 0,  0,
-        "\t\tLimit the scope of the --ban, --move, and --clear commands to the Master role.\n"
-        "\t\t\t\tFor --ban and --move, the previous master may remain active in the Slave role."
+        "master", no_argument, NULL, 0,
+        "\t\tLimit scope of command to the Master role (with -B, -M, -U).\n"
+        "\t\t\t\tFor -B and -M, the previous master may remain active in the Slave role."
     },
 
-    {"-spacer-",   1, 0, '-', "\nAdvanced Commands:"},
-    {"delete",     0, 0, 'D', "\t\t(Advanced) Delete a resource from the CIB. Required: -t"},
-    {"fail",       0, 0, 'F', "\t\t(Advanced) Tell the cluster this resource has failed"},
-    {"restart",    0, 0,  0,  "\t\t(Advanced) Tell the cluster to restart this resource and anything that depends on it"},
-    {"wait",       0, 0,  0,  "\t\t(Advanced) Wait until the cluster settles into a stable state"},
+    { "-spacer-", no_argument, NULL, '-', "\nAdvanced Commands:" },
+    {
+        "delete", no_argument, NULL, 'D',
+        "\t\t(Advanced) Delete a resource from the CIB. Required: -t"
+    },
+    {
+        "fail", no_argument, NULL, 'F',
+        "\t\t(Advanced) Tell the cluster this resource has failed"
+    },
+    {
+        "restart", no_argument, NULL, 0,
+        "\t\t(Advanced) Tell the cluster to restart this resource and anything that depends on it"
+    },
+    {
+        "wait", no_argument, NULL, 0,
+        "\t\t(Advanced) Wait until the cluster settles into a stable state"
+    },
     {
-        "force-demote", 0, 0, 0,
+        "force-demote", no_argument, NULL, 0,
         "\t(Advanced) Bypass the cluster and demote a resource on the local node.\n"
         "\t\t\t\tUnless --force is specified, this will refuse to do so if the cluster\n"
         "\t\t\t\tbelieves the resource is a clone instance already running on the local node."
     },
     {
-        "force-stop", 0, 0, 0,
+        "force-stop", no_argument, NULL, 0,
         "\t(Advanced) Bypass the cluster and stop a resource on the local node."
     },
     {
-        "force-start", 0, 0, 0,
+        "force-start", no_argument, NULL, 0,
         "\t(Advanced) Bypass the cluster and start a resource on the local node.\n"
         "\t\t\t\tUnless --force is specified, this will refuse to do so if the cluster\n"
         "\t\t\t\tbelieves the resource is a clone instance already running on the local node."
     },
     {
-        "force-promote", 0, 0, 0,
+        "force-promote", no_argument, NULL, 0,
         "\t(Advanced) Bypass the cluster and promote a resource on the local node.\n"
         "\t\t\t\tUnless --force is specified, this will refuse to do so if the cluster\n"
         "\t\t\t\tbelieves the resource is a clone instance already running on the local node."
     },
     {
-        "force-check", 0, 0, 0,
+        "force-check", no_argument, NULL, 0,
         "\t(Advanced) Bypass the cluster and check the state of a resource on the local node."
     },
 
-    {"-spacer-", 1, 0, '-', "\nAdditional Options:"},
+    { "-spacer-", no_argument, NULL, '-', "\nAdditional Options:" },
     {
-        "node", 1, 0, 'N',
+        "node", required_argument, NULL, 'N',
         "\tNode name"
     },
-    {"recursive",       0, 0,  0,  "\tFollow colocation chains when using --set-parameter"},
     {
-        "resource-type", 1, 0, 't',
+        "recursive", no_argument, NULL, 0,
+        "\tFollow colocation chains when using --set-parameter"
+    },
+    {
+        "resource-type", required_argument, NULL, 't',
         "Resource XML element (primitive, group, etc.) (with -D)"
     },
-    {"parameter-value", 1, 0, 'v', "Value to use with -p"},
     {
-        "meta", 0, 0, 'm',
+        "parameter-value", required_argument, NULL, 'v',
+        "Value to use with -p"
+    },
+    {
+        "meta", no_argument, NULL, 'm',
         "\t\tUse resource meta-attribute instead of instance attribute (with -p, -g, -d)"
     },
     {
-        "utilization", 0, 0, 'z',
+        "utilization", no_argument, NULL, 'z',
         "\tUse resource utilization attribute instead of instance attribute (with -p, -g, -d)"
     },
     {
-        "operation",      required_argument, NULL, 'n',
+        "operation", required_argument, NULL, 'n',
         "\tOperation to clear instead of all (with -C -r)"
     },
     {
-        "interval",       required_argument, NULL, 'I',
+        "interval", required_argument, NULL, 'I',
         "\tInterval of operation to clear (default 0) (with -C -r -n)"
     },
     {
-        "set-name", 1, 0, 's',
+        "set-name", required_argument, NULL, 's',
         "\t(Advanced) XML ID of attributes element to use (with -p, -d)"
     },
     {
-        "nvpair", 1, 0, 'i',
+        "nvpair", required_argument, NULL, 'i',
         "\t(Advanced) XML ID of nvpair element to use (with -p, -d)"
     },
     {
-        "timeout", 1, 0, 'T',
+        "timeout", required_argument, NULL, 'T',
         "\t(Advanced) Abort if command does not finish in this time (with --restart, --wait)"
     },
     {
-        "force", 0, 0, 'f',
+        "force", no_argument, NULL, 'f',
         "\t\tIf making CIB changes, do so regardless of quorum.\n"
         "\t\t\t\tSee help for individual commands for additional behavior.\n"
     },
-
-    {"xml-file", 1, 0, 'x', NULL, pcmk_option_hidden},
+    {
+        "xml-file", required_argument, NULL, 'x',
+        NULL, pcmk_option_hidden
+    },
 
     /* legacy options */
-    {"host-uname", 1, 0, 'H', NULL, pcmk_option_hidden},
-    {"migrate",    0, 0, 'M', NULL, pcmk_option_hidden},
-    {"un-migrate", 0, 0, 'U', NULL, pcmk_option_hidden},
-    {"un-move",    0, 0, 'U', NULL, pcmk_option_hidden},
-
-    {"refresh",    0, 0, 'R', NULL, pcmk_option_hidden},
-    {"reprobe",    0, 0, 'P', NULL, pcmk_option_hidden},
-
-    {"-spacer-", 1, 0, '-', "\nExamples:", pcmk_option_paragraph},
-    {"-spacer-", 1, 0, '-', "List the available OCF agents:", pcmk_option_paragraph},
-    {"-spacer-", 1, 0, '-', " crm_resource --list-agents ocf", pcmk_option_example},
-    {"-spacer-", 1, 0, '-', "List the available OCF agents from the linux-ha project:", pcmk_option_paragraph},
-    {"-spacer-", 1, 0, '-', " crm_resource --list-agents ocf:heartbeat", pcmk_option_example},
-    {"-spacer-", 1, 0, '-', "Move 'myResource' to a specific node:", pcmk_option_paragraph},
-    {"-spacer-", 1, 0, '-', " crm_resource --resource myResource --move --node altNode", pcmk_option_example},
-    {"-spacer-", 1, 0, '-', "Allow (but not force) 'myResource' to move back to its original location:", pcmk_option_paragraph},
-    {"-spacer-", 1, 0, '-', " crm_resource --resource myResource --clear", pcmk_option_example},
-    {"-spacer-", 1, 0, '-', "Stop 'myResource' (and anything that depends on it):", pcmk_option_paragraph},
-    {"-spacer-", 1, 0, '-', " crm_resource --resource myResource --set-parameter target-role --meta --parameter-value Stopped", pcmk_option_example},
-    {"-spacer-", 1, 0, '-', "Tell the cluster not to manage 'myResource':", pcmk_option_paragraph},
-    {"-spacer-", 1, 0, '-', "The cluster will not attempt to start or stop the resource under any circumstances."},
-    {"-spacer-", 1, 0, '-', "Useful when performing maintenance tasks on a resource.", pcmk_option_paragraph},
-    {"-spacer-", 1, 0, '-', " crm_resource --resource myResource --set-parameter is-managed --meta --parameter-value false", pcmk_option_example},
-    {"-spacer-", 1, 0, '-', "Erase the operation history of 'myResource' on 'aNode':", pcmk_option_paragraph},
-    {"-spacer-", 1, 0, '-', "The cluster will 'forget' the existing resource state (including any errors) and attempt to recover the resource."},
-    {"-spacer-", 1, 0, '-', "Useful when a resource had failed permanently and has been repaired by an administrator.", pcmk_option_paragraph},
-    {"-spacer-", 1, 0, '-', " crm_resource --resource myResource --cleanup --node aNode", pcmk_option_example},
+    {"host-uname", required_argument, NULL, 'H', NULL, pcmk_option_hidden},
+    {"migrate", no_argument, NULL, 'M', NULL, pcmk_option_hidden},
+    {"un-migrate", no_argument, NULL, 'U', NULL, pcmk_option_hidden},
+    {"un-move", no_argument, NULL, 'U', NULL, pcmk_option_hidden},
+
+    {"refresh", no_argument, NULL, 'R', NULL, pcmk_option_hidden},
+    {"reprobe", no_argument, NULL, 'P', NULL, pcmk_option_hidden},
+
+    {"-spacer-", 1, NULL, '-', "\nExamples:", pcmk_option_paragraph},
+    {"-spacer-", 1, NULL, '-', "List the available OCF agents:", pcmk_option_paragraph},
+    {"-spacer-", 1, NULL, '-', " crm_resource --list-agents ocf", pcmk_option_example},
+    {"-spacer-", 1, NULL, '-', "List the available OCF agents from the linux-ha project:", pcmk_option_paragraph},
+    {"-spacer-", 1, NULL, '-', " crm_resource --list-agents ocf:heartbeat", pcmk_option_example},
+    {"-spacer-", 1, NULL, '-', "Move 'myResource' to a specific node:", pcmk_option_paragraph},
+    {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --move --node altNode", pcmk_option_example},
+    {"-spacer-", 1, NULL, '-', "Allow (but not force) 'myResource' to move back to its original location:", pcmk_option_paragraph},
+    {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --clear", pcmk_option_example},
+    {"-spacer-", 1, NULL, '-', "Stop 'myResource' (and anything that depends on it):", pcmk_option_paragraph},
+    {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --set-parameter target-role --meta --parameter-value Stopped", pcmk_option_example},
+    {"-spacer-", 1, NULL, '-', "Tell the cluster not to manage 'myResource':", pcmk_option_paragraph},
+    {"-spacer-", 1, NULL, '-', "The cluster will not attempt to start or stop the resource under any circumstances."},
+    {"-spacer-", 1, NULL, '-', "Useful when performing maintenance tasks on a resource.", pcmk_option_paragraph},
+    {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --set-parameter is-managed --meta --parameter-value false", pcmk_option_example},
+    {"-spacer-", 1, NULL, '-', "Erase the operation history of 'myResource' on 'aNode':", pcmk_option_paragraph},
+    {"-spacer-", 1, NULL, '-', "The cluster will 'forget' the existing resource state (including any errors) and attempt to recover the resource."},
+    {"-spacer-", 1, NULL, '-', "Useful when a resource had failed permanently and has been repaired by an administrator.", pcmk_option_paragraph},
+    {"-spacer-", 1, NULL, '-', " crm_resource --resource myResource --cleanup --node aNode", pcmk_option_example},
 
     {0, 0, 0, 0}
 };
 /* *INDENT-ON* */
 
 int
 main(int argc, char **argv)
 {
     char rsc_cmd = 'L';
 
     const char *rsc_id = NULL;
     const char *host_uname = NULL;
     const char *prop_name = NULL;
     const char *prop_value = NULL;
     const char *rsc_type = NULL;
     const char *prop_id = NULL;
     const char *prop_set = NULL;
     const char *rsc_long_cmd = NULL;
     const char *longname = NULL;
     const char *operation = NULL;
     const char *interval = NULL;
     GHashTable *override_params = NULL;
 
     char *xml_file = NULL;
     crm_ipc_t *crmd_channel = NULL;
     pe_working_set_t data_set = { 0, };
     cib_t *cib_conn = NULL;
     bool recursive = FALSE;
     char *our_pid = NULL;
 
     bool require_resource = TRUE; /* whether command requires that resource be specified */
     bool require_dataset = TRUE;  /* whether command requires populated dataset instance */
     bool require_crmd = FALSE;    /* whether command requires connection to CRMd */
 
     int rc = pcmk_ok;
     int option_index = 0;
     int timeout_ms = 0;
     int argerr = 0;
     int flag;
 
     crm_log_cli_init("crm_resource");
     crm_set_options(NULL, "(query|command) [options]", long_options,
                     "Perform tasks related to cluster resources.\nAllows resources to be queried (definition and location), modified, and moved around the cluster.\n");
 
     while (1) {
         flag = crm_get_option_long(argc, argv, &option_index, &longname);
         if (flag == -1)
             break;
 
         switch (flag) {
             case 0: /* long options with no short equivalent */
                 if (safe_str_eq("master", longname)) {
                     scope_master = TRUE;
 
                 } else if(safe_str_eq(longname, "recursive")) {
                     recursive = TRUE;
 
                 } else if (safe_str_eq("wait", longname)) {
                     rsc_cmd = flag;
                     rsc_long_cmd = longname;
                     require_resource = FALSE;
                     require_dataset = FALSE;
 
                 } else if (
                     safe_str_eq("validate", longname)
                     || safe_str_eq("restart", longname)
                     || safe_str_eq("force-demote",  longname)
                     || safe_str_eq("force-stop",    longname)
                     || safe_str_eq("force-start",   longname)
                     || safe_str_eq("force-promote", longname)
                     || safe_str_eq("force-check",   longname)) {
                     rsc_cmd = flag;
                     rsc_long_cmd = longname;
                     crm_log_args(argc, argv);
 
                 } else if (safe_str_eq("list-ocf-providers", longname)
                            || safe_str_eq("list-ocf-alternatives", longname)
                            || safe_str_eq("list-standards", longname)) {
                     const char *text = NULL;
                     lrmd_list_t *list = NULL;
                     lrmd_list_t *iter = NULL;
                     lrmd_t *lrmd_conn = lrmd_api_new();
 
                     if (safe_str_eq("list-ocf-providers", longname)
                         || safe_str_eq("list-ocf-alternatives", longname)) {
                         rc = lrmd_conn->cmds->list_ocf_providers(lrmd_conn, optarg, &list);
                         text = "OCF providers";
 
                     } else if (safe_str_eq("list-standards", longname)) {
                         rc = lrmd_conn->cmds->list_standards(lrmd_conn, &list);
                         text = "standards";
                     }
 
                     if (rc > 0) {
                         rc = 0;
                         for (iter = list; iter != NULL; iter = iter->next) {
                             rc++;
                             printf("%s\n", iter->val);
                         }
                         lrmd_list_freeall(list);
 
                     } else if (optarg) {
                         fprintf(stderr, "No %s found for %s\n", text, optarg);
                     } else {
                         fprintf(stderr, "No %s found\n", text);
                     }
 
                     lrmd_api_delete(lrmd_conn);
                     return crm_exit(rc);
 
                 } else if (safe_str_eq("show-metadata", longname)) {
                     char *standard = NULL;
                     char *provider = NULL;
                     char *type = NULL;
                     char *metadata = NULL;
                     lrmd_t *lrmd_conn = lrmd_api_new();
 
                     rc = crm_parse_agent_spec(optarg, &standard, &provider, &type);
                     if (rc == pcmk_ok) {
                         rc = lrmd_conn->cmds->get_metadata(lrmd_conn, standard,
                                                            provider, type,
                                                            &metadata, 0);
                     } else {
                         fprintf(stderr,
                                 "'%s' is not a valid agent specification\n",
                                 optarg);
                     }
 
                     if (metadata) {
                         printf("%s\n", metadata);
                     } else {
                         fprintf(stderr, "Metadata query for %s failed: %s\n",
                                 optarg, pcmk_strerror(rc));
                     }
                     lrmd_api_delete(lrmd_conn);
                     return crm_exit(rc);
 
                 } else if (safe_str_eq("list-agents", longname)) {
                     lrmd_list_t *list = NULL;
                     lrmd_list_t *iter = NULL;
                     char *provider = strchr(optarg, ':');
                     lrmd_t *lrmd_conn = lrmd_api_new();
 
                     if (provider) {
                         *provider++ = 0;
                     }
                     rc = lrmd_conn->cmds->list_agents(lrmd_conn, &list, optarg, provider);
 
                     if (rc > 0) {
                         for (iter = list; iter != NULL; iter = iter->next) {
                             printf("%s\n", iter->val);
                         }
                         lrmd_list_freeall(list);
                         rc = 0;
                     } else {
                         fprintf(stderr, "No agents found for standard=%s, provider=%s\n",
                                 optarg, (provider? provider : "*"));
                         rc = -1;
                     }
                     lrmd_api_delete(lrmd_conn);
                     return crm_exit(rc);
 
                 } else {
                     crm_err("Unhandled long option: %s", longname);
                 }
                 break;
             case 'V':
                 do_trace = TRUE;
                 crm_bump_log_level(argc, argv);
                 break;
             case '$':
             case '?':
                 crm_help(flag, EX_OK);
                 break;
             case 'x':
                 xml_file = strdup(optarg);
                 break;
             case 'Q':
                 BE_QUIET = TRUE;
                 break;
             case 'm':
                 attr_set_type = XML_TAG_META_SETS;
                 break;
             case 'z':
                 attr_set_type = XML_TAG_UTILIZATION;
                 break;
             case 'u':
                 move_lifetime = strdup(optarg);
                 break;
             case 'f':
                 do_force = TRUE;
                 crm_log_args(argc, argv);
                 break;
             case 'i':
                 prop_id = optarg;
                 break;
             case 's':
                 prop_set = optarg;
                 break;
             case 'r':
                 rsc_id = optarg;
                 break;
             case 'v':
                 prop_value = optarg;
                 break;
             case 't':
                 rsc_type = optarg;
                 break;
             case 'T':
                 timeout_ms = crm_get_msec(optarg);
                 break;
 
             case 'C':
             case 'R':
             case 'P':
                 crm_log_args(argc, argv);
                 require_resource = FALSE;
                 require_crmd = TRUE;
                 rsc_cmd = 'C';
                 break;
 
             case 'n':
                 operation = optarg;
                 break;
 
             case 'I':
                 interval = optarg;
                 break;
 
             case 'D':
                 require_dataset = FALSE;
                 crm_log_args(argc, argv);
                 rsc_cmd = flag;
                 break;
 
             case 'F':
                 require_crmd = TRUE;
                 crm_log_args(argc, argv);
                 rsc_cmd = flag;
                 break;
 
             case 'U':
             case 'B':
             case 'M':
                 crm_log_args(argc, argv);
                 rsc_cmd = flag;
                 break;
 
             case 'c':
             case 'L':
             case 'l':
             case 'O':
             case 'o':
             case 'Y':
                 require_resource = FALSE;
                 rsc_cmd = flag;
                 break;
 
             case 'q':
             case 'w':
             case 'W':
             case 'A':
             case 'a':
                 rsc_cmd = flag;
                 break;
 
             case 'j':
                 print_pending = TRUE;
                 break;
 
             case 'S':
                 require_dataset = FALSE;
                 crm_log_args(argc, argv);
                 prop_name = optarg;
                 rsc_cmd = flag;
                 break;
 
             case 'p':
             case 'd':
                 crm_log_args(argc, argv);
                 prop_name = optarg;
                 rsc_cmd = flag;
                 break;
 
             case 'G':
             case 'g':
                 prop_name = optarg;
                 rsc_cmd = flag;
                 break;
             case 'h':
             case 'H':
             case 'N':
                 crm_trace("Option %c => %s", flag, optarg);
                 host_uname = optarg;
                 break;
 
             default:
                 CMD_ERR("Argument code 0%o (%c) is not (?yet?) supported", flag, flag);
                 ++argerr;
                 break;
         }
     }
 
     // Catch the case where the user didn't specify a command
     if (rsc_cmd == 'L') {
         require_resource = FALSE;
     }
 
     if (optind < argc
         && argv[optind] != NULL
         && rsc_cmd == 0
         && rsc_long_cmd) {
 
         override_params = crm_str_table_new();
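         /* Remaining arguments are name=value pairs that override resource
          * parameters for --validate and the --force-* commands; for example
          * (hypothetical invocation):
          *   crm_resource --resource myRsc --force-check ip=192.168.122.1
          */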
         while (optind < argc && argv[optind] != NULL) {
             char *name = calloc(1, strlen(argv[optind]) + 1);
             char *value = calloc(1, strlen(argv[optind]) + 1);
             int rc = sscanf(argv[optind], "%[^=]=%s", name, value);
 
             if(rc == 2) {
                 g_hash_table_replace(override_params, name, value);
 
             } else {
                 CMD_ERR("Error parsing '%s' as a name=value pair for --%s", argv[optind], rsc_long_cmd);
                 free(value);
                 free(name);
                 argerr++;
             }
             optind++;
         }
 
     } else if (optind < argc && argv[optind] != NULL && rsc_cmd == 0) {
         CMD_ERR("non-option ARGV-elements: ");
         while (optind < argc && argv[optind] != NULL) {
             CMD_ERR("[%d of %d] %s ", optind, argc, argv[optind]);
             optind++;
             argerr++;
         }
     }
 
     if (optind > argc) {
         ++argerr;
     }
 
     if (argerr) {
         CMD_ERR("Invalid option(s) supplied, use --help for valid usage");
         return crm_exit(EX_USAGE);
     }
 
     our_pid = crm_getpid_s();
 
     if (do_force) {
         crm_debug("Forcing...");
         cib_options |= cib_quorum_override;
     }
 
     data_set.input = NULL; /* make clean-up easier */
  
     /* If user specified resource, look for it, even if it's optional for command */
     if (rsc_id) {
         require_resource = TRUE;
     }
 
     /* We need a dataset to find a resource, even if command doesn't need it */
     if (require_resource) {
         require_dataset = TRUE;
     }
     
     if (require_resource && rsc_id == NULL) {
         CMD_ERR("Must supply a resource id with -r");
         rc = -ENXIO;
         goto bail;
     }
 
     /* Establish a connection to the CIB */
     cib_conn = cib_new();
     rc = cib_conn->cmds->signon(cib_conn, crm_system_name, cib_command);
     if (rc != pcmk_ok) {
         CMD_ERR("Error signing on to the CIB service: %s", pcmk_strerror(rc));
         goto bail;
     }
 
     /* Populate working set from XML file if specified or CIB query otherwise */
     if (require_dataset) {
         xmlNode *cib_xml_copy = NULL;
 
         if (xml_file != NULL) {
             cib_xml_copy = filename2xml(xml_file);
 
         } else {
             rc = cib_conn->cmds->query(cib_conn, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
         }
 
         if(rc != pcmk_ok) {
             goto bail;
         }
 
         /* Populate the working set instance */
         set_working_set_defaults(&data_set);
         rc = update_working_set_xml(&data_set, &cib_xml_copy);
         if (rc != pcmk_ok) {
             goto bail;
         }
         cluster_status(&data_set);
 
         /* Set rc to -ENXIO if no resource matching rsc_id is found.
          * This does not bail, but is handled later for certain commands.
          * That handling could be done here instead if all flags above set
          * require_resource appropriately. */
         if (require_resource && rsc_id && (find_rsc_or_clone(rsc_id, &data_set) == NULL)) {
             rc = -ENXIO;
         }
     }
     
     /* Establish a connection to the CRMd if needed */
     if (require_crmd) {
         xmlNode *xml = NULL;
         mainloop_io_t *source =
             mainloop_add_ipc_client(CRM_SYSTEM_CRMD, G_PRIORITY_DEFAULT, 0, NULL, &crm_callbacks);
         crmd_channel = mainloop_get_ipc_client(source);
 
         if (crmd_channel == NULL) {
             CMD_ERR("Error signing on to the CRMd service");
             rc = -ENOTCONN;
             goto bail;
         }
 
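         /* Say hello as an admin client; the CRMd's acknowledgement is the
          * "welcome message" already counted in crmd_replies_needed */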
         xml = create_hello_message(our_pid, crm_system_name, "0", "1");
         crm_ipc_send(crmd_channel, xml, 0, 0, NULL);
         free_xml(xml);
     }
 
     /* Handle rsc_cmd appropriately */
     if (rsc_cmd == 'L') {
         rc = pcmk_ok;
         cli_resource_print_list(&data_set, FALSE);
 
     } else if (rsc_cmd == 'l') {
         int found = 0;
         GListPtr lpc = NULL;
 
         rc = pcmk_ok;
         for (lpc = data_set.resources; lpc != NULL; lpc = lpc->next) {
             resource_t *rsc = (resource_t *) lpc->data;
 
             found++;
             cli_resource_print_raw(rsc);
         }
 
         if (found == 0) {
             printf("NO resources configured\n");
             rc = -ENXIO;
         }
 
     } else if (rsc_cmd == 0 && rsc_long_cmd && safe_str_eq(rsc_long_cmd, "restart")) {
         resource_t *rsc = NULL;
 
         rsc = pe_find_resource(data_set.resources, rsc_id);
 
         rc = -EINVAL;
         if (rsc == NULL) {
             CMD_ERR("Resource '%s' not restarted: unknown", rsc_id);
             goto bail;
         }
 
         rc = cli_resource_restart(rsc, host_uname, timeout_ms, cib_conn);
 
     } else if (rsc_cmd == 0 && rsc_long_cmd && safe_str_eq(rsc_long_cmd, "wait")) {
         rc = wait_till_stable(timeout_ms, cib_conn);
 
     } else if (rsc_cmd == 0 && rsc_long_cmd) { /* validate or force-(demote|stop|start|promote|check) */
         rc = cli_resource_execute(rsc_id, rsc_long_cmd, override_params, cib_conn, &data_set);
 
     } else if (rsc_cmd == 'A' || rsc_cmd == 'a') {
         GListPtr lpc = NULL;
         resource_t *rsc = pe_find_resource(data_set.resources, rsc_id);
         xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set.input);
 
         if (rsc == NULL) {
             CMD_ERR("Must supply a resource id with -r");
             rc = -ENXIO;
             goto bail;
         }
 
         unpack_constraints(cib_constraints, &data_set);
 
         for (lpc = data_set.resources; lpc != NULL; lpc = lpc->next) {
             resource_t *r = (resource_t *) lpc->data;
 
             clear_bit(r->flags, pe_rsc_allocating);
         }
 
         cli_resource_print_colocation(rsc, TRUE, rsc_cmd == 'A', 1);
 
         fprintf(stdout, "* %s\n", rsc->id);
         cli_resource_print_location(rsc, NULL);
 
         for (lpc = data_set.resources; lpc != NULL; lpc = lpc->next) {
             resource_t *r = (resource_t *) lpc->data;
 
             clear_bit(r->flags, pe_rsc_allocating);
         }
 
         cli_resource_print_colocation(rsc, FALSE, rsc_cmd == 'A', 1);
 
     } else if (rsc_cmd == 'c') {
         GListPtr lpc = NULL;

         rc = pcmk_ok;
         for (lpc = data_set.resources; lpc != NULL; lpc = lpc->next) {
             resource_t *rsc = (resource_t *) lpc->data;

             cli_resource_print_cts(rsc);
         }
         cli_resource_print_cts_constraints(&data_set);
 
     } else if (rsc_cmd == 'F') {
         rc = cli_resource_fail(crmd_channel, host_uname, rsc_id, &data_set);
         if (rc == pcmk_ok) {
             start_mainloop();
         }
 
     } else if (rsc_cmd == 'O') {
         rc = cli_resource_print_operations(rsc_id, host_uname, TRUE, &data_set);
 
     } else if (rsc_cmd == 'o') {
         rc = cli_resource_print_operations(rsc_id, host_uname, FALSE, &data_set);
 
     /* All remaining commands require that resource exist */
     } else if (rc == -ENXIO) {
         CMD_ERR("Resource '%s' not found: %s", crm_str(rsc_id), pcmk_strerror(rc));
 
     } else if (rsc_cmd == 'W') {
         rc = cli_resource_search(rsc_id, &data_set);
         if (rc >= 0) {
             rc = pcmk_ok;
         }
 
     } else if (rsc_cmd == 'q') {
         rc = cli_resource_print(rsc_id, &data_set, TRUE);
 
     } else if (rsc_cmd == 'w') {
         rc = cli_resource_print(rsc_id, &data_set, FALSE);
 
     } else if (rsc_cmd == 'Y') {
         node_t *dest = NULL;

         if (host_uname) {
             dest = pe_find_node(data_set.nodes, host_uname);
             if (dest == NULL) {
                 CMD_ERR("Unknown node: %s", host_uname);
                 rc = -ENXIO;
                 goto bail;
             }
         }
         cli_resource_why(cib_conn, data_set.resources, rsc_id, dest);

     } else if (rsc_cmd == 'U') {
         node_t *dest = NULL;
 
         if (host_uname) {
             dest = pe_find_node(data_set.nodes, host_uname);
             if (dest == NULL) {
                 CMD_ERR("Unknown node: %s", host_uname);
                 rc = -ENXIO;
                 goto bail;
             }
             rc = cli_resource_clear(rsc_id, dest->details->uname, NULL, cib_conn);
 
         } else {
             rc = cli_resource_clear(rsc_id, NULL, data_set.nodes, cib_conn);
         }
 
     } else if (rsc_cmd == 'M' && host_uname) {
         rc = cli_resource_move(rsc_id, host_uname, cib_conn, &data_set);
 
     } else if (rsc_cmd == 'B' && host_uname) {
         resource_t *rsc = pe_find_resource(data_set.resources, rsc_id);
         node_t *dest = pe_find_node(data_set.nodes, host_uname);
 
         rc = -ENXIO;
         if(rsc == NULL) {
             CMD_ERR("Resource '%s' not moved: unknown", rsc_id);
             goto bail;
 
         } else if (dest == NULL) {
             CMD_ERR("Error performing operation: node '%s' is unknown", host_uname);
             goto bail;
         }
         rc = cli_resource_ban(rsc_id, dest->details->uname, NULL, cib_conn);
 
     } else if (rsc_cmd == 'B' || rsc_cmd == 'M') {
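         /* No --node given: ban the resource from the one node it is running
          * on, or, for a master/slave resource with exactly one master, from
          * the node where it is promoted */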
         resource_t *rsc = pe_find_resource(data_set.resources, rsc_id);
         rc = -EINVAL;
         if(rsc == NULL) {
             CMD_ERR("Resource '%s' not moved: unknown", rsc_id);
 
         } else if(g_list_length(rsc->running_on) == 1) {
             node_t *current = rsc->running_on->data;
             rc = cli_resource_ban(rsc_id, current->details->uname, NULL, cib_conn);
 
         } else if(rsc->variant == pe_master) {
             int count = 0;
             GListPtr iter = NULL;
             node_t *current = NULL;
 
             for(iter = rsc->children; iter; iter = iter->next) {
                 resource_t *child = (resource_t *)iter->data;
                 enum rsc_role_e child_role = child->fns->state(child, TRUE);
 
                 if(child_role == RSC_ROLE_MASTER) {
                     count++;
                     current = child->running_on->data;
                 }
             }
 
             if(count == 1 && current) {
                 rc = cli_resource_ban(rsc_id, current->details->uname, NULL, cib_conn);
 
             } else {
                 CMD_ERR("Resource '%s' not moved: active in %d locations (promoted in %d).", rsc_id, g_list_length(rsc->running_on), count);
                 CMD_ERR("You can prevent '%s' from running on a specific location with: --ban --node <name>", rsc_id);
                 CMD_ERR("You can prevent '%s' from being promoted at a specific location with:"
                         " --ban --master --node <name>", rsc_id);
             }
 
         } else {
             CMD_ERR("Resource '%s' not moved: active in %d locations.", rsc_id, g_list_length(rsc->running_on));
             CMD_ERR("You can prevent '%s' from running on a specific location with: --ban --node <name>", rsc_id);
         }
 
     } else if (rsc_cmd == 'G') {
         rc = cli_resource_print_property(rsc_id, prop_name, &data_set);
 
     } else if (rsc_cmd == 'S') {
         xmlNode *msg_data = NULL;
 
         if ((rsc_type == NULL) || !strlen(rsc_type)) {
             CMD_ERR("Must specify -t with resource type");
             rc = -ENXIO;
             goto bail;
 
         } else if ((prop_value == NULL) || !strlen(prop_value)) {
             CMD_ERR("Must supply -v with new value");
             rc = -EINVAL;
             goto bail;
         }
 
         CRM_LOG_ASSERT(prop_name != NULL);
 
         msg_data = create_xml_node(NULL, rsc_type);
         crm_xml_add(msg_data, XML_ATTR_ID, rsc_id);
         crm_xml_add(msg_data, prop_name, prop_value);
 
         rc = cib_conn->cmds->modify(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options);
         free_xml(msg_data);
 
     } else if (rsc_cmd == 'g') {
         rc = cli_resource_print_attribute(rsc_id, prop_name, &data_set);
 
     } else if (rsc_cmd == 'p') {
         if (prop_value == NULL || strlen(prop_value) == 0) {
             CMD_ERR("You need to supply a value with the -v option");
             rc = -EINVAL;
             goto bail;
         }
 
         /* coverity[var_deref_model] False positive */
         rc = cli_resource_update_attribute(rsc_id, prop_set, prop_id, prop_name,
                                prop_value, recursive, cib_conn, &data_set);
 
     } else if (rsc_cmd == 'd') {
         /* coverity[var_deref_model] False positive */
         rc = cli_resource_delete_attribute(rsc_id, prop_set, prop_id, prop_name, cib_conn, &data_set);
     } else if ((rsc_cmd == 'C') && (rsc_id)) {
         resource_t *rsc = pe_find_resource(data_set.resources, rsc_id);
         if(do_force == FALSE) {
             rsc = uber_parent(rsc);
         }
 
         if(rsc) {
             crm_debug("Re-checking the state of %s (%s requested) on %s",
                       rsc->id, rsc_id, host_uname);
             crmd_replies_needed = 0;
             rc = cli_resource_delete(crmd_channel, host_uname, rsc, operation,
                                      interval, &data_set);
         } else {
             rc = -ENODEV;
         }
 
         if(rc == pcmk_ok && BE_QUIET == FALSE) {
             /* Now check XML_RSC_ATTR_TARGET_ROLE and XML_RSC_ATTR_MANAGED */
             cli_resource_check(cib_conn, rsc);
         }
 
         if (rc == pcmk_ok) {
             start_mainloop();
         }
 
     } else if (rsc_cmd == 'C') {
 #if HAVE_ATOMIC_ATTRD
         const char *router_node = host_uname;
         xmlNode *msg_data = NULL;
         xmlNode *cmd = NULL;
         int attr_options = attrd_opt_none;
 
         if (host_uname) {
             node_t *node = pe_find_node(data_set.nodes, host_uname);
 
             if (node && is_remote_node(node)) {
                 if (node->details->remote_rsc == NULL || node->details->remote_rsc->running_on == NULL) {
                     CMD_ERR("No lrmd connection detected to remote node %s", host_uname);
                     rc = -ENXIO;
                     goto bail;
                 }
                 node = node->details->remote_rsc->running_on->data;
                 router_node = node->details->uname;
                 attr_options |= attrd_opt_remote;
             }
         }
 
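         /* Build a CRM_OP_REPROBE request for host_uname (routed through its
          * cluster host when it is a remote node), clearing fail counts via
          * attrd before asking the CRMd to re-detect resource state */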
         msg_data = create_xml_node(NULL, "crm-resource-reprobe-op");
         crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, host_uname);
         if (safe_str_neq(router_node, host_uname)) {
             crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node);
         }
 
         cmd = create_request(CRM_OP_REPROBE, msg_data, router_node,
                              CRM_SYSTEM_CRMD, crm_system_name, our_pid);
         free_xml(msg_data);
 
         crm_debug("Re-checking the state of all resources on %s", host_uname?host_uname:"all nodes");
 
         rc = attrd_clear_delegate(NULL, host_uname, NULL, NULL, NULL, NULL,
                                   attr_options);
 
         if (crm_ipc_send(crmd_channel, cmd, 0, 0, NULL) > 0) {
             start_mainloop();
         }
 
         free_xml(cmd);
 #else
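         /* Without atomic attrd, fall back to cleaning up each resource
          * individually through the CRMd */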
         GListPtr rIter = NULL;
 
         crmd_replies_needed = 0;
         for (rIter = data_set.resources; rIter; rIter = rIter->next) {
             resource_t *rsc = rIter->data;
             cli_resource_delete(crmd_channel, host_uname, rsc, NULL, NULL,
                                 &data_set);
         }
 
         start_mainloop();
 #endif
 
     } else if (rsc_cmd == 'D') {
         xmlNode *msg_data = NULL;
 
         if (rsc_type == NULL) {
             CMD_ERR("You need to specify a resource type with -t");
             rc = -ENXIO;
             goto bail;
         }
 
         msg_data = create_xml_node(NULL, rsc_type);
         crm_xml_add(msg_data, XML_ATTR_ID, rsc_id);
 
         rc = cib_conn->cmds->delete(cib_conn, XML_CIB_TAG_RESOURCES, msg_data, cib_options);
         free_xml(msg_data);
 
     } else {
         CMD_ERR("Unknown command: %c", rsc_cmd);
     }
 
   bail:
 
     free(our_pid);
 
     if (data_set.input != NULL) {
         cleanup_alloc_calculations(&data_set);
     }
     if (cib_conn != NULL) {
         cib_conn->cmds->signoff(cib_conn);
         cib_delete(cib_conn);
     }
 
     if (rc == -pcmk_err_no_quorum) {
         CMD_ERR("Error performing operation: %s", pcmk_strerror(rc));
         CMD_ERR("Try using -f");
 
     } else if (rc != pcmk_ok) {
         CMD_ERR("Error performing operation: %s", pcmk_strerror(rc));
     }
 
     return crm_exit(rc);
 }
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index b7ada06ac0..3b1255a092 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1,1801 +1,1806 @@
 
 /*
  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  *
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_resource.h>
 
 bool do_trace = FALSE;
 bool do_force = FALSE;
 int crmd_replies_needed = 1; /* The welcome message */
 
 const char *attr_set_type = XML_TAG_ATTR_SETS;
 
 static int
 do_find_resource(const char *rsc, resource_t * the_rsc, pe_working_set_t * data_set)
 {
     int found = 0;
     GListPtr lpc = NULL;
 
     for (lpc = the_rsc->running_on; lpc != NULL; lpc = lpc->next) {
         node_t *node = (node_t *) lpc->data;
 
         if (BE_QUIET) {
             fprintf(stdout, "%s\n", node->details->uname);
         } else {
             const char *state = "";
 
             if (!pe_rsc_is_clone(the_rsc) && the_rsc->fns->state(the_rsc, TRUE) == RSC_ROLE_MASTER) {
                 state = "Master";
             }
             fprintf(stdout, "resource %s is running on: %s %s\n", rsc, node->details->uname, state);
         }
 
         found++;
     }
 
     if (BE_QUIET == FALSE && found == 0) {
         fprintf(stderr, "resource %s is NOT running\n", rsc);
     }
 
     return found;
 }
 
 int
 cli_resource_search(const char *rsc, pe_working_set_t * data_set)
 {
     int found = 0;
     resource_t *parent = NULL;
     resource_t *the_rsc = pe_find_resource(data_set->resources, rsc);
 
     if (the_rsc == NULL) {
         return -ENXIO;
     }
 
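     /* A clone can be active on several nodes, so report on every child;
      * otherwise report on the single matching resource */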
     if (pe_rsc_is_clone(the_rsc)) {
         GListPtr gIter = the_rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             found += do_find_resource(rsc, gIter->data, data_set);
         }
 
     /* The common ID shared by an anonymous clone's children was supplied */
     } else if ((parent = uber_parent(the_rsc)) != NULL
                && pe_rsc_is_clone(parent)
                && is_not_set(the_rsc->flags, pe_rsc_unique)
                && the_rsc->clone_name
                && safe_str_eq(rsc, the_rsc->clone_name)
                && safe_str_neq(rsc, the_rsc->id)) {
         GListPtr gIter = parent->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             found += do_find_resource(rsc, gIter->data, data_set);
         }
 
     } else {
         found += do_find_resource(rsc, the_rsc, data_set);
     }
 
     return found;
 }
 
 resource_t *
 find_rsc_or_clone(const char *rsc, pe_working_set_t * data_set)
 {
     resource_t *the_rsc = pe_find_resource(data_set->resources, rsc);
 
     if (the_rsc == NULL) {
         char *as_clone = crm_concat(rsc, "0", ':');
 
         the_rsc = pe_find_resource(data_set->resources, as_clone);
         free(as_clone);
     }
     return the_rsc;
 }
 
 #define XPATH_MAX 1024
 
 static int
 find_resource_attr(cib_t * the_cib, const char *attr, const char *rsc, const char *set_type,
                    const char *set_name, const char *attr_id, const char *attr_name, char **value)
 {
     int offset = 0;
     int rc = pcmk_ok;
     xmlNode *xml_search = NULL;
     char *xpath_string = NULL;
 
     if(value) {
         *value = NULL;
     }
 
     if(the_cib == NULL) {
         return -ENOTCONN;
     }
 
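     /* Build an XPath query of roughly this shape (each predicate is added
      * only when the corresponding argument was supplied):
      *   .../resources//*[@id="RSC"]/SET_TYPE[@id="SET"]//nvpair[@id="ID" and @name="NAME"]
      */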
     xpath_string = calloc(1, XPATH_MAX);
     offset +=
         snprintf(xpath_string + offset, XPATH_MAX - offset, "%s", get_object_path("resources"));
 
     offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//*[@id=\"%s\"]", rsc);
 
     if (set_type) {
         offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "/%s", set_type);
         if (set_name) {
             offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "[@id=\"%s\"]", set_name);
         }
     }
 
     offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "//nvpair[");
     if (attr_id) {
         offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@id=\"%s\"", attr_id);
     }
 
     if (attr_name) {
         if (attr_id) {
             offset += snprintf(xpath_string + offset, XPATH_MAX - offset, " and ");
         }
         offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "@name=\"%s\"", attr_name);
     }
     offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "]");
     CRM_LOG_ASSERT(offset > 0);
 
     rc = the_cib->cmds->query(the_cib, xpath_string, &xml_search,
                               cib_sync_call | cib_scope_local | cib_xpath);
 
     if (rc != pcmk_ok) {
         goto bail;
     }
 
     crm_log_xml_debug(xml_search, "Match");
     if (xml_has_children(xml_search)) {
         xmlNode *child = NULL;
 
         rc = -EINVAL;
         printf("Multiple attributes match name=%s\n", attr_name);
 
         for (child = __xml_first_child(xml_search); child != NULL; child = __xml_next(child)) {
             printf("  Value: %s \t(id=%s)\n",
                    crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child));
         }
 
     } else if(value) {
         const char *tmp = crm_element_value(xml_search, attr);
 
         if (tmp) {
             *value = strdup(tmp);
         }
     }
 
   bail:
     free(xpath_string);
     free_xml(xml_search);
     return rc;
 }
 
 static resource_t *
 find_matching_attr_resource(resource_t * rsc, const char * rsc_id, const char * attr_set, const char * attr_id,
                             const char * attr_name, cib_t * cib, const char * cmd)
 {
     int rc = pcmk_ok;
     char *lookup_id = NULL;
     char *local_attr_id = NULL;
 
     if(do_force == TRUE) {
         return rsc;
 
     } else if(rsc->parent) {
         switch(rsc->parent->variant) {
             case pe_group:
                 if (BE_QUIET == FALSE) {
                     printf("Performing %s of '%s' for '%s' will not apply to its peers in '%s'\n", cmd, attr_name, rsc_id, rsc->parent->id);
                 }
                 break;
             case pe_master:
             case pe_clone:
 
                 rc = find_resource_attr(cib, XML_ATTR_ID, rsc_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id);
                 free(local_attr_id);
 
                 if(rc != pcmk_ok) {
                     rsc = rsc->parent;
                     if (BE_QUIET == FALSE) {
                         printf("Performing %s of '%s' on '%s', the parent of '%s'\n", cmd, attr_name, rsc->id, rsc_id);
                     }
                 }
                 break;
             default:
                 break;
         }
 
     } else if (rsc->parent == NULL && rsc->children) {
         resource_t *child = rsc->children->data;
 
         if(child->variant == pe_native) {
             lookup_id = clone_strip(child->id); /* Could be a cloned group! */
             rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name, &local_attr_id);
 
             if(rc == pcmk_ok) {
                 rsc = child;
                 if (BE_QUIET == FALSE) {
                     printf("A value for '%s' already exists in child '%s', performing %s on that instead of '%s'\n", attr_name, lookup_id, cmd, rsc_id);
                 }
             }
 
             free(local_attr_id);
             free(lookup_id);
         }
     }
 
     return rsc;
 }
 
 int
 cli_resource_update_attribute(const char *rsc_id, const char *attr_set, const char *attr_id,
                   const char *attr_name, const char *attr_value, bool recursive,
                   cib_t * cib, pe_working_set_t * data_set)
 {
     int rc = pcmk_ok;
     static bool need_init = TRUE;
 
     char *lookup_id = NULL;
     char *local_attr_id = NULL;
     char *local_attr_set = NULL;
 
     xmlNode *xml_top = NULL;
     xmlNode *xml_obj = NULL;
 
     bool use_attributes_tag = FALSE;
     resource_t *rsc = find_rsc_or_clone(rsc_id, data_set);
 
     if (rsc == NULL) {
         return -ENXIO;
     }
 
     if(attr_id == NULL
        && do_force == FALSE
        && pcmk_ok != find_resource_attr(
            cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL)) {
         printf("\n");
     }
 
     if (safe_str_eq(attr_set_type, XML_TAG_ATTR_SETS)) {
         if (do_force == FALSE) {
             rc = find_resource_attr(cib, XML_ATTR_ID, uber_parent(rsc)->id,
                                     XML_TAG_META_SETS, attr_set, attr_id,
                                     attr_name, &local_attr_id);
             if (rc == pcmk_ok && BE_QUIET == FALSE) {
                 printf("WARNING: There is already a meta attribute for '%s' called '%s' (id=%s)\n",
                        uber_parent(rsc)->id, attr_name, local_attr_id);
                 printf("         Delete '%s' first or use --force to override\n", local_attr_id);
             }
             free(local_attr_id);
             if (rc == pcmk_ok) {
                 return -ENOTUNIQ;
             }
         }
 
     } else {
         rsc = find_matching_attr_resource(rsc, rsc_id, attr_set, attr_id, attr_name, cib, "update");
     }
 
     lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */
     rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name,
                             &local_attr_id);
 
     if (rc == pcmk_ok) {
         crm_debug("Found a match for name=%s: id=%s", attr_name, local_attr_id);
         attr_id = local_attr_id;
 
     } else if (rc != -ENXIO) {
         free(lookup_id);
         free(local_attr_id);
         return rc;
 
     } else {
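         /* No existing nvpair: build the enclosing resource element and
          * attribute set so the modify call below creates them, adding the
          * legacy <attributes> wrapper only for very old (-0.6 era)
          * validation schemas */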
         const char *value = NULL;
         xmlNode *cib_top = NULL;
         const char *tag = crm_element_name(rsc->xml);
 
         cib->cmds->query(cib, "/cib", &cib_top,
                               cib_sync_call | cib_scope_local | cib_xpath | cib_no_children);
         value = crm_element_value(cib_top, "ignore_dtd");
         if (value != NULL) {
             use_attributes_tag = TRUE;
 
         } else {
             value = crm_element_value(cib_top, XML_ATTR_VALIDATION);
             if (crm_ends_with_ext(value, "-0.6")) {
                 use_attributes_tag = TRUE;
             }
         }
         free_xml(cib_top);
 
         if (attr_set == NULL) {
             local_attr_set = crm_concat(lookup_id, attr_set_type, '-');
             attr_set = local_attr_set;
         }
         if (attr_id == NULL) {
             local_attr_id = crm_concat(attr_set, attr_name, '-');
             attr_id = local_attr_id;
         }
 
         if (use_attributes_tag && safe_str_eq(tag, XML_CIB_TAG_MASTER)) {
             tag = "master_slave";       /* use the old name */
         }
 
         xml_top = create_xml_node(NULL, tag);
         crm_xml_add(xml_top, XML_ATTR_ID, lookup_id);
 
         xml_obj = create_xml_node(xml_top, attr_set_type);
         crm_xml_add(xml_obj, XML_ATTR_ID, attr_set);
 
         if (use_attributes_tag) {
             xml_obj = create_xml_node(xml_obj, XML_TAG_ATTRS);
         }
     }
 
     xml_obj = crm_create_nvpair_xml(xml_obj, attr_id, attr_name, attr_value);
     if (xml_top == NULL) {
         xml_top = xml_obj;
     }
 
     crm_log_xml_debug(xml_top, "Update");
 
     rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top, cib_options);
     if (rc == pcmk_ok && BE_QUIET == FALSE) {
         printf("Set '%s' option: id=%s%s%s%s%s=%s\n", lookup_id, local_attr_id,
                attr_set ? " set=" : "", attr_set ? attr_set : "",
                attr_name ? " name=" : "", attr_name ? attr_name : "", attr_value);
     }
 
     free_xml(xml_top);
 
     free(lookup_id);
     free(local_attr_id);
     free(local_attr_set);
 
     if(recursive && safe_str_eq(attr_set_type, XML_TAG_META_SETS)) {
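         /* With --recursive, push the same meta-attribute to every resource
          * colocated with this one (positive scores only); the
          * pe_rsc_allocating flag marks visited resources to break
          * colocation loops */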
         GListPtr lpc = NULL;
 
         if(need_init) {
             xmlNode *cib_constraints = get_object_root(XML_CIB_TAG_CONSTRAINTS, data_set->input);
 
             need_init = FALSE;
             unpack_constraints(cib_constraints, data_set);
 
             for (lpc = data_set->resources; lpc != NULL; lpc = lpc->next) {
                 resource_t *r = (resource_t *) lpc->data;
 
                 clear_bit(r->flags, pe_rsc_allocating);
             }
         }
 
         crm_debug("Looking for dependencies %p", rsc->rsc_cons_lhs);
         set_bit(rsc->flags, pe_rsc_allocating);
         for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
             rsc_colocation_t *cons = (rsc_colocation_t *) lpc->data;
             resource_t *peer = cons->rsc_lh;
 
             crm_debug("Checking %s %d", cons->id, cons->score);
             if (cons->score > 0 && is_not_set(peer->flags, pe_rsc_allocating)) {
                 /* Don't get into colocation loops */
                 crm_debug("Setting %s=%s for dependent resource %s", attr_name, attr_value, peer->id);
                 cli_resource_update_attribute(peer->id, NULL, NULL, attr_name, attr_value, recursive, cib, data_set);
             }
         }
     }
 
     return rc;
 }
 
 int
 cli_resource_delete_attribute(const char *rsc_id, const char *attr_set, const char *attr_id,
                      const char *attr_name, cib_t * cib, pe_working_set_t * data_set)
 {
     xmlNode *xml_obj = NULL;
 
     int rc = pcmk_ok;
     char *lookup_id = NULL;
     char *local_attr_id = NULL;
     resource_t *rsc = find_rsc_or_clone(rsc_id, data_set);
 
     if (rsc == NULL) {
         return -ENXIO;
     }
 
     if(attr_id == NULL
        && do_force == FALSE
        && find_resource_attr(
            cib, XML_ATTR_ID, uber_parent(rsc)->id, NULL, NULL, NULL, attr_name, NULL) != pcmk_ok) {
         printf("\n");
     }
 
     if(safe_str_eq(attr_set_type, XML_TAG_META_SETS)) {
         rsc = find_matching_attr_resource(rsc, rsc_id, attr_set, attr_id, attr_name, cib, "delete");
     }
 
     lookup_id = clone_strip(rsc->id);
     rc = find_resource_attr(cib, XML_ATTR_ID, lookup_id, attr_set_type, attr_set, attr_id, attr_name,
                             &local_attr_id);
 
     if (rc == -ENXIO) {
         free(lookup_id);
         return pcmk_ok;
 
     } else if (rc != pcmk_ok) {
         free(lookup_id);
         return rc;
     }
 
     if (attr_id == NULL) {
         attr_id = local_attr_id;
     }
 
     xml_obj = crm_create_nvpair_xml(NULL, attr_id, attr_name, NULL);
     crm_log_xml_debug(xml_obj, "Delete");
 
     CRM_ASSERT(cib);
     rc = cib->cmds->delete(cib, XML_CIB_TAG_RESOURCES, xml_obj, cib_options);
 
     if (rc == pcmk_ok && BE_QUIET == FALSE) {
         printf("Deleted '%s' option: id=%s%s%s%s%s\n", lookup_id, local_attr_id,
                attr_set ? " set=" : "", attr_set ? attr_set : "",
                attr_name ? " name=" : "", attr_name ? attr_name : "");
     }
 
     free(lookup_id);
     free_xml(xml_obj);
     free(local_attr_id);
     return rc;
 }
 
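 /*!
  * \internal
  * \brief Ask the crmd to perform an operation on a resource via IPC
  *
  * Used by cli_resource_delete() and cli_resource_fail(). For Pacemaker Remote
  * nodes, the request is routed through the cluster node hosting the remote
  * connection.
  *
  * \param[in] crmd_channel  IPC connection to the crmd
  * \param[in] op            Operation to request (for example, CRM_OP_LRM_FAIL)
  * \param[in] host_uname    Name of node where the operation should occur
  * \param[in] rsc_id        ID of (primitive) resource to operate on
  * \param[in] only_failed   Currently unused
  * \param[in] data_set      Working set with resource and node details
  *
  * \return 0 on success, -errno otherwise
  */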
 static int
 send_lrm_rsc_op(crm_ipc_t * crmd_channel, const char *op,
                 const char *host_uname, const char *rsc_id,
                 bool only_failed, pe_working_set_t * data_set)
 {
     char *our_pid = NULL;
     char *key = NULL;
     int rc = -ECOMM;
     xmlNode *cmd = NULL;
     xmlNode *xml_rsc = NULL;
     const char *value = NULL;
     const char *router_node = host_uname;
     xmlNode *params = NULL;
     xmlNode *msg_data = NULL;
     resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
 
     if (rsc == NULL) {
         CMD_ERR("Resource %s not found", rsc_id);
         return -ENXIO;
 
     } else if (rsc->variant != pe_native) {
         CMD_ERR("We can only process primitive resources, not %s", rsc_id);
         return -EINVAL;
 
     } else if (host_uname == NULL) {
         CMD_ERR("Please supply a node name with --node");
         return -EINVAL;
     } else {
         node_t *node = pe_find_node(data_set->nodes, host_uname);
 
         if (node && is_remote_node(node)) {
             if (node->details->remote_rsc == NULL || node->details->remote_rsc->running_on == NULL) {
                 CMD_ERR("No lrmd connection detected to remote node %s", host_uname);
                 return -ENXIO;
             }
             node = node->details->remote_rsc->running_on->data;
             router_node = node->details->uname;
         }
     }
 
     key = generate_transition_key(0, getpid(), 0, "xxxxxxxx-xrsc-opxx-xcrm-resourcexxxx");
 
     msg_data = create_xml_node(NULL, XML_GRAPH_TAG_RSC_OP);
     crm_xml_add(msg_data, XML_ATTR_TRANSITION_KEY, key);
     free(key);
 
     crm_xml_add(msg_data, XML_LRM_ATTR_TARGET, host_uname);
     if (safe_str_neq(router_node, host_uname)) {
         crm_xml_add(msg_data, XML_LRM_ATTR_ROUTER_NODE, router_node);
     }
 
     xml_rsc = create_xml_node(msg_data, XML_CIB_TAG_RESOURCE);
     if (rsc->clone_name) {
         crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->clone_name);
         crm_xml_add(xml_rsc, XML_ATTR_ID_LONG, rsc->id);
 
     } else {
         crm_xml_add(xml_rsc, XML_ATTR_ID, rsc->id);
     }
 
     value = crm_copy_xml_element(rsc->xml, xml_rsc, XML_ATTR_TYPE);
     if (value == NULL) {
         CMD_ERR("%s has no type!  Aborting...", rsc_id);
         return -ENXIO;
     }
 
     value = crm_copy_xml_element(rsc->xml, xml_rsc, XML_AGENT_ATTR_CLASS);
     if (value == NULL) {
         CMD_ERR("%s has no class!  Aborting...", rsc_id);
         return -ENXIO;
     }
 
     crm_copy_xml_element(rsc->xml, xml_rsc, XML_AGENT_ATTR_PROVIDER);
 
     params = create_xml_node(msg_data, XML_TAG_ATTRS);
     crm_xml_add(params, XML_ATTR_CRM_VERSION, CRM_FEATURE_SET);
 
     key = crm_meta_name(XML_LRM_ATTR_INTERVAL);
     crm_xml_add(params, key, "60000");  /* 1 minute */
     free(key);
 
     our_pid = crm_getpid_s();
     cmd = create_request(op, msg_data, router_node, CRM_SYSTEM_CRMD, crm_system_name, our_pid);
 
 /* 	crm_log_xml_warn(cmd, "send_lrm_rsc_op"); */
     free_xml(msg_data);
 
     if (crm_ipc_send(crmd_channel, cmd, 0, 0, NULL) > 0) {
         rc = 0;
 
     } else {
         crm_debug("Could not send %s op to the crmd", op);
         rc = -ENOTCONN;
     }
 
     free_xml(cmd);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Get resource name as used in failure-related node attributes
  *
  * \param[in] rsc  Resource to check
  *
  * \return Newly allocated string containing resource's fail name
  * \note The caller is responsible for freeing the result.
  */
 static inline char *
 rsc_fail_name(resource_t *rsc)
 {
     const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
 
     return is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name);
 }
 
 int
 cli_resource_delete(crm_ipc_t *crmd_channel, const char *host_uname,
                     resource_t *rsc, const char *operation,
                     const char *interval, pe_working_set_t *data_set)
 {
     int rc = pcmk_ok;
     node_t *node = NULL;
     char *rsc_name = NULL;
     int attr_options = attrd_opt_none;
 
     if (rsc == NULL) {
         return -ENXIO;
 
     } else if (rsc->children) {
         GListPtr lpc = NULL;
 
         for (lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
             resource_t *child = (resource_t *) lpc->data;
 
             rc = cli_resource_delete(crmd_channel, host_uname, child, operation,
                                      interval, data_set);
             if(rc != pcmk_ok) {
                 return rc;
             }
         }
         return pcmk_ok;
 
     } else if (host_uname == NULL) {
         GListPtr lpc = NULL;
         GListPtr nodes = g_hash_table_get_values(rsc->known_on);
 
         for (lpc = nodes; lpc != NULL; lpc = lpc->next) {
             node = (node_t *) lpc->data;
 
             if (node->details->online) {
                 cli_resource_delete(crmd_channel, node->details->uname, rsc,
                                     operation, interval, data_set);
             }
         }
 
         g_list_free(nodes);
         return pcmk_ok;
     }
 
     node = pe_find_node(data_set->nodes, host_uname);
 
     if (node == NULL) {
         printf("Unable to clean up %s because node %s not found\n",
                rsc->id, host_uname);
         return -ENODEV;
     }
 
     if (!node->details->rsc_discovery_enabled) {
         printf("Unable to clean up %s because resource discovery disabled on %s\n",
                rsc->id, host_uname);
         return -EOPNOTSUPP;
     }
 
     /* Erase the resource's entire LRM history in the CIB, even if we're only
      * clearing a single operation's fail count. If we erased only entries for a
      * single operation, we might wind up with a wrong idea of the current
      * resource state, and we might not re-probe the resource.
      */
     rc = send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_DELETE, host_uname, rsc->id,
                          TRUE, data_set);
     if (rc != pcmk_ok) {
         printf("Unable to clean up %s history on %s: %s\n",
                rsc->id, host_uname, pcmk_strerror(rc));
         return rc;
     }
     if (node->details->remote_rsc == NULL) {
         crmd_replies_needed++;
     }
 
     rsc_name = rsc_fail_name(rsc);
     if (is_remote_node(node)) {
         attr_options |= attrd_opt_remote;
     }
     rc = attrd_clear_delegate(NULL, host_uname, rsc_name, operation, interval,
                               NULL, attr_options);
     if (rc != pcmk_ok) {
         printf("Cleaned %s history on %s, but unable to clear failures: %s\n",
                rsc->id, host_uname, pcmk_strerror(rc));
     } else {
         printf("Cleaned up %s on %s\n", rsc->id, host_uname);
     }
     free(rsc_name);
 
     return rc;
 }
 
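 /*!
  * \internal
  * \brief Print warnings about configuration that blocks a resource
  *
  * Checks the resource's topmost parent for a target-role that keeps it
  * stopped or unpromoted, and for is-managed=false.
  *
  * \param[in] cib_conn  Connection to the CIB
  * \param[in] rsc       Resource to check
  */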
 void
 cli_resource_check(cib_t * cib_conn, resource_t *rsc)
 {
     int need_nl = 0;
     char *role_s = NULL;
     char *managed = NULL;
     resource_t *parent = uber_parent(rsc);
 
     find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id,
                        NULL, NULL, NULL, XML_RSC_ATTR_MANAGED, &managed);
 
     find_resource_attr(cib_conn, XML_NVPAIR_ATTR_VALUE, parent->id,
                        NULL, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, &role_s);
 
     if(role_s) {
         enum rsc_role_e role = text2role(role_s);
         if(role == RSC_ROLE_UNKNOWN) {
             // Treated as if unset
 
         } else if(role == RSC_ROLE_STOPPED) {
             printf("\n  * The configuration specifies that '%s' should remain stopped\n", parent->id);
             need_nl++;
 
         } else if(parent->variant == pe_master && role == RSC_ROLE_SLAVE) {
             printf("\n  * The configuration specifies that '%s' should not be promoted\n", parent->id);
             need_nl++;
         }
     }
 
     if(managed && crm_is_true(managed) == FALSE) {
         printf("%s  * The configuration prevents the cluster from stopping or starting '%s' (unmanaged)\n", need_nl == 0?"\n":"", parent->id);
         need_nl++;
     }
 
     if(need_nl) {
         printf("\n");
     }
 }
 
 int
 cli_resource_fail(crm_ipc_t * crmd_channel, const char *host_uname,
              const char *rsc_id, pe_working_set_t * data_set)
 {
     crm_warn("Failing: %s", rsc_id);
     return send_lrm_rsc_op(crmd_channel, CRM_OP_LRM_FAIL, host_uname, rsc_id, FALSE, data_set);
 }
 
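 /*!
  * \internal
  * \brief Combine a resource's instance parameters and meta-attributes
  *
  * Meta-attribute names are converted with crm_meta_name() before being merged
  * into the result.
  *
  * \param[in] rsc       Resource to evaluate
  * \param[in] data_set  Working set for evaluating attribute rules
  *
  * \return Newly created hash table (free with g_hash_table_destroy())
  */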
 static GHashTable *
 generate_resource_params(resource_t * rsc, pe_working_set_t * data_set)
 {
     GHashTable *params = NULL;
     GHashTable *meta = NULL;
     GHashTable *combined = NULL;
     GHashTableIter iter;
 
     if (!rsc) {
         crm_err("Resource does not exist in config");
         return NULL;
     }
 
     params = crm_str_table_new();
     meta = crm_str_table_new();
     combined = crm_str_table_new();
 
     get_rsc_attributes(params, rsc, NULL /* TODO: Pass in local node */ , data_set);
     get_meta_attributes(meta, rsc, NULL /* TODO: Pass in local node */ , data_set);
 
     if (params) {
         char *key = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, params);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             g_hash_table_insert(combined, strdup(key), strdup(value));
         }
         g_hash_table_destroy(params);
     }
 
     if (meta) {
         char *key = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, meta);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             char *crm_name = crm_meta_name(key);
 
             g_hash_table_insert(combined, crm_name, strdup(value));
         }
         g_hash_table_destroy(meta);
     }
 
     return combined;
 }
 
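 /*!
  * \internal
  * \brief Check whether a resource is active on a given host
  *
  * \param[in] rsc   Resource to check
  * \param[in] host  Node name or ID to check, or NULL to check all nodes
  *
  * \return TRUE if rsc is running on host (or anywhere, if host is NULL)
  */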
 static bool resource_is_running_on(resource_t *rsc, const char *host) 
 {
     bool found = TRUE;
     GListPtr hIter = NULL;
     GListPtr hosts = NULL;
 
     if(rsc == NULL) {
         return FALSE;
     }
 
     rsc->fns->location(rsc, &hosts, TRUE);
     for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) {
         pe_node_t *node = (pe_node_t *) hIter->data;
 
         if(strcmp(host, node->details->uname) == 0) {
             crm_trace("Resource %s is running on %s\n", rsc->id, host);
             goto done;
         } else if(strcmp(host, node->details->id) == 0) {
             crm_trace("Resource %s is running on %s\n", rsc->id, host);
             goto done;
         }
     }
 
     if(host != NULL) {
         crm_trace("Resource %s is not running on: %s\n", rsc->id, host);
         found = FALSE;
 
     } else if(host == NULL && hosts == NULL) {
         crm_trace("Resource %s is not running\n", rsc->id);
         found = FALSE;
     }
 
   done:
 
     g_list_free(hosts);
     return found;
 }
 
 /*!
  * \internal
  * \brief Create a list of all resources active on host from a given list
  *
  * \param[in] host      Name of host to check whether resources are active
  * \param[in] rsc_list  List of resources to check
  *
  * \return New list of resources from list that are active on host
  */
 static GList *
 get_active_resources(const char *host, GList *rsc_list)
 {
     GList *rIter = NULL;
     GList *active = NULL;
 
     for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) {
         resource_t *rsc = (resource_t *) rIter->data;
 
         /* Expand groups to their members, because if we're restarting a member
          * other than the first, we can't otherwise tell which resources are
          * stopping and starting.
          */
         if (rsc->variant == pe_group) {
             active = g_list_concat(active,
                                    get_active_resources(host, rsc->children));
         } else if (resource_is_running_on(rsc, host)) {
             active = g_list_append(active, strdup(rsc->id));
         }
     }
     return active;
 }
 
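 /*!
  * \internal
  * \brief Copy a list, omitting entries that also appear in another list
  *
  * \param[in] from   List to copy
  * \param[in] items  List of entries to omit from the copy
  *
  * \return New list sharing data pointers with from (free with g_list_free())
  * \note Entries are compared with strcmp(), so both lists should hold strings.
  */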
 static GList*
 subtract_lists(GList *from, GList *items) 
 {
     GList *item = NULL;
     GList *result = g_list_copy(from);
 
     for (item = items; item != NULL; item = item->next) {
         GList *candidate = NULL;
         for (candidate = from; candidate != NULL; candidate = candidate->next) {
             crm_info("Comparing %s with %s", candidate->data, item->data);
             if(strcmp(candidate->data, item->data) == 0) {
                 result = g_list_remove(result, candidate->data);
                 break;
             }
         }
     }
 
     return result;
 }
 
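 /* Log each member of a string list at trace level (for debugging) */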
 static void dump_list(GList *items, const char *tag) 
 {
     int lpc = 0;
     GList *item = NULL;
 
     for (item = items; item != NULL; item = item->next) {
         crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data);
         lpc++;
     }
 }
 
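 /* Print each member of a string list to stdout, prefixed with tag */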
 static void display_list(GList *items, const char *tag) 
 {
     GList *item = NULL;
 
     for (item = items; item != NULL; item = item->next) {
         fprintf(stdout, "%s%s\n", tag, (const char *)item->data);
     }
 }
 
 /*!
  * \internal
  * \brief Upgrade XML to latest schema version and use it as working set input
  *
  * This also updates the working set timestamp to the current time.
  *
  * \param[in] data_set   Working set instance to update
  * \param[in] xml        XML to use as input
  *
  * \return pcmk_ok on success, -ENOKEY if unable to upgrade XML
  * \note On success, caller is responsible for freeing memory allocated for
  *       data_set->now.
  * \todo This follows the example of other callers of cli_config_update()
  *       and returns -ENOKEY ("Required key not available") if that fails,
  *       but perhaps -pcmk_err_schema_validation would be better in that case.
  */
 int
 update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml)
 {
     if (cli_config_update(xml, NULL, FALSE) == FALSE) {
         return -ENOKEY;
     }
     data_set->input = *xml;
     data_set->now = crm_time_new(NULL);
     return pcmk_ok;
 }
 
 /*!
  * \internal
  * \brief Update a working set's XML input based on a CIB query
  *
  * \param[in] data_set   Data set instance to initialize
  * \param[in] cib        Connection to the CIB
  *
  * \return pcmk_ok on success, -errno on failure
  * \note On success, caller is responsible for freeing memory allocated for
  *       data_set->input and data_set->now.
  */
 static int
 update_working_set_from_cib(pe_working_set_t * data_set, cib_t *cib)
 {
     xmlNode *cib_xml_copy = NULL;
     int rc;
 
     rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
     if (rc != pcmk_ok) {
         fprintf(stderr, "Could not obtain the current CIB: %s (%d)\n", pcmk_strerror(rc), rc);
         return rc;
     }
     rc = update_working_set_xml(data_set, &cib_xml_copy);
     if (rc != pcmk_ok) {
         fprintf(stderr, "Could not upgrade the current CIB XML\n");
         free_xml(cib_xml_copy);
         return rc;
     }
     return pcmk_ok;
 }
 
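 /*!
  * \internal
  * \brief Refresh a working set from the CIB, optionally simulating a transition
  *
  * With simulate, the current CIB is copied into a temporary shadow CIB, the
  * pending transition is simulated against it, and the projected state is read
  * back into the working set.
  *
  * \param[in] cib       Connection to the CIB
  * \param[in] data_set  Working set to update
  * \param[in] simulate  Whether to simulate the transition first
  *
  * \return pcmk_ok on success, -errno on failure
  */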
 static int
 update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate)
 {
     char *pid = NULL;
     char *shadow_file = NULL;
     cib_t *shadow_cib = NULL;
     int rc;
 
     cleanup_alloc_calculations(data_set);
     rc = update_working_set_from_cib(data_set, cib);
     if (rc != pcmk_ok) {
         return rc;
     }
 
     if(simulate) {
         pid = crm_getpid_s();
         shadow_cib = cib_shadow_new(pid);
         shadow_file = get_shadow_file(pid);
 
         if (shadow_cib == NULL) {
             fprintf(stderr, "Could not create shadow cib: '%s'\n", pid);
             rc = -ENXIO;
             goto cleanup;
         }
 
         rc = write_xml_file(data_set->input, shadow_file, FALSE);
 
         if (rc < 0) {
             fprintf(stderr, "Could not populate shadow cib: %s (%d)\n", pcmk_strerror(rc), rc);
             goto cleanup;
         }
 
         rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command);
         if(rc != pcmk_ok) {
             fprintf(stderr, "Could not connect to shadow cib: %s (%d)\n", pcmk_strerror(rc), rc);
             goto cleanup;
         }
 
         do_calculations(data_set, data_set->input, NULL);
         run_simulation(data_set, shadow_cib, NULL, TRUE);
         rc = update_dataset(shadow_cib, data_set, FALSE);
 
     } else {
         cluster_status(data_set);
     }
 
   cleanup:
     /* Do not free data_set->input here, because rsc->xml must remain valid later on */
     cib_delete(shadow_cib);
     free(pid);
 
     if(shadow_file) {
         unlink(shadow_file);
         free(shadow_file);
     }
 
     return rc;
 }
 
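 /*!
  * \internal
  * \brief Get the longest stop timeout (in milliseconds) of a resource
  *
  * For collective resources, this is the maximum over all children.
  *
  * \param[in] data_set  Working set containing the resource
  * \param[in] rsc       Resource to check
  *
  * \return Timeout (in ms) taken from the resource's stop action
  */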
 static int
 max_delay_for_resource(pe_working_set_t * data_set, resource_t *rsc) 
 {
     int delay = 0;
     int max_delay = 0;
 
     if(rsc && rsc->children) {
         GList *iter = NULL;
 
         for(iter = rsc->children; iter; iter = iter->next) {
             resource_t *child = (resource_t *)iter->data;
 
             delay = max_delay_for_resource(data_set, child);
             if(delay > max_delay) {
                 double seconds = delay / 1000.0;
                 crm_trace("Calculated new delay of %.1fs due to %s", seconds, child->id);
                 max_delay = delay;
             }
         }
 
     } else if(rsc) {
         char *key = crm_strdup_printf("%s_%s_0", rsc->id, RSC_STOP);
         action_t *stop = custom_action(rsc, key, RSC_STOP, NULL, TRUE, FALSE, data_set);
         const char *value = g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT);
 
         max_delay = crm_int_helper(value, NULL);
         pe_free_action(stop);
     }
 
     return max_delay;
 }
 
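 /*!
  * \internal
  * \brief Get a reasonable time (in seconds) to wait for resources to stop
  *
  * \param[in] data_set   Working set containing the resources
  * \param[in] resources  List of resource names
  *
  * \return Longest stop timeout found, converted to seconds, plus 5s padding
  */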
 static int
 max_delay_in(pe_working_set_t * data_set, GList *resources) 
 {
     int max_delay = 0;
     GList *item = NULL;
 
     for (item = resources; item != NULL; item = item->next) {
         int delay = 0;
         resource_t *rsc = pe_find_resource(data_set->resources, (const char *)item->data);
 
         delay = max_delay_for_resource(data_set, rsc);
 
         if(delay > max_delay) {
             double seconds = delay / 1000.0;
             crm_trace("Calculated new delay of %.1fs due to %s", seconds, rsc->id);
             max_delay = delay;
         }
     }
 
     return 5 + (max_delay / 1000);
 }
 
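 /* A restart is still waiting on starts if resources remain in the delta list,
  * or if the restarted resource itself has not yet come back up on the
  * requested host.
  */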
 #define waiting_for_starts(d, r, h) ((g_list_length(d) > 0) || \
                                     (resource_is_running_on((r), (h)) == FALSE))
 
 /*!
  * \internal
  * \brief Restart a resource (on a particular host if requested).
  *
  * \param[in] rsc        The resource to restart
  * \param[in] host       The host to restart the resource on (or NULL for all)
  * \param[in] timeout_ms Consider failed if actions do not complete in this time
  *                       (specified in milliseconds, but a two-second
  *                       granularity is actually used; if 0, a timeout will be
  *                       calculated based on the resource timeout)
  * \param[in] cib        Connection to the CIB for modifying/checking resource
  *
  * \return pcmk_ok on success, -errno on failure (exits on certain failures)
  */
 int
 cli_resource_restart(resource_t * rsc, const char *host, int timeout_ms, cib_t * cib)
 {
     int rc = 0;
     int lpc = 0;
     int before = 0;
     int step_timeout_s = 0;
     int sleep_interval = 2;
     int timeout = timeout_ms / 1000;
 
     bool stop_via_ban = FALSE;
     char *rsc_id = NULL;
     char *orig_target_role = NULL;
 
     GList *list_delta = NULL;
     GList *target_active = NULL;
     GList *current_active = NULL;
     GList *restart_target_active = NULL;
 
     pe_working_set_t data_set;
 
     if(resource_is_running_on(rsc, host) == FALSE) {
         const char *id = rsc->clone_name?rsc->clone_name:rsc->id;
         if(host) {
             printf("%s is not running on %s and so cannot be restarted\n", id, host);
         } else {
             printf("%s is not running anywhere and so cannot be restarted\n", id);
         }
         return -ENXIO;
     }
 
     /* We might set the target-role meta-attribute */
     attr_set_type = XML_TAG_META_SETS;
 
     rsc_id = strdup(rsc->id);
     if ((pe_rsc_is_clone(rsc) || pe_bundle_replicas(rsc)) && host) {
         stop_via_ban = TRUE;
     }
 
     /*
       Approach:
         1. Grab the full CIB.
         2. Determine the originally active resources.
         3. Disable (or ban) the resource.
         4. Poll the CIB, watching for the affected resources to stop; without
            --timeout, calculate a stop timeout for each step and wait that long.
         5. If we hit --timeout or the service timeout, re-enable (or un-ban) the
            resource, report failure, and indicate which resources we could not
            take down.
         6. If everything stopped, re-enable (or un-ban) the resource.
         7. Poll the CIB, watching for the affected resources to start; without
            --timeout, calculate a start timeout for each step and wait that long.
         8. If we hit --timeout or the service timeout, report a (different)
            failure and indicate which resources we could not bring back up.
         9. Report success.

       Possible optimizations:
       - Use constraints to determine an ordered list of affected resources.
       - Allow a --no-deps option (i.e. --force-restart).
     */
 
     set_working_set_defaults(&data_set);
     rc = update_dataset(cib, &data_set, FALSE);
     if(rc != pcmk_ok) {
         fprintf(stdout, "Could not get new resource list: %s (%d)\n", pcmk_strerror(rc), rc);
         free(rsc_id);
         return rc;
     }
 
     restart_target_active = get_active_resources(host, data_set.resources);
     current_active = get_active_resources(host, data_set.resources);
 
     dump_list(current_active, "Origin");
 
     if (stop_via_ban) {
         /* Stop the clone or bundle instance by banning it from the host */
         BE_QUIET = TRUE;
         rc = cli_resource_ban(rsc_id, host, NULL, cib);
 
     } else {
         /* Stop the resource by setting target-role to Stopped.
          * Remember any existing target-role so we can restore it later
          * (though it matters only if the original role was Slave).
          */
         char *lookup_id = clone_strip(rsc->id);
 
         find_resource_attr(cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL,
                            NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role);
         free(lookup_id);
         rc = cli_resource_update_attribute(rsc_id, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, RSC_STOPPED, FALSE, cib, &data_set);
     }
     if(rc != pcmk_ok) {
         fprintf(stderr, "Could not set target-role for %s: %s (%d)\n", rsc_id, pcmk_strerror(rc), rc);
         if (current_active) {
             g_list_free_full(current_active, free);
         }
         if (restart_target_active) {
             g_list_free_full(restart_target_active, free);
         }
         free(rsc_id);
         return crm_exit(rc);
     }
 
     rc = update_dataset(cib, &data_set, TRUE);
     if(rc != pcmk_ok) {
         fprintf(stderr, "Could not determine which resources would be stopped\n");
         goto failure;
     }
 
     target_active = get_active_resources(host, data_set.resources);
     dump_list(target_active, "Target");
 
     list_delta = subtract_lists(current_active, target_active);
     fprintf(stdout, "Waiting for %d resources to stop:\n", g_list_length(list_delta));
     display_list(list_delta, " * ");
 
     step_timeout_s = timeout / sleep_interval;
     while(g_list_length(list_delta) > 0) {
         before = g_list_length(list_delta);
         if(timeout_ms == 0) {
             step_timeout_s = max_delay_in(&data_set, list_delta) / sleep_interval;
         }
 
         /* We probably don't need the entire step timeout */
         for(lpc = 0; lpc < step_timeout_s && g_list_length(list_delta) > 0; lpc++) {
             sleep(sleep_interval);
             if(timeout) {
                 timeout -= sleep_interval;
                 crm_trace("%ds remaining", timeout);
             }
             rc = update_dataset(cib, &data_set, FALSE);
             if(rc != pcmk_ok) {
                 fprintf(stderr, "Could not determine which resources were stopped\n");
                 goto failure;
             }
 
             if (current_active) {
                 g_list_free_full(current_active, free);
             }
             current_active = get_active_resources(host, data_set.resources);
             g_list_free(list_delta);
             list_delta = subtract_lists(current_active, target_active);
             dump_list(current_active, "Current");
             dump_list(list_delta, "Delta");
         }
 
         crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before);
         if(before == g_list_length(list_delta)) {
             /* aborted during stop phase, print the contents of list_delta */
             fprintf(stderr, "Could not complete shutdown of %s, %d resources remaining\n", rsc_id, g_list_length(list_delta));
             display_list(list_delta, " * ");
             rc = -ETIME;
             goto failure;
         }
 
     }
 
     if (stop_via_ban) {
         rc = cli_resource_clear(rsc_id, host, NULL, cib);
 
     } else if (orig_target_role) {
         rc = cli_resource_update_attribute(rsc_id, NULL, NULL,
                                            XML_RSC_ATTR_TARGET_ROLE,
                                            orig_target_role, FALSE, cib,
                                            &data_set);
         free(orig_target_role);
         orig_target_role = NULL;
     } else {
         rc = cli_resource_delete_attribute(rsc_id, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, cib, &data_set);
     }
 
     if(rc != pcmk_ok) {
         fprintf(stderr, "Could not unset target-role for %s: %s (%d)\n", rsc_id, pcmk_strerror(rc), rc);
         free(rsc_id);
         return crm_exit(rc);
     }
 
     if (target_active) {
         g_list_free_full(target_active, free);
     }
     target_active = restart_target_active;
     if (list_delta) {
         g_list_free(list_delta);
     }
     list_delta = subtract_lists(target_active, current_active);
     fprintf(stdout, "Waiting for %d resources to start again:\n", g_list_length(list_delta));
     display_list(list_delta, " * ");
 
     step_timeout_s = timeout / sleep_interval;
     while (waiting_for_starts(list_delta, rsc, host)) {
         before = g_list_length(list_delta);
         if(timeout_ms == 0) {
             step_timeout_s = max_delay_in(&data_set, list_delta) / sleep_interval;
         }
 
         /* We probably don't need the entire step timeout */
         for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) {
 
             sleep(sleep_interval);
             if(timeout) {
                 timeout -= sleep_interval;
                 crm_trace("%ds remaining", timeout);
             }
 
             rc = update_dataset(cib, &data_set, FALSE);
             if(rc != pcmk_ok) {
                 fprintf(stderr, "Could not determine which resources were started\n");
                 goto failure;
             }
 
             if (current_active) {
                 g_list_free_full(current_active, free);
             }
 
             /* It's OK if dependent resources moved to a different node,
              * so we check active resources on all nodes.
              */
             current_active = get_active_resources(NULL, data_set.resources);
             g_list_free(list_delta);
             list_delta = subtract_lists(target_active, current_active);
             dump_list(current_active, "Current");
             dump_list(list_delta, "Delta");
         }
 
         if(before == g_list_length(list_delta)) {
             /* aborted during start phase, print the contents of list_delta */
             fprintf(stdout, "Could not complete restart of %s, %d resources remaining\n", rsc_id, g_list_length(list_delta));
             display_list(list_delta, " * ");
             rc = -ETIME;
             goto failure;
         }
 
     }
 
     rc = pcmk_ok;
     goto done;
 
   failure:
     if (stop_via_ban) {
         cli_resource_clear(rsc_id, host, NULL, cib);
     } else if (orig_target_role) {
         cli_resource_update_attribute(rsc_id, NULL, NULL,
                                       XML_RSC_ATTR_TARGET_ROLE,
                                       orig_target_role, FALSE, cib, &data_set);
         free(orig_target_role);
     } else {
         cli_resource_delete_attribute(rsc_id, NULL, NULL, XML_RSC_ATTR_TARGET_ROLE, cib, &data_set);
     }
 
 done:
     if (list_delta) {
         g_list_free(list_delta);
     }
     if (current_active) {
         g_list_free_full(current_active, free);
     }
     if (target_active && (target_active != restart_target_active)) {
         g_list_free_full(target_active, free);
     }
     if (restart_target_active) {
         g_list_free_full(restart_target_active, free);
     }
     cleanup_alloc_calculations(&data_set);
     free(rsc_id);
     return rc;
 }
 
 #define action_is_pending(action) \
     ((is_set((action)->flags, pe_action_optional) == FALSE) \
     && (is_set((action)->flags, pe_action_runnable) == TRUE) \
     && (is_set((action)->flags, pe_action_pseudo) == FALSE))
 
 /*!
  * \internal
  * \brief Return TRUE if any actions in a list are pending
  *
  * \param[in] actions   List of actions to check
  *
  * \return TRUE if any actions in the list are pending, FALSE otherwise
  */
 static bool
 actions_are_pending(GListPtr actions)
 {
     GListPtr action;
 
     for (action = actions; action != NULL; action = action->next) {
         if (action_is_pending((action_t *) action->data)) {
             return TRUE;
         }
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Print pending actions to stderr
  *
  * \param[in] actions   List of actions to check
  *
  * \return void
  */
 static void
 print_pending_actions(GListPtr actions)
 {
     GListPtr action;
 
     fprintf(stderr, "Pending actions:\n");
     for (action = actions; action != NULL; action = action->next) {
         action_t *a = (action_t *) action->data;
 
         if (action_is_pending(a)) {
             fprintf(stderr, "\tAction %d: %s", a->id, a->uuid);
             if (a->node) {
                 fprintf(stderr, "\ton %s", a->node->details->uname);
             }
             fprintf(stderr, "\n");
         }
     }
 }
 
 /* For --wait, timeout (in seconds) to use if caller doesn't specify one */
 #define WAIT_DEFAULT_TIMEOUT_S (60 * 60)
 
 /* For --wait, how long to sleep between cluster state checks */
 #define WAIT_SLEEP_S (2)
 
 /*!
  * \internal
  * \brief Wait until all pending cluster actions are complete
  *
  * This waits until either the CIB's transition graph is idle or a timeout is
  * reached.
  *
  * \param[in] timeout_ms Consider failed if actions do not complete in this time
  *                       (specified in milliseconds, but one-second granularity
  *                       is actually used; if 0, a default will be used)
  * \param[in] cib        Connection to the CIB
  *
  * \return pcmk_ok on success, -errno on failure
  */
 int
 wait_till_stable(int timeout_ms, cib_t * cib)
 {
     pe_working_set_t data_set;
     int rc = -1;
     int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S;
     time_t expire_time = time(NULL) + timeout_s;
     time_t time_diff;
 
     set_working_set_defaults(&data_set);
     do {
 
         /* Abort if timeout is reached */
         time_diff = expire_time - time(NULL);
         if (time_diff > 0) {
             crm_info("Waiting up to %d seconds for cluster actions to complete", time_diff);
         } else {
             print_pending_actions(data_set.actions);
             cleanup_alloc_calculations(&data_set);
             return -ETIME;
         }
         if (rc == pcmk_ok) { /* this avoids sleep on first loop iteration */
             sleep(WAIT_SLEEP_S);
         }
 
         /* Get latest transition graph */
         cleanup_alloc_calculations(&data_set);
         rc = update_working_set_from_cib(&data_set, cib);
         if (rc != pcmk_ok) {
             cleanup_alloc_calculations(&data_set);
             return rc;
         }
         do_calculations(&data_set, data_set.input, NULL);
 
     } while (actions_are_pending(data_set.actions));
 
     return pcmk_ok;
 }
 
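 /*!
  * \internal
  * \brief Execute a resource agent action directly, outside cluster control
  *
  * Maps "validate" and the force-* commands to agent actions, runs the agent
  * synchronously via the services library, and prints its output.
  *
  * \param[in] rsc_id         Resource to act on
  * \param[in] rsc_action     Command name (for example, "force-start")
  * \param[in] override_hash  Parameter values overriding the configuration
  * \param[in] cib            Connection to the CIB
  * \param[in] data_set       Working set containing the resource
  *
  * \return Resource agent return code (exits on certain errors)
  */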
 int
 cli_resource_execute(const char *rsc_id, const char *rsc_action, GHashTable *override_hash, cib_t * cib, pe_working_set_t *data_set)
 {
     int rc = pcmk_ok;
     svc_action_t *op = NULL;
     const char *rtype = NULL;
     const char *rprov = NULL;
     const char *rclass = NULL;
     const char *action = NULL;
     GHashTable *params = NULL;
     resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
 
     if (rsc == NULL) {
         CMD_ERR("Must supply a resource id with -r");
         return -ENXIO;
     }
 
     if (safe_str_eq(rsc_action, "validate")) {
         action = "validate-all";
 
     } else if (safe_str_eq(rsc_action, "force-check")) {
         action = "monitor";
 
     } else if (safe_str_eq(rsc_action, "force-stop")) {
         action = rsc_action+6;
 
     } else if (safe_str_eq(rsc_action, "force-start")
                || safe_str_eq(rsc_action, "force-demote")
                || safe_str_eq(rsc_action, "force-promote")) {
         action = rsc_action+6;
 
         if(pe_rsc_is_clone(rsc)) {
             rc = cli_resource_search(rsc_id, data_set);
             if(rc > 0 && do_force == FALSE) {
                 CMD_ERR("It is not safe to %s %s here: the cluster claims it is already active", action, rsc_id);
                 CMD_ERR("Try setting target-role=stopped first or specifying --force");
                 crm_exit(EPERM);
             }
         }
     }
 
     if(pe_rsc_is_clone(rsc)) {
         /* Grab the first child resource in the hope it's not a group */
         rsc = rsc->children->data;
     }
 
     if(rsc->variant == pe_group) {
         CMD_ERR("Sorry, --%s doesn't support group resources", rsc_action);
         crm_exit(EOPNOTSUPP);
     }
 
     rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);
 
     if (safe_str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH)) {
         CMD_ERR("Sorry, --%s doesn't support %s resources yet", rsc_action, rclass);
         crm_exit(EOPNOTSUPP);
     }
 
     params = generate_resource_params(rsc, data_set);
 
     /* Add the crm_feature_set environment variable needed by some resource agents */
     g_hash_table_insert(params, strdup(XML_ATTR_CRM_VERSION), strdup(CRM_FEATURE_SET));
 
     op = resources_action_create(rsc->id, rclass, rprov, rtype, action, 0, -1, params, 0);
     if (op == NULL) {
         /* Re-run with stderr enabled so we can display a sane error message */
         crm_enable_stderr(TRUE);
         op = resources_action_create(rsc->id, rclass, rprov, rtype, action, 0,
                                      -1, params, 0);
 
         /* We know op will be NULL, but this makes static analysis happy */
         services_action_free(op);
 
         return crm_exit(EINVAL);
     }
 
     if(do_trace) {
         setenv("OCF_TRACE_RA", "1", 1);
     }
 
     if (override_hash) {
         GHashTableIter iter;
         char *name = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, override_hash);
         while (g_hash_table_iter_next(&iter, (gpointer *) & name, (gpointer *) & value)) {
             printf("Overriding the cluster configuration for '%s' with '%s' = '%s'\n",
                    rsc->id, name, value);
             g_hash_table_replace(op->params, strdup(name), strdup(value));
         }
     }
 
     if (services_action_sync(op)) {
         int more, lpc, last;
         char *local_copy = NULL;
 
         if (op->status == PCMK_LRM_OP_DONE) {
             printf("Operation %s for %s (%s:%s:%s) returned %d\n",
                    action, rsc->id, rclass, rprov ? rprov : "", rtype, op->rc);
         } else {
             printf("Operation %s for %s (%s:%s:%s) failed: %d\n",
                    action, rsc->id, rclass, rprov ? rprov : "", rtype, op->status);
         }
 
         /* Hide output for validate-all unless verbose output was requested */
         if (!do_trace && safe_str_eq(action, "validate-all"))
             goto done;
 
         if (op->stdout_data) {
             local_copy = strdup(op->stdout_data);
             more = strlen(local_copy);
             last = 0;
 
             for (lpc = 0; lpc < more; lpc++) {
                 if (local_copy[lpc] == '\n' || local_copy[lpc] == 0) {
                     local_copy[lpc] = 0;
                     printf(" >  stdout: %s\n", local_copy + last);
                     last = lpc + 1;
                 }
             }
             free(local_copy);
         }
         if (op->stderr_data) {
             local_copy = strdup(op->stderr_data);
             more = strlen(local_copy);
             last = 0;
 
             for (lpc = 0; lpc < more; lpc++) {
                 if (local_copy[lpc] == '\n' || local_copy[lpc] == 0) {
                     local_copy[lpc] = 0;
                     printf(" >  stderr: %s\n", local_copy + last);
                     last = lpc + 1;
                 }
             }
             free(local_copy);
         }
     }
   done:
     rc = op->rc;
     services_action_free(op);
     return rc;
 }
 
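 /*!
  * \internal
  * \brief Move a resource (or, with --master, its master role) to a node
  *
  * Clears any constraints left over from previous moves, then records a
  * location preference for the destination; with --force, also bans the
  * current location.
  *
  * \param[in] rsc_id     Resource to move
  * \param[in] host_name  Name of destination node
  * \param[in] cib        Connection to the CIB
  * \param[in] data_set   Working set containing resource and nodes
  *
  * \return pcmk_ok on success, -errno on failure
  */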
 int
 cli_resource_move(const char *rsc_id, const char *host_name, cib_t * cib, pe_working_set_t *data_set)
 {
     int rc = -EINVAL;
     int count = 0;
     node_t *current = NULL;
     node_t *dest = pe_find_node(data_set->nodes, host_name);
     resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
     bool cur_is_dest = FALSE;
 
     if (rsc == NULL) {
         CMD_ERR("Resource '%s' not moved: not found", rsc_id);
         return -ENXIO;
 
     } else if (scope_master && rsc->variant != pe_master) {
         resource_t *p = uber_parent(rsc);
         if(p->variant == pe_master) {
             CMD_ERR("Using parent '%s' for --move command instead of '%s'.", rsc->id, rsc_id);
             rsc_id = p->id;
             rsc = p;
 
         } else {
             CMD_ERR("Ignoring '--master' option: not valid for %s resources.",
                     get_resource_typename(rsc->variant));
             scope_master = FALSE;
         }
     }
 
     if(rsc->variant == pe_master) {
         GListPtr iter = NULL;
 
         for(iter = rsc->children; iter; iter = iter->next) {
             resource_t *child = (resource_t *)iter->data;
             enum rsc_role_e child_role = child->fns->state(child, TRUE);
 
             if(child_role == RSC_ROLE_MASTER) {
                 rsc = child;
                 count++;
             }
         }
 
         if(scope_master == FALSE && count == 0) {
             count = g_list_length(rsc->running_on);
         }
 
     } else if (pe_rsc_is_clone(rsc)) {
         count = g_list_length(rsc->running_on);
 
     } else if (g_list_length(rsc->running_on) > 1) {
         CMD_ERR("Resource '%s' not moved: active on multiple nodes", rsc_id);
         return rc;
     }
 
     if(dest == NULL) {
         CMD_ERR("Error performing operation: node '%s' is unknown", host_name);
         return -ENXIO;
     }
 
     if(g_list_length(rsc->running_on) == 1) {
         current = rsc->running_on->data;
     }
 
     if(current == NULL) {
         /* Nothing to check */
 
     } else if(scope_master && rsc->fns->state(rsc, TRUE) != RSC_ROLE_MASTER) {
         crm_trace("%s is already active on %s but not in correct state", rsc_id, dest->details->uname);
     } else if (safe_str_eq(current->details->uname, dest->details->uname)) {
         cur_is_dest = TRUE;
         if (do_force) {
             crm_info("%s is already %s on %s, reinforcing placement with location constraint.",
                      rsc_id, scope_master?"promoted":"active", dest->details->uname);
         } else {
             CMD_ERR("Error performing operation: %s is already %s on %s",
                     rsc_id, scope_master?"promoted":"active", dest->details->uname);
             return rc;
         }
     }
 
     /* Clear any previous constraints for 'dest' */
     cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib);
 
     /* Record an explicit preference for 'dest' */
     rc = cli_resource_prefer(rsc_id, dest->details->uname, cib);
 
     crm_trace("%s%s now prefers node %s%s",
               rsc->id, scope_master?" (master)":"", dest->details->uname, do_force?"(forced)":"");
 
     /* only ban the previous location if current location != destination location.
      * it is possible to use -M to enforce a location without regard of where the
      * resource is currently located */
     if(do_force && (cur_is_dest == FALSE)) {
         /* Ban the original location if possible */
         if(current) {
             (void)cli_resource_ban(rsc_id, current->details->uname, NULL, cib);
 
         } else if(count > 1) {
             CMD_ERR("Resource '%s' is currently %s in %d locations.  One may now move one to %s",
                     rsc_id, scope_master?"promoted":"active", count, dest->details->uname);
             CMD_ERR("You can prevent '%s' from being %s at a specific location with:"
                     " --ban %s--host <name>", rsc_id, scope_master?"promoted":"active", scope_master?"--master ":"");
 
         } else {
             crm_trace("Not banning %s from its current location: not active", rsc_id);
         }
     }
 
     return rc;
 }
 
 static void
 cli_resource_why_without_rsc_and_host(cib_t *cib_conn, GListPtr resources)
 {
     GListPtr lpc = NULL;
     GListPtr hosts = NULL;
 
     for (lpc = resources; lpc != NULL; lpc = lpc->next) {
         resource_t *rsc = (resource_t *) lpc->data;
         rsc->fns->location(rsc, &hosts, TRUE);
 
-	if ( hosts == NULL ) {
-	    printf("Resource %s is not running\n",rsc->id);
-	} else {
-	    printf("Resource %s is running\n",rsc->id);
-	}
+        if (hosts == NULL) {
+            printf("Resource %s is not running\n", rsc->id);
+        } else {
+            printf("Resource %s is running\n", rsc->id);
+        }
 
         cli_resource_check(cib_conn, rsc);
         g_list_free(hosts);
         hosts = NULL;
     }
 }
 
 static void
 cli_resource_why_with_rsc_and_host(cib_t *cib_conn, GListPtr resources, const char *rsc_id, const char *host_uname)
 {
     resource_t *rsc = NULL;
 
     rsc = pe_find_resource(resources, rsc_id);
     if (resource_is_running_on(rsc, host_uname)) {
         printf("Resource %s is running on host %s\n", rsc->id, host_uname);
     } else {
-        printf("Resource %s is assigned host %s but not running\n",rsc->id,host_uname);
-    }     
+        printf("Resource %s is not running on host %s\n", rsc->id, host_uname);
+    }
     cli_resource_check(cib_conn, rsc);
 }
 
-static void 
+static void
 cli_resource_why_without_rsc_with_host(cib_t *cib_conn, GListPtr resources, node_t *node)
 {
     const char *host_uname = node->details->uname;
-    GListPtr allResources = node->details->allocated_rsc; 
+    GListPtr allResources = node->details->allocated_rsc;
     GListPtr activeResources = node->details->running_rsc;
     GListPtr inactiveResources = subtract_lists(allResources, activeResources);
     GListPtr lpc = NULL;
 
     for (lpc = activeResources; lpc != NULL; lpc = lpc->next) {
         resource_t *rsc = (resource_t *) lpc->data;
         printf("Resource %s is running on host %s\n",rsc->id,host_uname);
         cli_resource_check(cib_conn,rsc);
     }
-    
+
     for (lpc = inactiveResources; lpc != NULL; lpc = lpc->next) {
         resource_t *rsc = (resource_t *) lpc->data;
-        printf("Resource %s is assigned host %s but not running\n",rsc->id,host_uname);
+        printf("Resource %s is assigned to host %s but not running\n",
+               rsc->id, host_uname);
         cli_resource_check(cib_conn, rsc);
     }

     g_list_free(allResources);
     g_list_free(activeResources);
     g_list_free(inactiveResources);
 }
 
 static void
 cli_resource_why_with_rsc_without_host(cib_t *cib_conn, GListPtr resources, const char *rsc_id)
 {
     resource_t *rsc = NULL;
     GListPtr hosts = NULL;
 
     rsc = pe_find_resource(resources, rsc_id);
     rsc->fns->location(rsc, &hosts, TRUE);
     if (hosts == NULL) {
         printf("Resource %s is not running\n", rsc->id);
     } else {
-	printf("Resource %s is running\n",rsc->id);
+        printf("Resource %s is running\n",rsc->id);
     }
     cli_resource_check(cib_conn, rsc);
 
     g_list_free(hosts);
     hosts = NULL;
 }
 
 void cli_resource_why(cib_t *cib_conn, GListPtr resources, const char *rsc_id, node_t *node)
 {
-    const char* host_uname = node == NULL ? NULL : node->details->uname; 
-
-    if (rsc_id == NULL && host_uname == NULL) {
-        cli_resource_why_without_rsc_and_host(cib_conn,resources); 
-    } else if (rsc_id != NULL && host_uname != NULL) {
-        cli_resource_why_with_rsc_and_host(cib_conn,resources,rsc_id,host_uname);
-    } else if (rsc_id == NULL && host_uname != NULL) {
-        cli_resource_why_without_rsc_with_host(cib_conn,resources,node);
-    } else if (rsc_id != NULL && host_uname == NULL) {
-        cli_resource_why_with_rsc_without_host(cib_conn,resources,rsc_id);
-    } 
+    const char *host_uname = (node == NULL)? NULL : node->details->uname;
+
+    if ((rsc_id == NULL) && (host_uname == NULL)) {
+        cli_resource_why_without_rsc_and_host(cib_conn, resources);
+
+    } else if ((rsc_id != NULL) && (host_uname != NULL)) {
+        cli_resource_why_with_rsc_and_host(cib_conn, resources, rsc_id,
+                                           host_uname);
+
+    } else if ((rsc_id == NULL) && (host_uname != NULL)) {
+        cli_resource_why_without_rsc_with_host(cib_conn, resources, node);
+
+    } else if ((rsc_id != NULL) && (host_uname == NULL)) {
+        cli_resource_why_with_rsc_without_host(cib_conn, resources, rsc_id);
+    }
 }
diff --git a/tools/fake_transition.c b/tools/fake_transition.c
index 5af86c3e78..7477850518 100644
--- a/tools/fake_transition.c
+++ b/tools/fake_transition.c
@@ -1,852 +1,853 @@
 /*
  * Copyright (C) 2009 Andrew Beekhof <andrew@beekhof.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  *
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <stdio.h>
 #include <unistd.h>
 #include <stdlib.h>
 
 #include <sys/stat.h>
 #include <sys/param.h>
 #include <sys/types.h>
 #include <dirent.h>
 
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/common/util.h>
 #include <crm/transition.h>
 #include <crm/common/iso8601.h>
 #include <crm/pengine/status.h>
 #include <allocate.h>
 #include "fake_transition.h"
 
 static bool fake_quiet = FALSE;
 static cib_t *fake_cib = NULL;
 static GListPtr fake_resource_list = NULL;
 static GListPtr fake_op_fail_list = NULL;
 gboolean bringing_nodes_online = FALSE;
 
 #define STATUS_PATH_MAX 512
 
 #define quiet_log(fmt, args...) do {              \
               if(fake_quiet) {                         \
                   crm_trace(fmt, ##args);         \
               } else {                            \
                   printf(fmt , ##args);           \
               }                                   \
     } while(0)
 
 #define new_node_template "//"XML_CIB_TAG_NODE"[@uname='%s']"
 #define node_template "//"XML_CIB_TAG_STATE"[@uname='%s']"
 #define rsc_template "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']"
 #define op_template  "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']/"XML_LRM_TAG_RSC_OP"[@id='%s']"
 /* #define op_template  "//"XML_CIB_TAG_STATE"[@uname='%s']//"XML_LRM_TAG_RESOURCE"[@id='%s']/"XML_LRM_TAG_RSC_OP"[@id='%s' and @"XML_LRM_ATTR_CALLID"='%d']" */
 
 
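 /* Inject a transient node attribute (name=value) into a node_state entry */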
 static void
 inject_transient_attr(xmlNode * cib_node, const char *name, const char *value)
 {
     xmlNode *attrs = NULL;
     xmlNode *instance_attrs = NULL;
     xmlChar *node_path;
     const char *node_uuid = ID(cib_node);
 
     node_path = xmlGetNodePath(cib_node);
     quiet_log(" + Injecting attribute %s=%s into %s '%s'\n",
               name, value, node_path, ID(cib_node));
     free(node_path);
 
     attrs = first_named_child(cib_node, XML_TAG_TRANSIENT_NODEATTRS);
     if (attrs == NULL) {
         attrs = create_xml_node(cib_node, XML_TAG_TRANSIENT_NODEATTRS);
         crm_xml_add(attrs, XML_ATTR_ID, node_uuid);
     }
 
     instance_attrs = first_named_child(attrs, XML_TAG_ATTR_SETS);
     if (instance_attrs == NULL) {
         instance_attrs = create_xml_node(attrs, XML_TAG_ATTR_SETS);
         crm_xml_add(instance_attrs, XML_ATTR_ID, node_uuid);
     }
 
     crm_create_nvpair_xml(instance_attrs, NULL, name, value);
 }
 
 static void
 update_failcounts(xmlNode * cib_node, const char *resource, const char *task,
                   int interval, int rc)
 {
     if (rc == 0) {
         return;
 
     } else if (rc == 7 && interval == 0) {
         return;
 
     } else {
         char *name = NULL;
         char *now = crm_itoa(time(NULL));
 
         name = crm_failcount_name(resource, task, interval);
         inject_transient_attr(cib_node, name, "value++");
         free(name);
 
         name = crm_lastfailure_name(resource, task, interval);
         inject_transient_attr(cib_node, name, now);
         free(name);
         free(now);
     }
 }
 
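 /* Ensure a <node> entry exists for the given node, creating a minimal one
  * (with uname as its ID) if the CIB has none.
  */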
 static void
 create_node_entry(cib_t * cib_conn, const char *node)
 {
     int rc = pcmk_ok;
     int max = strlen(new_node_template) + strlen(node) + 1;
     char *xpath = NULL;
 
     xpath = calloc(1, max);
 
     snprintf(xpath, max, new_node_template, node);
     rc = cib_conn->cmds->query(cib_conn, xpath, NULL, cib_xpath | cib_sync_call | cib_scope_local);
 
     if (rc == -ENXIO) {
         xmlNode *cib_object = create_xml_node(NULL, XML_CIB_TAG_NODE);
 
         /* Use the node uname as its UUID, as corosync/openais do */
         crm_xml_add(cib_object, XML_ATTR_ID, node);
         crm_xml_add(cib_object, XML_ATTR_UNAME, node);
         cib_conn->cmds->create(cib_conn, XML_CIB_TAG_NODES, cib_object,
                                cib_sync_call | cib_scope_local);
         /* Not bothering with subsequent query to see if it exists,
            we'll bomb out later in the call to query_node_uuid()... */
 
         free_xml(cib_object);
     }
 
     free(xpath);
 }
 
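 /* Build a fake lrmd event for a resource action, assigning it a call ID one
  * higher than any recorded in the resource's operation history.
  */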
 static lrmd_event_data_t *
 create_op(xmlNode * cib_resource, const char *task, int interval, int outcome)
 {
     lrmd_event_data_t *op = NULL;
     xmlNode *xop = NULL;
 
     op = calloc(1, sizeof(lrmd_event_data_t));
 
     op->rsc_id = strdup(ID(cib_resource));
     op->interval = interval;
     op->op_type = strdup(task);
 
     op->rc = outcome;
     op->op_status = 0;
     op->params = NULL;          /* TODO: Fill me in */
     op->t_run = time(NULL);
     op->t_rcchange = op->t_run;
 
     op->call_id = 0;
     for (xop = __xml_first_child(cib_resource); xop != NULL; xop = __xml_next(xop)) {
         int tmp = 0;
 
         crm_element_value_int(xop, XML_LRM_ATTR_CALLID, &tmp);
         if (tmp > op->call_id) {
             op->call_id = tmp;
         }
     }
     op->call_id++;
 
     return op;
 }
 
 static xmlNode *
 inject_op(xmlNode * cib_resource, lrmd_event_data_t * op, int target_rc)
 {
     return create_operation_update(cib_resource, op, CRM_FEATURE_SET, target_rc, NULL, crm_system_name,
                                    LOG_DEBUG_2);
 }
 
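 /* Return the node_state entry for the given node, creating it (and, with
  * bringing_nodes_online, the node entry itself) if not already present.
  */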
 static xmlNode *
 inject_node_state(cib_t * cib_conn, const char *node, const char *uuid)
 {
     int rc = pcmk_ok;
     xmlNode *cib_object = NULL;
     char *xpath = crm_strdup_printf(node_template, node);
 
     if (bringing_nodes_online) {
         create_node_entry(cib_conn, node);
     }
 
     rc = cib_conn->cmds->query(cib_conn, xpath, &cib_object,
                                cib_xpath | cib_sync_call | cib_scope_local);
 
     if (cib_object && ID(cib_object) == NULL) {
         crm_err("Detected multiple node_state entries for xpath=%s, bailing", xpath);
         crm_log_xml_warn(cib_object, "Duplicates");
         free(xpath);
         crm_exit(ENOTUNIQ);
+        return NULL; // not reached, but makes static analysis happy
     }
 
     if (rc == -ENXIO) {
         char *found_uuid = NULL;
 
         if (uuid == NULL) {
             query_node_uuid(cib_conn, node, &found_uuid, NULL);
         } else {
             found_uuid = strdup(uuid);
         }
 
         cib_object = create_xml_node(NULL, XML_CIB_TAG_STATE);
         crm_xml_add(cib_object, XML_ATTR_UUID, found_uuid);
         crm_xml_add(cib_object, XML_ATTR_UNAME, node);
         cib_conn->cmds->create(cib_conn, XML_CIB_TAG_STATUS, cib_object,
                                cib_sync_call | cib_scope_local);
         free_xml(cib_object);
         free(found_uuid);
 
         rc = cib_conn->cmds->query(cib_conn, xpath, &cib_object,
                                    cib_xpath | cib_sync_call | cib_scope_local);
         crm_trace("injecting node state for %s. rc is %d", node, rc);
     }
 
     free(xpath);
     CRM_ASSERT(rc == pcmk_ok);
     return cib_object;
 }
 
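 /* Mark a node as cleanly online or offline in its injected node_state entry */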
 static xmlNode *
 modify_node(cib_t * cib_conn, char *node, gboolean up)
 {
     xmlNode *cib_node = inject_node_state(cib_conn, node, NULL);
 
     if (up) {
         crm_xml_add(cib_node, XML_NODE_IN_CLUSTER, XML_BOOLEAN_YES);
         crm_xml_add(cib_node, XML_NODE_IS_PEER, ONLINESTATUS);
         crm_xml_add(cib_node, XML_NODE_JOIN_STATE, CRMD_JOINSTATE_MEMBER);
         crm_xml_add(cib_node, XML_NODE_EXPECTED, CRMD_JOINSTATE_MEMBER);
 
     } else {
         crm_xml_add(cib_node, XML_NODE_IN_CLUSTER, XML_BOOLEAN_NO);
         crm_xml_add(cib_node, XML_NODE_IS_PEER, OFFLINESTATUS);
         crm_xml_add(cib_node, XML_NODE_JOIN_STATE, CRMD_JOINSTATE_DOWN);
         crm_xml_add(cib_node, XML_NODE_EXPECTED, CRMD_JOINSTATE_DOWN);
     }
 
     crm_xml_add(cib_node, XML_ATTR_ORIGIN, crm_system_name);
     return cib_node;
 }
 
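/*!
 * \internal
 * \brief Find a resource's lrm_resource entry beneath a node_state entry
 *
 * \param[in] cib_node  node_state XML to search
 * \param[in] resource  Name of the resource to look for
 *
 * \return Matching XML, or NULL if none found
 */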
 static xmlNode *
 find_resource_xml(xmlNode * cib_node, const char *resource)
 {
     char *xpath = NULL;
     xmlNode *match = NULL;
     const char *node = crm_element_value(cib_node, XML_ATTR_UNAME);
     int max = strlen(rsc_template) + strlen(node) + strlen(resource) + 1;
 
     xpath = calloc(1, max);
 
     snprintf(xpath, max, rsc_template, node, resource);
     match = get_xpath_object(xpath, cib_node, LOG_DEBUG_2);
 
     free(xpath);
     return match;
 }
 
 
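/*!
 * \internal
 * \brief Find a resource's lrm_resource entry for a node, creating it if needed
 *
 * \param[in] cib_node   node_state XML the resource should appear under
 * \param[in] resource   Name of the resource
 * \param[in] rclass     Resource class (required when creating a new entry)
 * \param[in] rtype      Resource type (required when creating a new entry)
 * \param[in] rprovider  Resource provider (required for classes that use one)
 *
 * \return XML of the resource's entry, or NULL on error
 */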
 static xmlNode *
 inject_resource(xmlNode * cib_node, const char *resource, const char *rclass, const char *rtype,
                 const char *rprovider)
 {
     xmlNode *lrm = NULL;
     xmlNode *container = NULL;
     xmlNode *cib_resource = NULL;
     char *xpath = NULL;
 
     cib_resource = find_resource_xml(cib_node, resource);
     if (cib_resource != NULL) {
         return cib_resource;
     }
 
     /* One day, add query for class, provider, type */
 
     if (rclass == NULL || rtype == NULL) {
         fprintf(stderr, "Resource %s not found in the status section of %s."
                 "  Please supply the class and type to continue\n", resource, ID(cib_node));
         return NULL;
 
     } else if (safe_str_neq(rclass, PCMK_RESOURCE_CLASS_OCF)
                && safe_str_neq(rclass, PCMK_RESOURCE_CLASS_STONITH)
                && safe_str_neq(rclass, PCMK_RESOURCE_CLASS_HB)
                && safe_str_neq(rclass, PCMK_RESOURCE_CLASS_SERVICE)
                && safe_str_neq(rclass, PCMK_RESOURCE_CLASS_UPSTART)
                && safe_str_neq(rclass, PCMK_RESOURCE_CLASS_SYSTEMD)
                && safe_str_neq(rclass, PCMK_RESOURCE_CLASS_LSB)) {
         fprintf(stderr, "Invalid class for %s: %s\n", resource, rclass);
         return NULL;
 
     } else if (crm_provider_required(rclass) && (rprovider == NULL)) {
         fprintf(stderr, "Please specify the provider for resource %s\n", resource);
         return NULL;
     }
 
     xpath = (char *)xmlGetNodePath(cib_node);
     crm_info("Injecting new resource %s into %s '%s'", resource, xpath, ID(cib_node));
     free(xpath);
 
     lrm = first_named_child(cib_node, XML_CIB_TAG_LRM);
     if (lrm == NULL) {
         const char *node_uuid = ID(cib_node);
 
         lrm = create_xml_node(cib_node, XML_CIB_TAG_LRM);
         crm_xml_add(lrm, XML_ATTR_ID, node_uuid);
     }
 
     container = first_named_child(lrm, XML_LRM_TAG_RESOURCES);
     if (container == NULL) {
         container = create_xml_node(lrm, XML_LRM_TAG_RESOURCES);
     }
 
     cib_resource = create_xml_node(container, XML_LRM_TAG_RESOURCE);
     crm_xml_add(cib_resource, XML_ATTR_ID, resource);
 
     crm_xml_add(cib_resource, XML_AGENT_ATTR_CLASS, rclass);
     crm_xml_add(cib_resource, XML_AGENT_ATTR_PROVIDER, rprovider);
     crm_xml_add(cib_resource, XML_ATTR_TYPE, rtype);
 
     return cib_resource;
 }
 
 #define XPATH_MAX 1024
 
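/*!
 * \internal
 * \brief Query the CIB for a ticket's state entry
 *
 * \param[in]  the_cib           CIB connection to query
 * \param[in]  ticket_id         Ticket to search for (or NULL to match the
 *                               whole tickets section)
 * \param[out] ticket_state_xml  Where to store the matching XML (if any)
 *
 * \return pcmk_ok on success, or a negative error code otherwise
 */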
 static int
 find_ticket_state(cib_t * the_cib, const char *ticket_id, xmlNode ** ticket_state_xml)
 {
     int offset = 0;
     int rc = pcmk_ok;
     xmlNode *xml_search = NULL;
 
     char *xpath_string = NULL;
 
     CRM_ASSERT(ticket_state_xml != NULL);
     *ticket_state_xml = NULL;
 
     xpath_string = calloc(1, XPATH_MAX);
     offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "%s", "/cib/status/tickets");
 
     if (ticket_id) {
         offset += snprintf(xpath_string + offset, XPATH_MAX - offset, "/%s[@id=\"%s\"]",
                            XML_CIB_TAG_TICKET_STATE, ticket_id);
     }
     CRM_LOG_ASSERT(offset > 0);
     rc = the_cib->cmds->query(the_cib, xpath_string, &xml_search,
                               cib_sync_call | cib_scope_local | cib_xpath);
 
     if (rc != pcmk_ok) {
         goto bail;
     }
 
     crm_log_xml_debug(xml_search, "Match");
    if (xml_has_children(xml_search) && ticket_id) {
        fprintf(stdout, "Multiple ticket_states match ticket_id=%s\n", ticket_id);
    }
    *ticket_state_xml = xml_search;
 
   bail:
     free(xpath_string);
     return rc;
 }
 
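/*!
 * \internal
 * \brief Set an attribute in a ticket's state entry, creating the entry if needed
 *
 * \param[in] ticket_id    Ticket to update
 * \param[in] attr_name    Name of the attribute to set
 * \param[in] attr_value   Value to set the attribute to
 * \param[in] cib          CIB connection to use
 * \param[in] cib_options  Options to pass to the CIB modify call
 *
 * \return pcmk_ok on success, or a negative error code otherwise
 */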
 static int
 set_ticket_state_attr(const char *ticket_id, const char *attr_name,
                       const char *attr_value, cib_t * cib, int cib_options)
 {
     int rc = pcmk_ok;
     xmlNode *xml_top = NULL;
     xmlNode *ticket_state_xml = NULL;
 
     rc = find_ticket_state(cib, ticket_id, &ticket_state_xml);
     if (rc == pcmk_ok) {
         crm_debug("Found a match state for ticket: id=%s", ticket_id);
         xml_top = ticket_state_xml;
 
     } else if (rc != -ENXIO) {
         return rc;
 
     } else {
         xmlNode *xml_obj = NULL;
 
         xml_top = create_xml_node(NULL, XML_CIB_TAG_STATUS);
         xml_obj = create_xml_node(xml_top, XML_CIB_TAG_TICKETS);
         ticket_state_xml = create_xml_node(xml_obj, XML_CIB_TAG_TICKET_STATE);
         crm_xml_add(ticket_state_xml, XML_ATTR_ID, ticket_id);
     }
 
     crm_xml_add(ticket_state_xml, attr_name, attr_value);
 
     crm_log_xml_debug(xml_top, "Update");
 
     rc = cib->cmds->modify(cib, XML_CIB_TAG_STATUS, xml_top, cib_options);
 
     free_xml(xml_top);
 
     return rc;
 }
 
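/*!
 * \internal
 * \brief Apply requested modifications to the CIB before a simulated run
 *
 * Injects quorum and watchdog settings, node state changes, operation
 * results, and ticket state changes.
 *
 * \param[in] data_set         Working set for the cluster
 * \param[in] cib              CIB connection to modify
 * \param[in] quorum           Quorum setting to inject (or NULL)
 * \param[in] watchdog         Watchdog setting to inject (or NULL)
 * \param[in] node_up          Names of nodes to bring online
 * \param[in] node_down        Names of nodes to take offline
 * \param[in] node_fail        Names of nodes to mark as failed
 * \param[in] op_inject        Operation results to inject
 * \param[in] ticket_grant     Tickets to grant
 * \param[in] ticket_revoke    Tickets to revoke
 * \param[in] ticket_standby   Tickets to put in standby
 * \param[in] ticket_activate  Tickets to activate
 */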
 void
 modify_configuration(pe_working_set_t * data_set, cib_t *cib,
                     const char *quorum, const char *watchdog,
                     GListPtr node_up, GListPtr node_down, GListPtr node_fail,
                      GListPtr op_inject, GListPtr ticket_grant, GListPtr ticket_revoke,
                      GListPtr ticket_standby, GListPtr ticket_activate)
 {
     int rc = pcmk_ok;
     GListPtr gIter = NULL;
 
     xmlNode *cib_op = NULL;
     xmlNode *cib_node = NULL;
     xmlNode *cib_resource = NULL;
 
     lrmd_event_data_t *op = NULL;
 
     if (quorum) {
         xmlNode *top = create_xml_node(NULL, XML_TAG_CIB);
 
         quiet_log(" + Setting quorum: %s\n", quorum);
         /* crm_xml_add(top, XML_ATTR_DC_UUID, dc_uuid);      */
         crm_xml_add(top, XML_ATTR_HAVE_QUORUM, quorum);
 
         rc = cib->cmds->modify(cib, NULL, top, cib_sync_call | cib_scope_local);
         CRM_ASSERT(rc == pcmk_ok);
     }
 
     if (watchdog) {
         quiet_log(" + Setting watchdog: %s\n", watchdog);
 
        rc = update_attr_delegate(cib, cib_sync_call | cib_scope_local,
                                  XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL,
                                  XML_ATTR_HAVE_WATCHDOG, watchdog, FALSE, NULL, NULL);
 
         CRM_ASSERT(rc == pcmk_ok);
     }
 
     for (gIter = node_up; gIter != NULL; gIter = gIter->next) {
         char *node = (char *)gIter->data;
 
         quiet_log(" + Bringing node %s online\n", node);
         cib_node = modify_node(cib, node, TRUE);
         CRM_ASSERT(cib_node != NULL);
 
         rc = cib->cmds->modify(cib, XML_CIB_TAG_STATUS, cib_node,
                                       cib_sync_call | cib_scope_local);
         CRM_ASSERT(rc == pcmk_ok);
         free_xml(cib_node);
     }
 
     for (gIter = node_down; gIter != NULL; gIter = gIter->next) {
         char xpath[STATUS_PATH_MAX];
         char *node = (char *)gIter->data;
 
         quiet_log(" + Taking node %s offline\n", node);
         cib_node = modify_node(cib, node, FALSE);
         CRM_ASSERT(cib_node != NULL);
 
         rc = cib->cmds->modify(cib, XML_CIB_TAG_STATUS, cib_node,
                                       cib_sync_call | cib_scope_local);
         CRM_ASSERT(rc == pcmk_ok);
         free_xml(cib_node);
 
         snprintf(xpath, STATUS_PATH_MAX, "//node_state[@uname='%s']/%s", node, XML_CIB_TAG_LRM);
         cib->cmds->delete(cib, xpath, NULL,
                                       cib_xpath | cib_sync_call | cib_scope_local);
 
         snprintf(xpath, STATUS_PATH_MAX, "//node_state[@uname='%s']/%s", node,
                  XML_TAG_TRANSIENT_NODEATTRS);
         cib->cmds->delete(cib, xpath, NULL,
                                       cib_xpath | cib_sync_call | cib_scope_local);
 
     }
 
     for (gIter = node_fail; gIter != NULL; gIter = gIter->next) {
         char *node = (char *)gIter->data;
 
         quiet_log(" + Failing node %s\n", node);
        cib_node = modify_node(cib, node, TRUE);
        CRM_ASSERT(cib_node != NULL);
        crm_xml_add(cib_node, XML_NODE_IN_CLUSTER, XML_BOOLEAN_NO);
 
         rc = cib->cmds->modify(cib, XML_CIB_TAG_STATUS, cib_node,
                                       cib_sync_call | cib_scope_local);
         CRM_ASSERT(rc == pcmk_ok);
         free_xml(cib_node);
     }
 
     for (gIter = ticket_grant; gIter != NULL; gIter = gIter->next) {
         char *ticket_id = (char *)gIter->data;
 
         quiet_log(" + Granting ticket %s\n", ticket_id);
         rc = set_ticket_state_attr(ticket_id, "granted", "true",
                                    cib, cib_sync_call | cib_scope_local);
 
         CRM_ASSERT(rc == pcmk_ok);
     }
 
     for (gIter = ticket_revoke; gIter != NULL; gIter = gIter->next) {
         char *ticket_id = (char *)gIter->data;
 
         quiet_log(" + Revoking ticket %s\n", ticket_id);
         rc = set_ticket_state_attr(ticket_id, "granted", "false",
                                    cib, cib_sync_call | cib_scope_local);
 
         CRM_ASSERT(rc == pcmk_ok);
     }
 
     for (gIter = ticket_standby; gIter != NULL; gIter = gIter->next) {
         char *ticket_id = (char *)gIter->data;
 
         quiet_log(" + Making ticket %s standby\n", ticket_id);
         rc = set_ticket_state_attr(ticket_id, "standby", "true",
                                    cib, cib_sync_call | cib_scope_local);
 
         CRM_ASSERT(rc == pcmk_ok);
     }
 
     for (gIter = ticket_activate; gIter != NULL; gIter = gIter->next) {
         char *ticket_id = (char *)gIter->data;
 
         quiet_log(" + Activating ticket %s\n", ticket_id);
         rc = set_ticket_state_attr(ticket_id, "standby", "false",
                                    cib, cib_sync_call | cib_scope_local);
 
         CRM_ASSERT(rc == pcmk_ok);
     }
 
     for (gIter = op_inject; gIter != NULL; gIter = gIter->next) {
         char *spec = (char *)gIter->data;
 
         int rc = 0;
         int outcome = 0;
         int interval = 0;
 
         char *key = NULL;
         char *node = NULL;
         char *task = NULL;
         char *resource = NULL;
 
         const char *rtype = NULL;
         const char *rclass = NULL;
         const char *rprovider = NULL;
 
         resource_t *rsc = NULL;
 
         quiet_log(" + Injecting %s into the configuration\n", spec);
 
         key = calloc(1, strlen(spec) + 1);
         node = calloc(1, strlen(spec) + 1);
         rc = sscanf(spec, "%[^@]@%[^=]=%d", key, node, &outcome);
         if (rc != 3) {
             fprintf(stderr, "Invalid operation spec: %s.  Only found %d fields\n", spec, rc);
             free(key);
             free(node);
             continue;
         }
 
         parse_op_key(key, &resource, &task, &interval);
 
         rsc = pe_find_resource(data_set->resources, resource);
         if (rsc == NULL) {
             fprintf(stderr, " - Invalid resource name: %s\n", resource);
         } else {
             rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
             rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);
             rprovider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
 
             cib_node = inject_node_state(cib, node, NULL);
             CRM_ASSERT(cib_node != NULL);
 
             update_failcounts(cib_node, resource, task, interval, outcome);
 
             cib_resource = inject_resource(cib_node, resource, rclass, rtype, rprovider);
             CRM_ASSERT(cib_resource != NULL);
 
             op = create_op(cib_resource, task, interval, outcome);
             CRM_ASSERT(op != NULL);
 
             cib_op = inject_op(cib_resource, op, 0);
             CRM_ASSERT(cib_op != NULL);
             lrmd_free_event(op);
 
             rc = cib->cmds->modify(cib, XML_CIB_TAG_STATUS, cib_node,
                                           cib_sync_call | cib_scope_local);
             CRM_ASSERT(rc == pcmk_ok);
         }
         free(task);
         free(node);
         free(key);
     }
 }
 
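/*!
 * \internal
 * \brief Simulate successful execution of a pseudo-action
 *
 * \param[in] graph   Transition graph being executed
 * \param[in] action  Pseudo-action to simulate
 *
 * \return TRUE (to indicate the action was handled)
 */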
 static gboolean
 exec_pseudo_action(crm_graph_t * graph, crm_action_t * action)
 {
     const char *node = crm_element_value(action->xml, XML_LRM_ATTR_TARGET);
     const char *task = crm_element_value(action->xml, XML_LRM_ATTR_TASK_KEY);
 
     action->confirmed = TRUE;
 
     quiet_log(" * Pseudo action:   %s%s%s\n", task, node ? " on " : "", node ? node : "");
     update_graph(graph, action);
     return TRUE;
 }
 
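/*!
 * \internal
 * \brief Simulate execution of a resource action, updating the fake CIB
 *
 * \param[in] graph   Transition graph being executed
 * \param[in] action  Resource action to simulate
 *
 * \return TRUE if the action was handled, FALSE on error
 */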
 static gboolean
 exec_rsc_action(crm_graph_t * graph, crm_action_t * action)
 {
     int rc = 0;
     GListPtr gIter = NULL;
     lrmd_event_data_t *op = NULL;
     int target_outcome = 0;
     gboolean uname_is_uuid = FALSE;
 
     const char *rtype = NULL;
     const char *rclass = NULL;
     const char *resource = NULL;
     const char *rprovider = NULL;
     const char *operation = crm_element_value(action->xml, "operation");
     const char *target_rc_s = crm_meta_value(action->params, XML_ATTR_TE_TARGET_RC);
 
     xmlNode *cib_node = NULL;
     xmlNode *cib_resource = NULL;
     xmlNode *action_rsc = first_named_child(action->xml, XML_CIB_TAG_RESOURCE);
 
     char *node = crm_element_value_copy(action->xml, XML_LRM_ATTR_TARGET);
     char *uuid = crm_element_value_copy(action->xml, XML_LRM_ATTR_TARGET_UUID);
     const char *router_node = crm_element_value(action->xml, XML_LRM_ATTR_ROUTER_NODE);
 
     if (safe_str_eq(operation, CRM_OP_PROBED)
         || safe_str_eq(operation, CRM_OP_REPROBE)) {
         crm_info("Skipping %s op for %s", operation, node);
         goto done;
     }
 
     if (action_rsc == NULL) {
         crm_log_xml_err(action->xml, "Bad");
         free(node); free(uuid);
         return FALSE;
     }
 
    /* Look for the preferred name.
     * If not found, try the expected 'local' name.
     * If that is not found either, use the preferred name anyway.
     */
     resource = crm_element_value(action_rsc, XML_ATTR_ID);
     if (pe_find_resource(fake_resource_list, resource) == NULL) {
         const char *longname = crm_element_value(action_rsc, XML_ATTR_ID_LONG);
 
         if (pe_find_resource(fake_resource_list, longname)) {
             resource = longname;
         }
     }
 
     if (safe_str_eq(operation, "delete") || safe_str_eq(operation, RSC_METADATA)) {
         quiet_log(" * Resource action: %-15s %s on %s\n", resource, operation, node);
         goto done;
     }
 
     rclass = crm_element_value(action_rsc, XML_AGENT_ATTR_CLASS);
     rtype = crm_element_value(action_rsc, XML_ATTR_TYPE);
     rprovider = crm_element_value(action_rsc, XML_AGENT_ATTR_PROVIDER);
 
     if (target_rc_s != NULL) {
         target_outcome = crm_parse_int(target_rc_s, "0");
     }
 
     CRM_ASSERT(fake_cib->cmds->query(fake_cib, NULL, NULL, cib_sync_call | cib_scope_local) ==
                pcmk_ok);
 
     if (router_node) {
         uname_is_uuid = TRUE;
     }
 
     cib_node = inject_node_state(fake_cib, node, uname_is_uuid ? node : uuid);
     CRM_ASSERT(cib_node != NULL);
 
     cib_resource = inject_resource(cib_node, resource, rclass, rtype, rprovider);
     CRM_ASSERT(cib_resource != NULL);
 
     op = convert_graph_action(cib_resource, action, 0, target_outcome);
     if (op->interval) {
         quiet_log(" * Resource action: %-15s %s=%d on %s\n", resource, op->op_type, op->interval,
                   node);
     } else {
         quiet_log(" * Resource action: %-15s %s on %s\n", resource, op->op_type, node);
     }
 
     for (gIter = fake_op_fail_list; gIter != NULL; gIter = gIter->next) {
         char *spec = (char *)gIter->data;
         char *key = NULL;
 
        key = calloc(1, 1 + strlen(spec));
        snprintf(key, 1 + strlen(spec), "%s_%s_%d@%s=", resource, op->op_type,
                 op->interval, node);
 
         if (strncasecmp(key, spec, strlen(key)) == 0) {
            // Expected format: ${resource}_${task}_${interval}@${node}=${rc}
            rc = sscanf(spec, "%*[^=]=%d", (int *) &op->rc);
 
             if (rc != 1) {
                 fprintf(stderr,
                         "Invalid failed operation spec: %s. Result code must be integer\n",
                         spec);
                 free(key);
                 continue;
             }
             action->failed = TRUE;
             graph->abort_priority = INFINITY;
             printf("\tPretending action %d failed with rc=%d\n", action->id, op->rc);
             update_failcounts(cib_node, resource, op->op_type, op->interval, op->rc);
             free(key);
             break;
         }
         free(key);
     }
 
     inject_op(cib_resource, op, target_outcome);
     lrmd_free_event(op);
 
     rc = fake_cib->cmds->modify(fake_cib, XML_CIB_TAG_STATUS, cib_node,
                                   cib_sync_call | cib_scope_local);
     CRM_ASSERT(rc == pcmk_ok);
 
   done:
     free(node); free(uuid);
     free_xml(cib_node);
     action->confirmed = TRUE;
     update_graph(graph, action);
     return TRUE;
 }
 
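/*!
 * \internal
 * \brief Simulate successful execution of a cluster (crmd) action
 *
 * \param[in] graph   Transition graph being executed
 * \param[in] action  Cluster action to simulate
 *
 * \return TRUE (to indicate the action was handled)
 */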
 static gboolean
 exec_crmd_action(crm_graph_t * graph, crm_action_t * action)
 {
     const char *node = crm_element_value(action->xml, XML_LRM_ATTR_TARGET);
     const char *task = crm_element_value(action->xml, XML_LRM_ATTR_TASK);
     xmlNode *rsc = first_named_child(action->xml, XML_CIB_TAG_RESOURCE);
 
     action->confirmed = TRUE;
 
     if(rsc) {
         quiet_log(" * Cluster action:  %s for %s on %s\n", task, ID(rsc), node);
     } else {
         quiet_log(" * Cluster action:  %s on %s\n", task, node);
     }
     update_graph(graph, action);
     return TRUE;
 }
 
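/*!
 * \internal
 * \brief Simulate a fencing action, marking the target offline if appropriate
 *
 * \param[in] graph   Transition graph being executed
 * \param[in] action  Fencing action to simulate
 *
 * \return TRUE (to indicate the action was handled)
 */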
 static gboolean
 exec_stonith_action(crm_graph_t * graph, crm_action_t * action)
 {
     const char *op = crm_meta_value(action->params, "stonith_action");
     char *target = crm_element_value_copy(action->xml, XML_LRM_ATTR_TARGET);
 
     quiet_log(" * Fencing %s (%s)\n", target, op);
     if(safe_str_neq(op, "on")) {
         int rc = 0;
         char xpath[STATUS_PATH_MAX];
         xmlNode *cib_node = modify_node(fake_cib, target, FALSE);
 
        CRM_ASSERT(cib_node != NULL);
        crm_xml_add(cib_node, XML_ATTR_ORIGIN, __FUNCTION__);
 
         rc = fake_cib->cmds->replace(fake_cib, XML_CIB_TAG_STATUS, cib_node,
                                    cib_sync_call | cib_scope_local);
         CRM_ASSERT(rc == pcmk_ok);
 
         snprintf(xpath, STATUS_PATH_MAX, "//node_state[@uname='%s']/%s", target, XML_CIB_TAG_LRM);
         fake_cib->cmds->delete(fake_cib, xpath, NULL,
                                       cib_xpath | cib_sync_call | cib_scope_local);
 
         snprintf(xpath, STATUS_PATH_MAX, "//node_state[@uname='%s']/%s", target,
                  XML_TAG_TRANSIENT_NODEATTRS);
         fake_cib->cmds->delete(fake_cib, xpath, NULL,
                                       cib_xpath | cib_sync_call | cib_scope_local);
 
         free_xml(cib_node);
     }
 
     action->confirmed = TRUE;
     update_graph(graph, action);
     free(target);
     return TRUE;
 }
 
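/*!
 * \internal
 * \brief Simulate execution of the current transition graph
 *
 * \param[in] data_set      Working set containing the graph to execute
 * \param[in] cib           Fake CIB connection to apply changes to
 * \param[in] op_fail_list  Operation specs that should be treated as failures
 * \param[in] quiet         Whether to suppress informational output
 *
 * \return 0 if the transition completed, or the transition status otherwise
 */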
 int
 run_simulation(pe_working_set_t * data_set, cib_t *cib, GListPtr op_fail_list, bool quiet)
 {
     crm_graph_t *transition = NULL;
     enum transition_status graph_rc = -1;
 
     crm_graph_functions_t exec_fns = {
         exec_pseudo_action,
         exec_rsc_action,
         exec_crmd_action,
         exec_stonith_action,
     };
 
     fake_cib = cib;
     fake_quiet = quiet;
     fake_op_fail_list = op_fail_list;
 
     quiet_log("\nExecuting cluster transition:\n");
 
     set_graph_functions(&exec_fns);
     transition = unpack_graph(data_set->graph, crm_system_name);
     print_graph(LOG_DEBUG, transition);
 
     fake_resource_list = data_set->resources;
     do {
         graph_rc = run_graph(transition);
 
     } while (graph_rc == transition_active);
     fake_resource_list = NULL;
 
     if (graph_rc != transition_complete) {
         fprintf(stdout, "Transition failed: %s\n", transition_status(graph_rc));
         print_graph(LOG_ERR, transition);
     }
     destroy_graph(transition);
     if (graph_rc != transition_complete) {
         fprintf(stdout, "An invalid transition was produced\n");
     }
 
     if (quiet == FALSE) {
         xmlNode *cib_object = NULL;
         int rc = fake_cib->cmds->query(fake_cib, NULL, &cib_object, cib_sync_call | cib_scope_local);
 
         CRM_ASSERT(rc == pcmk_ok);
         cleanup_alloc_calculations(data_set);
         data_set->input = cib_object;
     }
 
     if (graph_rc != transition_complete) {
         return graph_rc;
     }
     return 0;
 }
diff --git a/tools/regression.tools.exp b/tools/regression.tools.exp
index 64034f1242..46d0b9532e 100644
--- a/tools/regression.tools.exp
+++ b/tools/regression.tools.exp
@@ -1,2982 +1,2982 @@
 Created new pacemaker configuration
 Setting up shadow instance
 A new shadow instance was created.  To begin using it paste the following into your shell:
   CIB_shadow=tools-regression ; export CIB_shadow
 =#=#=#= Begin test: Validate CIB =#=#=#=
 <cib epoch="0" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config/>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= Current cib after: Validate CIB =#=#=#=
 <cib epoch="0" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config/>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Validate CIB - OK (0) =#=#=#=
 * Passed: cibadmin       - Validate CIB
 =#=#=#= Begin test: Configure something before erasing =#=#=#=
 =#=#=#= Current cib after: Configure something before erasing =#=#=#=
 <cib epoch="1" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Configure something before erasing - OK (0) =#=#=#=
 * Passed: crm_attribute  - Configure something before erasing
 =#=#=#= Begin test: Require --force for CIB erasure =#=#=#=
 The supplied command is considered dangerous.  To prevent accidental destruction of the cluster, the --force flag is required in order to proceed.
 =#=#=#= Current cib after: Require --force for CIB erasure =#=#=#=
 <cib epoch="1" num_updates="0" admin_epoch="0">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Require --force for CIB erasure - Invalid argument (22) =#=#=#=
 * Passed: cibadmin       - Require --force for CIB erasure
 =#=#=#= Begin test: Allow CIB erasure with --force =#=#=#=
 =#=#=#= Current cib after: Allow CIB erasure with --force =#=#=#=
 <cib epoch="2" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config/>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Allow CIB erasure with --force - OK (0) =#=#=#=
 * Passed: cibadmin       - Allow CIB erasure with --force
 =#=#=#= Begin test: Query CIB =#=#=#=
 =#=#=#= Current cib after: Query CIB =#=#=#=
 <cib epoch="2" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config/>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Query CIB - OK (0) =#=#=#=
 * Passed: cibadmin       - Query CIB
 =#=#=#= Begin test: Set cluster option =#=#=#=
 =#=#=#= Current cib after: Set cluster option =#=#=#=
 <cib epoch="3" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Set cluster option - OK (0) =#=#=#=
 * Passed: crm_attribute  - Set cluster option
 =#=#=#= Begin test: Query new cluster option =#=#=#=
     <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
 =#=#=#= Current cib after: Query new cluster option =#=#=#=
 <cib epoch="3" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Query new cluster option - OK (0) =#=#=#=
 * Passed: cibadmin       - Query new cluster option
 =#=#=#= Begin test: Query cluster options =#=#=#=
 =#=#=#= Current cib after: Query cluster options =#=#=#=
 <cib epoch="3" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Query cluster options - OK (0) =#=#=#=
 * Passed: cibadmin       - Query cluster options
 =#=#=#= Begin test: Set no-quorum policy =#=#=#=
 =#=#=#= Current cib after: Set no-quorum policy =#=#=#=
 <cib epoch="4" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Set no-quorum policy - OK (0) =#=#=#=
 * Passed: crm_attribute  - Set no-quorum policy
 =#=#=#= Begin test: Delete nvpair =#=#=#=
 =#=#=#= Current cib after: Delete nvpair =#=#=#=
 <cib epoch="5" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Delete nvpair - OK (0) =#=#=#=
 * Passed: cibadmin       - Delete nvpair
=#=#=#= Begin test: Create operation should fail =#=#=#=
 Call failed: Name not unique on network
 <failed>
   <failed_update id="cib-bootstrap-options" object_type="cluster_property_set" operation="cib_create" reason="Name not unique on network">
     <cluster_property_set id="cib-bootstrap-options">
       <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
     </cluster_property_set>
   </failed_update>
 </failed>
=#=#=#= Current cib after: Create operation should fail =#=#=#=
 <cib epoch="5" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
=#=#=#= End test: Create operation should fail - Name not unique on network (76) =#=#=#=
* Passed: cibadmin       - Create operation should fail
 =#=#=#= Begin test: Modify cluster options section =#=#=#=
 =#=#=#= Current cib after: Modify cluster options section =#=#=#=
 <cib epoch="6" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Modify cluster options section - OK (0) =#=#=#=
 * Passed: cibadmin       - Modify cluster options section
 =#=#=#= Begin test: Query updated cluster option =#=#=#=
     <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
 =#=#=#= Current cib after: Query updated cluster option =#=#=#=
 <cib epoch="6" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Query updated cluster option - OK (0) =#=#=#=
 * Passed: cibadmin       - Query updated cluster option
 =#=#=#= Begin test: Set duplicate cluster option =#=#=#=
 =#=#=#= Current cib after: Set duplicate cluster option =#=#=#=
 <cib epoch="7" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="40s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Set duplicate cluster option - OK (0) =#=#=#=
 * Passed: crm_attribute  - Set duplicate cluster option
 =#=#=#= Begin test: Setting multiply defined cluster option should fail =#=#=#=
 Error performing operation: Name not unique on network
 Multiple attributes match name=cluster-delay
   Value: 60s 	(id=cib-bootstrap-options-cluster-delay)
   Value: 40s 	(id=duplicate-cluster-delay)
 =#=#=#= Current cib after: Setting multiply defined cluster option should fail =#=#=#=
 <cib epoch="7" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="40s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Setting multiply defined cluster option should fail - Name not unique on network (76) =#=#=#=
 * Passed: crm_attribute  - Setting multiply defined cluster option should fail
 =#=#=#= Begin test: Set cluster option with -s =#=#=#=
 =#=#=#= Current cib after: Set cluster option with -s =#=#=#=
 <cib epoch="8" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
         <nvpair id="cib-bootstrap-options-cluster-delay" name="cluster-delay" value="60s"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Set cluster option with -s - OK (0) =#=#=#=
 * Passed: crm_attribute  - Set cluster option with -s
 =#=#=#= Begin test: Delete cluster option with -i =#=#=#=
 Deleted crm_config option: id=(null) name=cluster-delay
 
 =#=#=#= Current cib after: Delete cluster option with -i =#=#=#=
 <cib epoch="9" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes/>
     <resources/>
     <constraints/>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Delete cluster option with -i - OK (0) =#=#=#=
 * Passed: crm_attribute  - Delete cluster option with -i
 =#=#=#= Begin test: Create node1 and bring it online =#=#=#=
 
 Current cluster status:
 
 
 Performing requested modifications
  + Bringing node node1 online
 
 Transition Summary:
 
 Executing cluster transition:
 
 Revised cluster status:
 Online: [ node1 ]
 
 
 =#=#=#= Current cib after: Create node1 and bring it online =#=#=#=
 <cib epoch="10" num_updates="2" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1"/>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Create node1 and bring it online - OK (0) =#=#=#=
 * Passed: crm_simulate   - Create node1 and bring it online
 =#=#=#= Begin test: Create node attribute =#=#=#=
 =#=#=#= Current cib after: Create node attribute =#=#=#=
 <cib epoch="11" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Create node attribute - OK (0) =#=#=#=
 * Passed: crm_attribute  - Create node attribute
 =#=#=#= Begin test: Query new node attribute =#=#=#=
       <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
 =#=#=#= Current cib after: Query new node attribute =#=#=#=
 <cib epoch="11" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate"/>
   </status>
 </cib>
 =#=#=#= End test: Query new node attribute - OK (0) =#=#=#=
 * Passed: cibadmin       - Query new node attribute
 =#=#=#= Begin test: Set a transient (fail-count) node attribute =#=#=#=
 =#=#=#= Current cib after: Set a transient (fail-count) node attribute =#=#=#=
 <cib epoch="11" num_updates="1" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1">
           <nvpair id="status-node1-fail-count-foo" name="fail-count-foo" value="3"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Set a transient (fail-count) node attribute - OK (0) =#=#=#=
 * Passed: crm_attribute  - Set a transient (fail-count) node attribute
 =#=#=#= Begin test: Query a fail count =#=#=#=
 scope=status  name=fail-count-foo value=3
 =#=#=#= Current cib after: Query a fail count =#=#=#=
 <cib epoch="11" num_updates="1" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1">
           <nvpair id="status-node1-fail-count-foo" name="fail-count-foo" value="3"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Query a fail count - OK (0) =#=#=#=
 * Passed: crm_failcount  - Query a fail count
 =#=#=#= Begin test: Delete a transient (fail-count) node attribute =#=#=#=
 Deleted status attribute: id=status-node1-fail-count-foo name=fail-count-foo
 
 =#=#=#= Current cib after: Delete a transient (fail-count) node attribute =#=#=#=
 <cib epoch="11" num_updates="2" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Delete a transient (fail-count) node attribute - OK (0) =#=#=#=
 * Passed: crm_attribute  - Delete a transient (fail-count) node attribute
 =#=#=#= Begin test: Digest calculation =#=#=#=
 Digest: =#=#=#= Current cib after: Digest calculation =#=#=#=
 <cib epoch="11" num_updates="2" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Digest calculation - OK (0) =#=#=#=
 * Passed: cibadmin       - Digest calculation
 =#=#=#= Begin test: Replace operation should fail =#=#=#=
 Call failed: Update was older than existing configuration
 =#=#=#= Current cib after: Replace operation should fail =#=#=#=
 <cib epoch="11" num_updates="2" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Replace operation should fail - Update was older than existing configuration (205) =#=#=#=
 * Passed: cibadmin       - Replace operation should fail
 =#=#=#= Begin test: Default standby value =#=#=#=
 Error performing operation: No such device or address
 scope=status  name=standby value=off
 =#=#=#= Current cib after: Default standby value =#=#=#=
 <cib epoch="11" num_updates="2" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Default standby value - OK (0) =#=#=#=
 * Passed: crm_standby    - Default standby value
 =#=#=#= Begin test: Set standby status =#=#=#=
 =#=#=#= Current cib after: Set standby status =#=#=#=
 <cib epoch="12" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
           <nvpair id="nodes-node1-standby" name="standby" value="true"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Set standby status - OK (0) =#=#=#=
 * Passed: crm_standby    - Set standby status
 =#=#=#= Begin test: Query standby value =#=#=#=
 scope=nodes  name=standby value=true
 =#=#=#= Current cib after: Query standby value =#=#=#=
 <cib epoch="12" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
           <nvpair id="nodes-node1-standby" name="standby" value="true"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Query standby value - OK (0) =#=#=#=
 * Passed: crm_standby    - Query standby value
 =#=#=#= Begin test: Delete standby value =#=#=#=
 Deleted nodes attribute: id=nodes-node1-standby name=standby
 
 =#=#=#= Current cib after: Delete standby value =#=#=#=
 <cib epoch="13" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources/>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Delete standby value - OK (0) =#=#=#=
 * Passed: crm_standby    - Delete standby value
 =#=#=#= Begin test: Create a resource =#=#=#=
 =#=#=#= Current cib after: Create a resource =#=#=#=
 <cib epoch="14" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Create a resource - OK (0) =#=#=#=
 * Passed: cibadmin       - Create a resource
 =#=#=#= Begin test: Create a resource meta attribute =#=#=#=
 
 Set 'dummy' option: id=dummy-meta_attributes-is-managed set=dummy-meta_attributes name=is-managed=false
 =#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
 <cib epoch="15" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes">
           <nvpair id="dummy-meta_attributes-is-managed" name="is-managed" value="false"/>
         </meta_attributes>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource   - Create a resource meta attribute
 =#=#=#= Begin test: Query a resource meta attribute =#=#=#=
 false
 =#=#=#= Current cib after: Query a resource meta attribute =#=#=#=
 <cib epoch="15" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes">
           <nvpair id="dummy-meta_attributes-is-managed" name="is-managed" value="false"/>
         </meta_attributes>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Query a resource meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource   - Query a resource meta attribute
 =#=#=#= Begin test: Remove a resource meta attribute =#=#=#=
 Deleted 'dummy' option: id=dummy-meta_attributes-is-managed name=is-managed
 =#=#=#= Current cib after: Remove a resource meta attribute =#=#=#=
 <cib epoch="16" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Remove a resource meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource   - Remove a resource meta attribute
 =#=#=#= Begin test: Create a resource attribute =#=#=#=
 
 Set 'dummy' option: id=dummy-instance_attributes-delay set=dummy-instance_attributes name=delay=10s
 =#=#=#= Current cib after: Create a resource attribute =#=#=#=
 <cib epoch="17" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Create a resource attribute - OK (0) =#=#=#=
 * Passed: crm_resource   - Create a resource attribute
 =#=#=#= Begin test: List the configured resources =#=#=#=
  dummy	(ocf::pacemaker:Dummy):	Stopped
 =#=#=#= Current cib after: List the configured resources =#=#=#=
 <cib epoch="17" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: List the configured resources - OK (0) =#=#=#=
 * Passed: crm_resource   - List the configured resources
 =#=#=#= Begin test: Require a destination when migrating a resource that is stopped =#=#=#=
 Resource 'dummy' not moved: active in 0 locations.
 You can prevent 'dummy' from running on a specific location with: --ban --node <name>
 Error performing operation: Invalid argument
 =#=#=#= Current cib after: Require a destination when migrating a resource that is stopped =#=#=#=
 <cib epoch="17" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Require a destination when migrating a resource that is stopped - Invalid argument (22) =#=#=#=
 * Passed: crm_resource   - Require a destination when migrating a resource that is stopped
 =#=#=#= Begin test: Don't support migration to non-existent locations =#=#=#=
 Error performing operation: node 'i.dont.exist' is unknown
 Error performing operation: No such device or address
 =#=#=#= Current cib after: Don't support migration to non-existent locations =#=#=#=
 <cib epoch="17" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Don't support migration to non-existent locations - No such device or address (6) =#=#=#=
 * Passed: crm_resource   - Don't support migration to non-existent locations
 =#=#=#= Begin test: Create a fencing resource =#=#=#=
 =#=#=#= Current cib after: Create a fencing resource =#=#=#=
 <cib epoch="18" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Create a fencing resource - OK (0) =#=#=#=
 * Passed: cibadmin       - Create a fencing resource
 =#=#=#= Begin test: Bring resources online =#=#=#=
 
 Current cluster status:
 Online: [ node1 ]
 
  dummy	(ocf::pacemaker:Dummy):	Stopped
  Fence	(stonith:fence_true):	Stopped
 
 Transition Summary:
  * Start      dummy   ( node1 )  
  * Start      Fence   ( node1 )  
 
 Executing cluster transition:
  * Resource action: dummy           monitor on node1
  * Resource action: Fence           monitor on node1
  * Resource action: dummy           start on node1
  * Resource action: Fence           start on node1
 
 Revised cluster status:
 Online: [ node1 ]
 
  dummy	(ocf::pacemaker:Dummy):	Started node1
  Fence	(stonith:fence_true):	Started node1
 
 =#=#=#= Current cib after: Bring resources online =#=#=#=
 <cib epoch="18" num_updates="4" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Bring resources online - OK (0) =#=#=#=
 * Passed: crm_simulate   - Bring resources online
 =#=#=#= Begin test: Try to move a resource to its existing location =#=#=#=
 Error performing operation: dummy is already active on node1
 Error performing operation: Invalid argument
 =#=#=#= Current cib after: Try to move a resource to its existing location =#=#=#=
 <cib epoch="18" num_updates="4" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Try to move a resource to its existing location - Invalid argument (22) =#=#=#=
 * Passed: crm_resource   - Try to move a resource to its existing location
 =#=#=#= Begin test: Move a resource from its existing location =#=#=#=
 WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
 	This will prevent dummy from running on node1 until the constraint is removed using the 'crm_resource --clear' command or manually with cibadmin
 	This will be the case even if node1 is the last node in the cluster
 	This message can be disabled with --quiet
 =#=#=#= Current cib after: Move a resource from its existing location =#=#=#=
 <cib epoch="19" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints>
       <rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
     </constraints>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Move a resource from its existing location - OK (0) =#=#=#=
 * Passed: crm_resource   - Move a resource from its existing location
 =#=#=#= Begin test: Clear out constraints generated by --move =#=#=#=
 =#=#=#= Current cib after: Clear out constraints generated by --move =#=#=#=
 <cib epoch="20" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Clear out constraints generated by --move - OK (0) =#=#=#=
 * Passed: crm_resource   - Clear out constraints generated by --move
 =#=#=#= Begin test: Default ticket granted state =#=#=#=
 false
 =#=#=#= Current cib after: Default ticket granted state =#=#=#=
 <cib epoch="20" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Default ticket granted state - OK (0) =#=#=#=
 * Passed: crm_ticket     - Default ticket granted state
 =#=#=#= Begin test: Set ticket granted state =#=#=#=
 =#=#=#= Current cib after: Set ticket granted state =#=#=#=
 <cib epoch="20" num_updates="1" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <tickets>
       <ticket_state id="ticketA" granted="false"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Set ticket granted state - OK (0) =#=#=#=
 * Passed: crm_ticket     - Set ticket granted state
 =#=#=#= Begin test: Query ticket granted state =#=#=#=
 false
 =#=#=#= Current cib after: Query ticket granted state =#=#=#=
 <cib epoch="20" num_updates="1" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <tickets>
       <ticket_state id="ticketA" granted="false"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Query ticket granted state - OK (0) =#=#=#=
 * Passed: crm_ticket     - Query ticket granted state
 =#=#=#= Begin test: Delete ticket granted state =#=#=#=
 =#=#=#= Current cib after: Delete ticket granted state =#=#=#=
 <cib epoch="20" num_updates="2" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <tickets>
       <ticket_state id="ticketA"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Delete ticket granted state - OK (0) =#=#=#=
 * Passed: crm_ticket     - Delete ticket granted state
 =#=#=#= Begin test: Make a ticket standby =#=#=#=
 =#=#=#= Current cib after: Make a ticket standby =#=#=#=
 <cib epoch="20" num_updates="3" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <tickets>
       <ticket_state id="ticketA" standby="true"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Make a ticket standby - OK (0) =#=#=#=
 * Passed: crm_ticket     - Make a ticket standby
 =#=#=#= Begin test: Query ticket standby state =#=#=#=
 true
 =#=#=#= Current cib after: Query ticket standby state =#=#=#=
 <cib epoch="20" num_updates="3" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <tickets>
       <ticket_state id="ticketA" standby="true"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Query ticket standby state - OK (0) =#=#=#=
 * Passed: crm_ticket     - Query ticket standby state
 =#=#=#= Begin test: Activate a ticket =#=#=#=
 =#=#=#= Current cib after: Activate a ticket =#=#=#=
 <cib epoch="20" num_updates="4" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <tickets>
       <ticket_state id="ticketA" standby="false"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Activate a ticket - OK (0) =#=#=#=
 * Passed: crm_ticket     - Activate a ticket
 =#=#=#= Begin test: Delete ticket standby state =#=#=#=
 =#=#=#= Current cib after: Delete ticket standby state =#=#=#=
 <cib epoch="20" num_updates="5" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <tickets>
       <ticket_state id="ticketA"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Delete ticket standby state - OK (0) =#=#=#=
 * Passed: crm_ticket     - Delete ticket standby state
 =#=#=#= Begin test: Ban a resource on unknown node =#=#=#=
 Error performing operation: node 'host1' is unknown
 Error performing operation: No such device or address
 =#=#=#= Current cib after: Ban a resource on unknown node =#=#=#=
 <cib epoch="20" num_updates="5" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <tickets>
       <ticket_state id="ticketA"/>
     </tickets>
   </status>
 </cib>
 =#=#=#= End test: Ban a resource on unknown node - No such device or address (6) =#=#=#=
 * Passed: crm_resource   - Ban a resource on unknown node
 =#=#=#= Begin test: Create two more nodes and bring them online =#=#=#=
 
 Current cluster status:
 Online: [ node1 ]
 
  dummy	(ocf::pacemaker:Dummy):	Started node1
  Fence	(stonith:fence_true):	Started node1
 
 Performing requested modifications
  + Bringing node node2 online
  + Bringing node node3 online
 
 Transition Summary:
  * Move       Fence   ( node1 -> node2 )  
 
 Executing cluster transition:
  * Resource action: dummy           monitor on node3
  * Resource action: dummy           monitor on node2
  * Resource action: Fence           monitor on node3
  * Resource action: Fence           monitor on node2
  * Resource action: Fence           stop on node1
  * Pseudo action:   all_stopped
  * Resource action: Fence           start on node2
 
 Revised cluster status:
 Online: [ node1 node2 node3 ]
 
  dummy	(ocf::pacemaker:Dummy):	Started node1
  Fence	(stonith:fence_true):	Started node2
 
 =#=#=#= Current cib after: Create two more nodes and bring them online =#=#=#=
 <cib epoch="22" num_updates="8" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints/>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <tickets>
       <ticket_state id="ticketA"/>
     </tickets>
     <node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node2">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node3">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Create two more nodes and bring them online - OK (0) =#=#=#=
 * Passed: crm_simulate   - Create two more nodes and bring them online
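
The "Performing requested modifications" block is crm_simulate mutating the scratch CIB before running the policy engine: each node brought online gains a node_state entry, and the computed transition (probes on the new nodes, then the Fence move) is executed against the file. A sketch, assuming --node-up, --simulate and --save-output behave as in contemporary crm_simulate builds:

    # bring two simulated nodes online, run the transition, write the result back
    crm_simulate --xml-file "$CIB_file" --node-up node2 --node-up node3 \
                 --simulate --save-output "$CIB_file"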
 =#=#=#= Begin test: Ban dummy from node1 =#=#=#=
 WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node1' with a score of -INFINITY for resource dummy on node1.
 	This will prevent dummy from running on node1 until the constraint is removed using the 'crm_resource --clear' command or manually with cibadmin
 	This will be the case even if node1 is the last node in the cluster
 	This message can be disabled with --quiet
 =#=#=#= Current cib after: Ban dummy from node1 =#=#=#=
 <cib epoch="23" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints>
       <rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
     </constraints>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <tickets>
       <ticket_state id="ticketA"/>
     </tickets>
     <node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node2">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node3">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Ban dummy from node1 - OK (0) =#=#=#=
 * Passed: crm_resource   - Ban dummy from node1
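
As the WARNING itself explains, a ban is nothing more than a rsc_location constraint with score -INFINITY: the cli-ban-dummy-on-node1 element now present in the constraints section is the entire effect, and the status section stays untouched until a transition actually runs. A sketch of the invocation (the next test repeats it for node2):

    crm_resource --resource dummy --ban --node node1
    # undo later with: crm_resource --resource dummy --clear --node node1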
 =#=#=#= Begin test: Ban dummy from node2 =#=#=#=
 WARNING: Creating rsc_location constraint 'cli-ban-dummy-on-node2' with a score of -INFINITY for resource dummy on node2.
 	This will prevent dummy from running on node2 until the constraint is removed using the 'crm_resource --clear' command or manually with cibadmin
 	This will be the case even if node2 is the last node in the cluster
 	This message can be disabled with --quiet
 =#=#=#= Current cib after: Ban dummy from node2 =#=#=#=
 <cib epoch="24" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints>
       <rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
       <rsc_location id="cli-ban-dummy-on-node2" rsc="dummy" role="Started" node="node2" score="-INFINITY"/>
     </constraints>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <tickets>
       <ticket_state id="ticketA"/>
     </tickets>
     <node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node2">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node3">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Ban dummy from node2 - OK (0) =#=#=#=
 * Passed: crm_resource   - Ban dummy from node2
 =#=#=#= Begin test: Relocate resources due to ban =#=#=#=
 
 Current cluster status:
 Online: [ node1 node2 node3 ]
 
  dummy	(ocf::pacemaker:Dummy):	Started node1
  Fence	(stonith:fence_true):	Started node2
 
 Transition Summary:
  * Move       dummy   ( node1 -> node3 )  
 
 Executing cluster transition:
  * Resource action: dummy           stop on node1
  * Pseudo action:   all_stopped
  * Resource action: dummy           start on node3
 
 Revised cluster status:
 Online: [ node1 node2 node3 ]
 
  dummy	(ocf::pacemaker:Dummy):	Started node3
  Fence	(stonith:fence_true):	Started node2
 
 =#=#=#= Current cib after: Relocate resources due to ban =#=#=#=
 <cib epoch="24" num_updates="2" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints>
       <rsc_location id="cli-ban-dummy-on-node1" rsc="dummy" role="Started" node="node1" score="-INFINITY"/>
       <rsc_location id="cli-ban-dummy-on-node2" rsc="dummy" role="Started" node="node2" score="-INFINITY"/>
     </constraints>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <tickets>
       <ticket_state id="ticketA"/>
     </tickets>
     <node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node2">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node3">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Relocate resources due to ban - OK (0) =#=#=#=
 * Passed: crm_simulate   - Relocate resources due to ban
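
With -INFINITY bans on both node1 and node2, node3 is the only node where dummy may still run, so the simulated transition stops it on node1 and starts it on node3 while Fence stays on node2. The resulting placement can be spot-checked afterwards; a sketch, assuming crm_resource's --locate query:

    crm_resource --resource dummy --locate
    # prints something along the lines of: resource dummy is running on: node3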
 =#=#=#= Begin test: Move dummy to node1 =#=#=#=
 =#=#=#= Current cib after: Move dummy to node1 =#=#=#=
 <cib epoch="26" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints>
       <rsc_location id="cli-ban-dummy-on-node2" rsc="dummy" role="Started" node="node2" score="-INFINITY"/>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <tickets>
       <ticket_state id="ticketA"/>
     </tickets>
     <node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node2">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node3">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Move dummy to node1 - OK (0) =#=#=#=
 * Passed: crm_resource   - Move dummy to node1
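
A move is the mirror image of a ban: crm_resource records a cli-prefer-dummy rsc_location constraint with score INFINITY for node1. Note in the constraints section above that cli-ban-dummy-on-node1 is gone as well, and that the epoch jumped from 24 to 26 — two configuration changes, dropping the contradictory ban on the destination node along with adding the preference. A sketch:

    crm_resource --resource dummy --move --node node1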
 =#=#=#= Begin test: Clear implicit constraints for dummy on node2 =#=#=#=
 =#=#=#= Current cib after: Clear implicit constraints for dummy on node2 =#=#=#=
 <cib epoch="27" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status>
     <node_state id="node1" uname="node1" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <transient_attributes id="node1">
         <instance_attributes id="status-node1"/>
       </transient_attributes>
       <lrm id="node1">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_stop_0" operation="stop" crm-debug-origin="crm_simulate" transition-key="3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;3:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="3" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <tickets>
       <ticket_state id="ticketA"/>
     </tickets>
     <node_state id="node2" uname="node2" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node2">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
     <node_state id="node3" uname="node3" in_ccm="true" crmd="online" join="member" expected="member" crm-debug-origin="crm_simulate">
       <lrm id="node3">
         <lrm_resources>
           <lrm_resource id="dummy" class="ocf" provider="pacemaker" type="Dummy">
-            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="crm_simulate" transition-key="2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:0;2:-1:0:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="2" rc-code="0" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="0830891652dabe627ca72b8e879199b1"/>
           </lrm_resource>
           <lrm_resource id="Fence" class="stonith" type="fence_true">
-            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
+            <lrm_rsc_op id="Fence_last_0" operation_key="Fence_monitor_0" operation="monitor" crm-debug-origin="crm_simulate" transition-key="1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" transition-magic="0:7;1:-1:7:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" exit-reason="" call-id="1" rc-code="7" op-status="0" interval="0" exec-time="0" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
     </node_state>
   </status>
 </cib>
 =#=#=#= End test: Clear implicit constraints for dummy on node2 - OK (0) =#=#=#=
 * Passed: crm_resource   - Clear implicit constraints for dummy on node2
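
--clear removes the constraints that crm_resource itself planted (the cli-ban-*/cli-prefer-* ids). Scoped to node2 it deletes only cli-ban-dummy-on-node2, which is why cli-prefer-dummy survives in the dump above. A sketch:

    crm_resource --resource dummy --clear --node node2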
 =#=#=#= Begin test: Drop the status section =#=#=#=
 =#=#=#= End test: Drop the status section - OK (0) =#=#=#=
 * Passed: cibadmin       - Drop the status section
 =#=#=#= Begin test: Create a clone =#=#=#=
 =#=#=#= End test: Create a clone - OK (0) =#=#=#=
 * Passed: cibadmin       - Create a clone
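
The two cibadmin tests are direct XML surgery on the scratch CIB: the first empties the status section so later dumps show only configuration, the second wraps a fresh Dummy primitive in the test-clone that the following meta-attribute tests exercise. A sketch, assuming the driver uses replace/create with inline XML:

    # wipe runtime state, keeping the configuration
    cibadmin --replace --scope status --xml-text '<status/>'
    # add the clone used by the meta-attribute tests below
    cibadmin --create --scope resources --xml-text \
      '<clone id="test-clone"><primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy"/></clone>'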
 =#=#=#= Begin test: Create a resource meta attribute =#=#=#=
 
 Performing update of 'is-managed' on 'test-clone', the parent of 'test-primitive'
 Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed=false
 =#=#=#= Current cib after: Create a resource meta attribute =#=#=#=
 <cib epoch="29" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy"/>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="false"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Create a resource meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource   - Create a resource meta attribute
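
The first output line is the behavior under test: asked to set is-managed on test-primitive, crm_resource notices the primitive lives inside test-clone and, since neither level carries the attribute yet, performs the update on the parent so the whole clone is covered. A sketch of the invocation:

    crm_resource --resource test-primitive --meta \
        --set-parameter is-managed --parameter-value false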
 =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
 Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed=false
 =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
 <cib epoch="30" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="false"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="false"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
 * Passed: crm_resource   - Create a resource meta attribute in the primitive
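
Here the parent redirection is suppressed (presumably with --force, matching the "(force clone)" variant later), so the same setting lands in test-primitive-meta_attributes on the primitive itself, alongside the clone-level copy already present. A sketch:

    crm_resource --resource test-primitive --meta \
        --set-parameter is-managed --parameter-value false --force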
 =#=#=#= Begin test: Update resource meta attribute with duplicates =#=#=#=
 Multiple attributes match name=is-managed
   Value: false 	(id=test-primitive-meta_attributes-is-managed)
   Value: false 	(id=test-clone-meta_attributes-is-managed)
 
 A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
 Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed=true
 =#=#=#= Current cib after: Update resource meta attribute with duplicates =#=#=#=
 <cib epoch="31" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="false"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Update resource meta attribute with duplicates - OK (0) =#=#=#=
 * Passed: crm_resource   - Update resource meta attribute with duplicates
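
Once both levels carry the attribute, an un-forced update aimed at test-clone lists the duplicates and then, as its own message says, updates the existing value in child test-primitive rather than the clone's, since the child-level setting is the one that takes effect for that instance; the "(force clone)" and child-targeted variants that follow pin the target explicitly. The value can be read back afterwards; a sketch, assuming --get-parameter:

    crm_resource --resource test-primitive --meta --get-parameter is-managed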
 =#=#=#= Begin test: Update resource meta attribute with duplicates (force clone) =#=#=#=
 Set 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed=true
 =#=#=#= Current cib after: Update resource meta attribute with duplicates (force clone) =#=#=#=
 <cib epoch="32" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Update resource meta attribute with duplicates (force clone) - OK (0) =#=#=#=
 * Passed: crm_resource   - Update resource meta attribute with duplicates (force clone)
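 (Here the same update is forced onto the clone itself; again a sketch, since the command line is not recorded here:
     crm_resource -r test-clone --meta -p is-managed -v true --force
 With --force, the clone's own test-clone-meta_attributes nvpair is updated and the child's copy is left alone.)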
 =#=#=#= Begin test: Update child resource meta attribute with duplicates =#=#=#=
 Multiple attributes match name=is-managed
   Value: true 	(id=test-primitive-meta_attributes-is-managed)
   Value: true 	(id=test-clone-meta_attributes-is-managed)
 
 Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed=false
 =#=#=#= Current cib after: Update child resource meta attribute with duplicates =#=#=#=
 <cib epoch="33" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="false"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Update child resource meta attribute with duplicates - OK (0) =#=#=#=
 * Passed: crm_resource   - Update child resource meta attribute with duplicates
 =#=#=#= Begin test: Delete resource meta attribute with duplicates =#=#=#=
 Multiple attributes match name=is-managed
   Value: false 	(id=test-primitive-meta_attributes-is-managed)
   Value: true 	(id=test-clone-meta_attributes-is-managed)
 
 A value for 'is-managed' already exists in child 'test-primitive', performing delete on that instead of 'test-clone'
 Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
 =#=#=#= Current cib after: Delete resource meta attribute with duplicates =#=#=#=
 <cib epoch="34" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Delete resource meta attribute with duplicates - OK (0) =#=#=#=
 * Passed: crm_resource   - Delete resource meta attribute with duplicates
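 (The delete above is redirected from the clone to the child that already defines the value, as the message notes. A hedged sketch of the likely invocation:
     crm_resource -r test-clone --meta -d is-managed
 The -d/--delete-parameter form removes the nvpair rather than setting a value.)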
 =#=#=#= Begin test: Delete resource meta attribute in parent =#=#=#=
 Performing delete of 'is-managed' on 'test-clone', the parent of 'test-primitive'
 Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
 =#=#=#= Current cib after: Delete resource meta attribute in parent =#=#=#=
 <cib epoch="35" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes"/>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Delete resource meta attribute in parent - OK (0) =#=#=#=
 * Passed: crm_resource   - Delete resource meta attribute in parent
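 (Conversely, deleting through the child when only the parent still defines the attribute walks up to the clone, as the message above states. A hedged sketch:
     crm_resource -r test-primitive --meta -d is-managed
 Note that no --force is needed; crm_resource follows the inheritance chain on its own.)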
 =#=#=#= Begin test: Create a resource meta attribute in the primitive =#=#=#=
 Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed set=test-primitive-meta_attributes name=is-managed=false
 =#=#=#= Current cib after: Create a resource meta attribute in the primitive =#=#=#=
 <cib epoch="36" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="false"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes"/>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Create a resource meta attribute in the primitive - OK (0) =#=#=#=
 * Passed: crm_resource   - Create a resource meta attribute in the primitive
 =#=#=#= Begin test: Update existing resource meta attribute =#=#=#=
 A value for 'is-managed' already exists in child 'test-primitive', performing update on that instead of 'test-clone'
 Set 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed=true
 =#=#=#= Current cib after: Update existing resource meta attribute =#=#=#=
 <cib epoch="37" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes"/>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Update existing resource meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource   - Update existing resource meta attribute
 =#=#=#= Begin test: Create a resource meta attribute in the parent =#=#=#=
 Set 'test-clone' option: id=test-clone-meta_attributes-is-managed set=test-clone-meta_attributes name=is-managed=true
 =#=#=#= Current cib after: Create a resource meta attribute in the parent =#=#=#=
 <cib epoch="38" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Create a resource meta attribute in the parent - OK (0) =#=#=#=
 * Passed: crm_resource   - Create a resource meta attribute in the parent
 =#=#=#= Begin test: Copy resources =#=#=#=
 =#=#=#= End test: Copy resources - OK (0) =#=#=#=
 * Passed: cibadmin       - Copy resources
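 (No output is expected from this step: it snapshots the resources section so the duplicate attribute set can be restored later. A plausible shape, with the scratch file name purely illustrative:
     cibadmin -Q -o resources > /tmp/regression.resources.xml
 where -Q queries the CIB and -o limits the query to the named section.)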
 =#=#=#= Begin test: Delete resource parent meta attribute (force) =#=#=#=
 Deleted 'test-clone' option: id=test-clone-meta_attributes-is-managed name=is-managed
 =#=#=#= Current cib after: Delete resource parent meta attribute (force) =#=#=#=
 <cib epoch="39" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes"/>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Delete resource parent meta attribute (force) - OK (0) =#=#=#=
 * Passed: crm_resource   - Delete resource parent meta attribute (force)
 =#=#=#= Begin test: Restore duplicates =#=#=#=
 =#=#=#= Current cib after: Restore duplicates =#=#=#=
 <cib epoch="40" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes">
             <nvpair id="test-primitive-meta_attributes-is-managed" name="is-managed" value="true"/>
           </meta_attributes>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Restore duplicates - OK (0) =#=#=#=
 * Passed: cibadmin       - Restore duplicates
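 (The restore step replaces the resources section from the snapshot taken in the "Copy resources" test above; again a sketch, with the file name illustrative:
     cibadmin --replace -o resources --xml-file /tmp/regression.resources.xml
 which is why both the clone and the child carry is-managed=true again in the CIB dump.)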
 =#=#=#= Begin test: Delete resource child meta attribute =#=#=#=
 Multiple attributes match name=is-managed
   Value: true 	(id=test-primitive-meta_attributes-is-managed)
   Value: true 	(id=test-clone-meta_attributes-is-managed)
 
 Deleted 'test-primitive' option: id=test-primitive-meta_attributes-is-managed name=is-managed
 =#=#=#= Current cib after: Delete resource child meta attribute =#=#=#=
 <cib epoch="41" num_updates="0" admin_epoch="1">
   <configuration>
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-no-quorum-policy" name="no-quorum-policy" value="ignore"/>
       </cluster_property_set>
       <cluster_property_set id="duplicate">
         <nvpair id="duplicate-cluster-delay" name="cluster-delay" value="30s"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="node1" uname="node1">
         <instance_attributes id="nodes-node1">
           <nvpair id="nodes-node1-ram" name="ram" value="1024M"/>
         </instance_attributes>
       </node>
       <node id="node2" uname="node2"/>
       <node id="node3" uname="node3"/>
     </nodes>
     <resources>
       <primitive id="dummy" class="ocf" provider="pacemaker" type="Dummy">
         <meta_attributes id="dummy-meta_attributes"/>
         <instance_attributes id="dummy-instance_attributes">
           <nvpair id="dummy-instance_attributes-delay" name="delay" value="10s"/>
         </instance_attributes>
       </primitive>
       <primitive id="Fence" class="stonith" type="fence_true"/>
       <clone id="test-clone">
         <primitive id="test-primitive" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="test-primitive-meta_attributes"/>
         </primitive>
         <meta_attributes id="test-clone-meta_attributes">
           <nvpair id="test-clone-meta_attributes-is-managed" name="is-managed" value="true"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="cli-prefer-dummy" rsc="dummy" role="Started" node="node1" score="INFINITY"/>
     </constraints>
   </configuration>
   <status/>
 </cib>
 =#=#=#= End test: Delete resource child meta attribute - OK (0) =#=#=#=
 * Passed: crm_resource   - Delete resource child meta attribute