diff --git a/crmd/corosync.c b/crmd/corosync.c
index c3ff684bb3..51f64d0cf0 100644
--- a/crmd/corosync.c
+++ b/crmd/corosync.c
@@ -1,214 +1,214 @@
 /*
  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  *
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 
 #include <crm/crm.h>
 #include <crm/cluster/internal.h>
 #include <crm/common/xml.h>
 
 #include <crmd.h>
 #include <crmd_fsa.h>
 #include <fsa_proto.h>
 #include <crmd_messages.h>
 #include <crmd_callbacks.h>
 #include <crmd_lrm.h>
 #include <tengine.h>
 
 #include <sys/types.h>
 #include <sys/stat.h>
 
 extern void post_cache_update(int seq);
 extern void crmd_ha_connection_destroy(gpointer user_data);
 
 /*	 A_HA_CONNECT	*/
 #if SUPPORT_COROSYNC
 
 static void
 crmd_cs_dispatch(cpg_handle_t handle,
                          const struct cpg_name *groupName,
                          uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len)
 {
     int seq = 0;
     xmlNode *xml = NULL;
     const char *seq_s = NULL;
     crm_node_t *peer = NULL;
     enum crm_proc_flag flag = crm_proc_cpg;
 
     uint32_t kind = 0;
     const char *from = NULL;
     char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from);
 
     if(data == NULL) {
         return;
     }
     xml = string2xml(data);
     if (xml == NULL) {
         crm_err("Could not parse message content (%d): %.100s", kind, data);
         free(data);
         return;
     }
 
     switch (kind) {
         case crm_class_members:
             seq_s = crm_element_value(xml, "id");
             seq = crm_int_helper(seq_s, NULL);
             set_bit(fsa_input_register, R_PEER_DATA);
             post_cache_update(seq);
 
             /* fall through */
         case crm_class_quorum:
             crm_update_quorum(crm_have_quorum, FALSE);
             if (AM_I_DC) {
                 const char *votes = crm_element_value(xml, "expected");
 
                 if (votes == NULL || check_number(votes) == FALSE) {
                     crm_log_xml_err(xml, "Invalid quorum/membership update");
 
                 } else {
                     int rc = update_attr_delegate(fsa_cib_conn,
                                                   cib_quorum_override | cib_scope_local |
                                                   cib_inhibit_notify,
                                                   XML_CIB_TAG_CRMCONFIG, NULL, NULL, NULL, NULL,
                                                   XML_ATTR_EXPECTED_VOTES, votes, FALSE, NULL);
 
                     crm_info("Setting expected votes to %s", votes);
                     if (pcmk_ok > rc) {
                         crm_err("Quorum update failed: %s", pcmk_strerror(rc));
                     }
                 }
             }
             break;
 
         case crm_class_cluster:
             crm_xml_add(xml, F_ORIG, from);
             /* crm_xml_add_int(xml, F_SEQ, wrapper->id); Fake? */
 
             if (is_heartbeat_cluster()) {
                 flag = crm_proc_heartbeat;
 
             } else if (is_classic_ais_cluster()) {
                 flag = crm_proc_plugin;
             }
 
             peer = crm_get_peer(0, from);
             if (is_not_set(peer->processes, flag)) {
                 /* If we can still talk to our peer process on that node,
                  * then its also part of the corosync membership
                  */
-                crm_warn("Recieving messages from a node we think is dead: %s[%d]", peer->uname,
+                crm_warn("Receiving messages from a node we think is dead: %s[%d]", peer->uname,
                          peer->id);
                 crm_update_peer_proc(__FUNCTION__, peer, flag, ONLINESTATUS);
             }
             crmd_ha_msg_filter(xml);
             break;
 
         case crm_class_rmpeer:
             /* Ignore */
             break;
 
         case crm_class_notify:
         case crm_class_nodeid:
             crm_err("Unexpected message class (%d): %.100s", kind, data);
             break;
 
         default:
             crm_err("Invalid message class (%d): %.100s", kind, data);
     }
 
     free(data);
     free_xml(xml);
 }
 
 static gboolean
 crmd_cman_dispatch(unsigned long long seq, gboolean quorate)
 {
     crm_update_quorum(quorate, FALSE);
     post_cache_update(seq);
     return TRUE;
 }
 
 static void
 crmd_quorum_destroy(gpointer user_data)
 {
     if (is_not_set(fsa_input_register, R_HA_DISCONNECTED)) {
         crm_err("connection terminated");
         crmd_exit(ENOLINK);
 
     } else {
         crm_info("connection closed");
     }
 }
 
 static void
 crmd_cs_destroy(gpointer user_data)
 {
     if (is_not_set(fsa_input_register, R_HA_DISCONNECTED)) {
         crm_err("connection terminated");
         crmd_exit(ENOLINK);
 
     } else {
         crm_info("connection closed");
     }
 }
 
 #  if SUPPORT_CMAN
 static void
 crmd_cman_destroy(gpointer user_data)
 {
     if (is_not_set(fsa_input_register, R_HA_DISCONNECTED)) {
         crm_err("connection terminated");
         crmd_exit(ENOLINK);
 
     } else {
         crm_info("connection closed");
     }
 }
 #  endif
 
 extern gboolean crm_connect_corosync(crm_cluster_t * cluster);
 
 gboolean
 crm_connect_corosync(crm_cluster_t * cluster)
 {
     gboolean rc = FALSE;
 
     if (is_openais_cluster()) {
         crm_set_status_callback(&peer_update_callback);
         cluster->cpg.cpg_deliver_fn = crmd_cs_dispatch;
         cluster->cpg.cpg_confchg_fn = pcmk_cpg_membership;
         cluster->destroy = crmd_cs_destroy;
 
         rc = crm_cluster_connect(cluster);
     }
 
     if (rc && is_corosync_cluster()) {
         cluster_connect_quorum(crmd_cman_dispatch, crmd_quorum_destroy);
     }
 #  if SUPPORT_CMAN
     if (rc && is_cman_cluster()) {
         init_cman_connection(crmd_cman_dispatch, crmd_cman_destroy);
         set_bit(fsa_input_register, R_MEMBERSHIP);
     }
 #  endif
     return rc;
 }
 
 #endif
diff --git a/crmd/fsa_defines.h b/crmd/fsa_defines.h
index 1d90e69be0..7b2ca00087 100644
--- a/crmd/fsa_defines.h
+++ b/crmd/fsa_defines.h
@@ -1,493 +1,493 @@
 /* 
  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  * 
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  * 
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  * 
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 #ifndef FSA_DEFINES__H
 #  define FSA_DEFINES__H
 
 /*======================================
  *	States the DC/CRMd can be in
  *======================================*/
 enum crmd_fsa_state {
     S_IDLE = 0,                 /* Nothing happening */
 
     S_ELECTION,                 /* Take part in the election algorithm as 
                                  * described below
                                  */
     S_INTEGRATION,              /* integrate that status of new nodes (which is 
                                  * all of them if we have just been elected DC)
                                  * to form a complete and up-to-date picture of
                                  * the CIB
                                  */
     S_FINALIZE_JOIN,            /* integrate that status of new nodes (which is 
                                  * all of them if we have just been elected DC)
                                  * to form a complete and up-to-date picture of
                                  * the CIB
                                  */
     S_NOT_DC,                   /* we are in crmd/slave mode */
     S_POLICY_ENGINE,            /* Determin the next stable state of the cluster
                                  */
     S_RECOVERY,                 /* Something bad happened, check everything is ok
                                  * before continuing and attempt to recover if
                                  * required
                                  */
     S_RELEASE_DC,               /* we were the DC, but now we arent anymore,
                                  * possibly by our own request, and we should 
                                  * release all unnecessary sub-systems, finish
                                  * any pending actions, do general cleanup and
                                  * unset anything that makes us think we are
                                  * special :)
                                  */
     S_STARTING,                 /* we are just starting out */
     S_PENDING,                  /* we are not a full/active member yet */
     S_STOPPING,                 /* We are in the final stages of shutting down */
     S_TERMINATE,                /* We are going to shutdown, this is the equiv of
                                  * "Sending TERM signal to all processes" in Linux
                                  * and in worst case scenarios could be considered
                                  * a self STONITH
                                  */
     S_TRANSITION_ENGINE,        /* Attempt to make the calculated next stable
                                  * state of the cluster a reality
                                  */
 
     S_HALT,                     /* Freeze - dont do anything
                                  * Something ad happened that needs the admin to fix
                                  * Wait for I_ELECTION
                                  */
 
     /*  ----------- Last input found in table is above ---------- */
     S_ILLEGAL                   /* This is an illegal FSA state */
         /* (must be last) */
 };
 
 #  define MAXSTATE	S_ILLEGAL
 /*
 	A state diagram can be constructed from the dc_fsa.dot with the
 	following command:
 
 	dot -Tpng crmd_fsa.dot > crmd_fsa.png
 
 Description:
 
       Once we start and do some basic sanity checks, we go into the
       S_NOT_DC state and await instructions from the DC or input from
       the CCM which indicates the election algorithm needs to run.
 
       If the election algorithm is triggered we enter the S_ELECTION state
       from where we can either go back to the S_NOT_DC state or progress
       to the S_INTEGRATION state (or S_RELEASE_DC if we used to be the DC
       but arent anymore).
 
       The election algorithm has been adapted from
       http://www.cs.indiana.edu/cgi-bin/techreports/TRNNN.cgi?trnum=TR521
 
       Loosly known as the Bully Algorithm, its major points are:
       - Election is initiated by any node (N) notices that the coordinator
 	is no longer responding
       - Concurrent multiple elections are possible
       - Algorithm
 	  + N sends ELECTION messages to all nodes that occur earlier in
 	  the CCM's membership list.
 	  + If no one responds, N wins and becomes coordinator
 	  + N sends out COORDINATOR messages to all other nodes in the
 	  partition
 	  + If one of higher-ups answers, it takes over. N is done.
       
       Once the election is complete, if we are the DC, we enter the
       S_INTEGRATION state which is a DC-in-waiting style state.  We are
       the DC, but we shouldnt do anything yet because we may not have an
       up-to-date picture of the cluster.  There may of course be times
       when this fails, so we should go back to the S_RECOVERY stage and
       check everything is ok.  We may also end up here if a new node came
       online, since each node is authorative on itself and we would want
       to incorporate its information into the CIB.
 
       Once we have the latest CIB, we then enter the S_POLICY_ENGINE state
       where invoke the Policy Engine. It is possible that between
-      invoking the Policy Engine and recieving an answer, that we recieve
+      invoking the Policy Engine and receiving an answer, that we receive
       more input. In this case we would discard the orginal result and
       invoke it again.
 
       Once we are satisfied with the output from the Policy Engine we
       enter S_TRANSITION_ENGINE and feed the Policy Engine's output to the
       Transition Engine who attempts to make the Policy Engine's
       calculation a reality.  If the transition completes successfully,
       we enter S_IDLE, otherwise we go back to S_POLICY_ENGINE with the
       current unstable state and try again.
       
       Of course we may be asked to shutdown at any time, however we must
       progress to S_NOT_DC before doing so.  Once we have handed over DC
       duties to another node, we can then shut down like everyone else,
       that is by asking the DC for permission and waiting it to take all
       our resources away.
 
       The case where we are the DC and the only node in the cluster is a
       special case and handled as an escalation which takes us to
       S_SHUTDOWN.  Similarly if any other point in the shutdown
       fails or stalls, this is escalated and we end up in S_TERMINATE.
 
       At any point, the CRMd/DC can relay messages for its sub-systems,
       but outbound messages (from sub-systems) should probably be blocked
       until S_INTEGRATION (for the DC case) or the join protocol has
       completed (for the CRMd case)
       
 */
 
 /*======================================
  *
  * 	Inputs/Events/Stimuli to be given to the finite state machine
  *
  *	Some of these a true events, and others a synthesised based on
  *	the "register" (see below) and the contents or source of messages.
  *
  *	At this point, my plan is to have a loop of some sort that keeps
- *	going until recieving I_NULL
+ *	going until receiving I_NULL
  *
  *======================================*/
 enum crmd_fsa_input {
 /* 0 */
     I_NULL,                     /* Nothing happened */
 /* 1 */
 
     I_CIB_OP,                   /* An update to the CIB occurred */
     I_CIB_UPDATE,               /* An update to the CIB occurred */
     I_DC_TIMEOUT,               /* We have lost communication with the DC */
     I_ELECTION,                 /* Someone started an election */
     I_PE_CALC,                  /* The Policy Engine needs to be invoked */
     I_RELEASE_DC,               /* The election completed and we were not
                                  * elected, but we were the DC beforehand
                                  */
     I_ELECTION_DC,              /* The election completed and we were (re-)elected
                                  * DC
                                  */
     I_ERROR,                    /* Something bad happened (more serious than
                                  * I_FAIL) and may not have been due to the action
                                  * being performed.  For example, we may have lost
                                  * our connection to the CIB.
                                  */
 /* 9 */
     I_FAIL,                     /* The action failed to complete successfully */
     I_INTEGRATED,
     I_FINALIZED,
     I_NODE_JOIN,                /* A node has entered the cluster */
     I_NOT_DC,                   /* We are not and were not the DC before or after
                                  * the current operation or state
                                  */
     I_RECOVERED,                /* The recovery process completed successfully */
     I_RELEASE_FAIL,             /* We could not give up DC status for some reason
                                  */
     I_RELEASE_SUCCESS,          /* We are no longer the DC */
     I_RESTART,                  /* The current set of actions needs to be
                                  * restarted
                                  */
     I_TE_SUCCESS,               /* Some non-resource, non-ccm action is required
                                  * of us, eg. ping
                                  */
 /* 20 */
     I_ROUTER,                   /* Do our job as router and forward this to the
                                  * right place
                                  */
     I_SHUTDOWN,                 /* We are asking to shutdown */
     I_STOP,                     /* We have been told to shutdown */
     I_TERMINATE,                /* Actually exit */
     I_STARTUP,
     I_PE_SUCCESS,               /* The action completed successfully */
 
     I_JOIN_OFFER,               /* The DC is offering membership */
     I_JOIN_REQUEST,             /* The client is requesting membership */
     I_JOIN_RESULT,              /* If not the DC: The result of a join request
                                  * Else: A client is responding with its local state info
                                  */
 
     I_WAIT_FOR_EVENT,           /* we may be waiting for an async task to "happen"
                                  * and until it does, we cant do anything else
                                  */
 
     I_DC_HEARTBEAT,             /* The DC is telling us that it is alive and well */
 
     I_LRM_EVENT,
 
 /* 30 */
     I_PENDING,
     I_HALT,
 
     /*  ------------ Last input found in table is above ----------- */
     I_ILLEGAL                   /* This is an illegal value for an FSA input */
         /* (must be last) */
 };
 
 #  define MAXINPUT	I_ILLEGAL
 
 #  define I_MESSAGE	I_ROUTER
 
 /*======================================
  *
  * actions
  *
  * Some of the actions below will always occur together for now, but I can
  * forsee that this may not always be the case.  So I've spilt them up so
  * that if they ever do need to be called independantly in the future, it
  * wont be a problem. 
  *
  * For example, separating A_LRM_CONNECT from A_STARTUP might be useful 
  * if we ever try to recover from a faulty or disconnected LRM.
  *
  *======================================*/
 
          /* Dont do anything */
 #  define	A_NOTHING		0x0000000000000000ULL
 
 /* -- Startup actions -- */
         /* Hook to perform any actions (other than starting the CIB,
          * connecting to HA or the CCM) that might be needed as part
          * of the startup.
          */
 #  define	A_STARTUP		0x0000000000000001ULL
         /* Hook to perform any actions that might be needed as part
          * after startup is successful.
          */
 #  define	A_STARTED		0x0000000000000002ULL
         /* Connect to Heartbeat */
 #  define	A_HA_CONNECT		0x0000000000000004ULL
 #  define	A_HA_DISCONNECT		0x0000000000000008ULL
 
 #  define	A_INTEGRATE_TIMER_START	0x0000000000000010ULL
 #  define	A_INTEGRATE_TIMER_STOP	0x0000000000000020ULL
 #  define	A_FINALIZE_TIMER_START	0x0000000000000040ULL
 #  define	A_FINALIZE_TIMER_STOP	0x0000000000000080ULL
 
 /* -- Election actions -- */
 #  define	A_DC_TIMER_START	0x0000000000000100ULL
 #  define	A_DC_TIMER_STOP		0x0000000000000200ULL
 #  define	A_ELECTION_COUNT	0x0000000000000400ULL
 #  define	A_ELECTION_VOTE		0x0000000000000800ULL
 
 #  define A_ELECTION_START	0x0000000000001000ULL
 
 /* -- Message processing -- */
         /* Process the queue of requests */
 #  define	A_MSG_PROCESS		0x0000000000002000ULL
         /* Send the message to the correct recipient */
 #  define	A_MSG_ROUTE		0x0000000000004000ULL
 
         /* Send a welcome message to new node(s) */
 #  define	A_DC_JOIN_OFFER_ONE	0x0000000000008000ULL
 
 /* -- Server Join protocol actions -- */
         /* Send a welcome message to all nodes */
 #  define	A_DC_JOIN_OFFER_ALL	0x0000000000010000ULL
         /* Process the remote node's ack of our join message */
 #  define	A_DC_JOIN_PROCESS_REQ	0x0000000000020000ULL
         /* Send out the reults of the Join phase */
 #  define	A_DC_JOIN_FINALIZE	0x0000000000040000ULL
         /* Send out the reults of the Join phase */
 #  define	A_DC_JOIN_PROCESS_ACK	0x0000000000080000ULL
 
 /* -- Client Join protocol actions -- */
 #  define	A_CL_JOIN_QUERY		0x0000000000100000ULL
 #  define	A_CL_JOIN_ANNOUNCE	0x0000000000200000ULL
         /* Request membership to the DC list */
 #  define	A_CL_JOIN_REQUEST	0x0000000000400000ULL
         /* Did the DC accept or reject the request */
 #  define	A_CL_JOIN_RESULT	0x0000000000800000ULL
 
 /* -- Recovery, DC start/stop -- */
         /* Something bad happened, try to recover */
 #  define	A_RECOVER		0x0000000001000000ULL
         /* Hook to perform any actions (apart from starting, the TE, PE 
          * and gathering the latest CIB) that might be necessary before 
          * giving up the responsibilities of being the DC.
          */
 #  define	A_DC_RELEASE		0x0000000002000000ULL
         /* */
 #  define	A_DC_RELEASED		0x0000000004000000ULL
         /* Hook to perform any actions (apart from starting, the TE, PE
          * and gathering the latest CIB) that might be necessary before
          * taking over the responsibilities of being the DC.
          */
 #  define	A_DC_TAKEOVER		0x0000000008000000ULL
 
 /* -- Shutdown actions -- */
 #  define	A_SHUTDOWN		0x0000000010000000ULL
 #  define	A_STOP			0x0000000020000000ULL
 #  define	A_EXIT_0		0x0000000040000000ULL
 #  define	A_EXIT_1		0x0000000080000000ULL
 
 #  define	A_SHUTDOWN_REQ		0x0000000100000000ULL
 #  define	A_ELECTION_CHECK	0x0000000200000000ULL
 #  define A_DC_JOIN_FINAL		0x0000000400000000ULL
 
 /* -- CCM actions -- */
 #  define	A_CCM_CONNECT		0x0000001000000000ULL
 #  define	A_CCM_DISCONNECT	0x0000002000000000ULL
 
 /* -- CIB actions -- */
 #  define	A_CIB_START		0x0000020000000000ULL
 #  define	A_CIB_STOP		0x0000040000000000ULL
 
 /* -- Transition Engine actions -- */
         /* Attempt to reach the newly  calculated cluster state.  This is 
          * only called once per transition (except if it is asked to
          * stop the transition or start a new one).
          * Once given a cluster state to reach, the TE will determin
          * tasks that can be performed in parallel, execute them, wait
          * for replies and then determin the next set until the new
          * state is reached or no further tasks can be taken.
          */
 #  define	A_TE_INVOKE		0x0000100000000000ULL
 #  define	A_TE_START		0x0000200000000000ULL
 #  define	A_TE_STOP		0x0000400000000000ULL
 #  define	A_TE_CANCEL		0x0000800000000000ULL
 #  define	A_TE_HALT		0x0001000000000000ULL
 
 /* -- Policy Engine actions -- */
         /* Calculate the next state for the cluster.  This is only
          * invoked once per needed calculation.
          */
 #  define	A_PE_INVOKE		0x0002000000000000ULL
 #  define	A_PE_START		0x0004000000000000ULL
 #  define	A_PE_STOP		0x0008000000000000ULL
 /* -- Misc actions -- */
         /* Add a system generate "block" so that resources arent moved
          * to or are activly moved away from the affected node.  This
          * way we can return quickly even if busy with other things.
          */
 #  define	A_NODE_BLOCK		0x0010000000000000ULL
         /* Update our information in the local CIB */
 #  define A_UPDATE_NODESTATUS	0x0020000000000000ULL
 #  define	A_CIB_BUMPGEN		0x0040000000000000ULL
 #  define	A_READCONFIG		0x0080000000000000ULL
 
 /* -- LRM Actions -- */
         /* Connect to the Local Resource Manager */
 #  define	A_LRM_CONNECT		0x0100000000000000ULL
         /* Disconnect from the Local Resource Manager */
 #  define A_LRM_DISCONNECT	0x0200000000000000ULL
 #  define A_LRM_INVOKE		0x0400000000000000ULL
 #  define A_LRM_EVENT		0x0800000000000000ULL
 
 /* -- Logging actions -- */
 #  define	A_LOG			0x1000000000000000ULL
 #  define	A_ERROR			0x2000000000000000ULL
 #  define	A_WARN			0x4000000000000000ULL
 
 #  define O_EXIT (A_SHUTDOWN|A_STOP|A_CCM_DISCONNECT|A_LRM_DISCONNECT|A_HA_DISCONNECT|A_EXIT_0|A_CIB_STOP)
 #  define O_RELEASE  (A_DC_TIMER_STOP|A_DC_RELEASE|A_PE_STOP|A_TE_STOP|A_DC_RELEASED)
 #  define	O_PE_RESTART		(A_PE_START|A_PE_STOP)
 #  define	O_TE_RESTART		(A_TE_START|A_TE_STOP)
 #  define	O_CIB_RESTART		(A_CIB_START|A_CIB_STOP)
 #  define	O_LRM_RECONNECT		(A_LRM_CONNECT|A_LRM_DISCONNECT)
 #  define O_DC_TIMER_RESTART	(A_DC_TIMER_STOP|A_DC_TIMER_START)
 /*======================================
  *
  * "register" contents
  *
  * Things we may want to remember regardless of which state we are in.
  *
  * These also count as inputs for synthesizing I_*
  *
  *======================================*/
 #  define	R_THE_DC	0x00000001ULL
                                         /* Are we the DC? */
 #  define	R_STARTING	0x00000002ULL
                                         /* Are we starting up? */
 #  define	R_SHUTDOWN	0x00000004ULL
                                         /* Are we trying to shut down? */
 #  define	R_STAYDOWN	0x00000008ULL
                                         /* Should we restart? */
 
 #  define R_JOIN_OK	0x00000010ULL   /* Have we completed the join process */
 #  define	R_READ_CONFIG	0x00000040ULL
 #  define	R_INVOKE_PE	0x00000080ULL
                                         /* Does the PE needed to be invoked at
                                            the next appropriate point? */
 
 #  define	R_CIB_CONNECTED	0x00000100ULL
                                         /* Is the CIB connected? */
 #  define	R_PE_CONNECTED	0x00000200ULL
                                         /* Is the Policy Engine connected? */
 #  define	R_TE_CONNECTED	0x00000400ULL
                                         /* Is the Transition Engine connected? */
 #  define	R_LRM_CONNECTED	0x00000800ULL
                                         /* Is the Local Resource Manager
                                            connected? */
 
 #  define	R_CIB_REQUIRED	0x00001000ULL
                                         /* Is the CIB required? */
 #  define	R_PE_REQUIRED	0x00002000ULL
                                         /* Is the Policy Engine required? */
 #  define	R_TE_REQUIRED	0x00004000ULL
                                         /* Is the Transition Engine required? */
 #  define	R_ST_REQUIRED	0x00008000ULL
                                         /* Is the Stonith daemon required? */
 
 #  define	R_CIB_DONE	0x00010000ULL
                                         /* Have we calculated the CIB? */
 #  define R_HAVE_CIB	0x00020000ULL   /* Do we have an up-to-date CIB */
 #  define R_CIB_ASKED	0x00040000ULL   /* Have we asked for an up-to-date CIB */
 
 #  define R_MEMBERSHIP	0x00100000ULL   /* Have we got CCM data yet */
 #  define R_PEER_DATA	0x00200000ULL   /* Have we got T_CL_STATUS data yet */
 
 #  define R_HA_DISCONNECTED  0x00400000ULL      /* did we sign out of our own accord */
 #  define R_CCM_DISCONNECTED 0x00800000ULL      /* did we sign out of our own accord */
 
 #  define	R_REQ_PEND	0x01000000ULL
                                         /* Are there Requests waiting for
                                            processing? */
 #  define	R_PE_PEND	0x02000000ULL
                                         /* Has the PE been invoked and we're
                                            awaiting a reply? */
 #  define	R_TE_PEND	0x04000000ULL
                                         /* Has the TE been invoked and we're
                                            awaiting completion? */
 #  define	R_RESP_PEND	0x08000000ULL
                                         /* Do we have clients waiting on a
                                            response? if so perhaps we shouldnt
                                            stop yet */
 
 #  define R_IN_TRANSITION	0x10000000ULL
                                         /*  */
 #  define R_SENT_RSC_STOP 0x20000000ULL /* Have we sent a stop action to all
                                          * resources in preparation for
                                          * shutting down */
 
 #  define R_IN_RECOVERY	0x80000000ULL
 
 enum crmd_fsa_cause {
     C_UNKNOWN = 0,
     C_STARTUP,
     C_IPC_MESSAGE,
     C_HA_MESSAGE,
     C_CCM_CALLBACK,
     C_CRMD_STATUS_CALLBACK,
     C_LRM_OP_CALLBACK,
     C_LRM_MONITOR_CALLBACK,
     C_TIMER_POPPED,
     C_SHUTDOWN,
     C_HEARTBEAT_FAILED,
     C_SUBSYSTEM_CONNECT,
     C_HA_DISCONNECT,
     C_FSA_INTERNAL,
     C_ILLEGAL
 };
 
 extern const char *fsa_input2string(enum crmd_fsa_input input);
 extern const char *fsa_state2string(enum crmd_fsa_state state);
 extern const char *fsa_cause2string(enum crmd_fsa_cause cause);
 extern const char *fsa_action2string(long long action);
 
 #endif
diff --git a/cts/CM_ais.py b/cts/CM_ais.py
index 97dba38227..c53b91272d 100644
--- a/cts/CM_ais.py
+++ b/cts/CM_ais.py
@@ -1,445 +1,445 @@
 '''CTS: Cluster Testing System: AIS dependent modules...
 '''
 
 __copyright__='''
 Copyright (C) 2007 Andrew Beekhof <andrew@suse.de>
 
 '''
 
 #
 # This program is free software; you can redistribute it and/or
 # modify it under the terms of the GNU General Public License
 # as published by the Free Software Foundation; either version 2
 # of the License, or (at your option) any later version.
 #
 # This program is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 # GNU General Public License for more details.
 #
 # You should have received a copy of the GNU General Public License
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA.
 
 import os, sys, warnings
 from cts.CTSvars import *
 from cts.CM_lha  import crm_lha
 from cts.CTS     import Process
 
 #######################################################################
 #
 #  LinuxHA v2 dependent modules
 #
 #######################################################################
 
 class crm_ais(crm_lha):
     '''
     The crm version 3 cluster manager class.
     It implements the things we need to talk to and manipulate
     crm clusters running on top of openais
     '''
     def __init__(self, Environment, randseed=None):
         crm_lha.__init__(self, Environment, randseed=randseed)
 
         self.update({
             "Name"           : "crm-ais",
 
             "EpocheCmd"      : "crm_node -e --openais",
             "QuorumCmd"      : "crm_node -q --openais",
             "ParitionCmd"    : "crm_node -p --openais",
 
             "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost",
             "Pat:ChildExit"    : "Child process .* exited",
 
             # Bad news Regexes.  Should never occur.
             "BadRegexes"   : (
                 r" trace:",
                 r"error:",
                 r"crit:",
                 r"ERROR:",
                 r"CRIT:",
                 r"Shutting down...NOW",
                 r"Timer I_TERMINATE just popped",
                 r"input=I_ERROR",
                 r"input=I_FAIL",
                 r"input=I_INTEGRATED cause=C_TIMER_POPPED",
                 r"input=I_FINALIZED cause=C_TIMER_POPPED",
                 r"input=I_ERROR",
                 r", exiting\.",
                 r"(WARN|warn).*Ignoring HA message.*vote.*not in our membership list",
                 r"pengine.*Attempting recovery of resource",
                 r"is taking more than 2x its timeout",
                 r"Confirm not received from",
                 r"Welcome reply not received from",
                 r"Attempting to schedule .* after a stop",
                 r"Resource .* was active at shutdown",
                 r"duplicate entries for call_id",
                 r"Search terminated:",
                 r":global_timer_callback",
                 r"Faking parameter digest creation",
                 r"Parameters to .* action changed:",
                 r"Parameters to .* changed",
                 r"Child process .* terminated with signal",
                 r"LogActions: Recover",
                 r"rsyslogd.* imuxsock lost .* messages from pid .* due to rate-limiting",
                 r"Peer is not part of our cluster",
                 r"We appear to be in an election loop",
                 r"Unknown node -> we will not deliver message",
                 r"crm_write_blackbox",
                 r"pacemakerd.*Could not connect to Cluster Configuration Database API",
-                r"Recieving messages from a node we think is dead",
+                r"Receiving messages from a node we think is dead",
                 r"share the same cluster nodeid",
                 r"share the same name",
 
                 #r"crm_ipc_send:.*Request .* failed",
-                #r"crm_ipc_send:.*Sending to .* is disabled until pending reply is recieved",
+                #r"crm_ipc_send:.*Sending to .* is disabled until pending reply is received",
 
                 # Not inherently bad, but worth tracking
                 #r"No need to invoke the TE",
                 #r"ping.*: DEBUG: Updated connected = 0",
                 #r"Digest mis-match:",
                 r"te_graph_trigger: Transition failed: terminated",
                 #r"Executing .* fencing operation",
                 #r"fence_pcmk.* Call to fence",
                 #r"fence_pcmk",
                 r"cman killed by node",
                 r"Election storm",
                 r"stalled the FSA with pending inputs",
             ),
         })
 
     def errorstoignore(self):
         # At some point implement a more elegant solution that
         #   also produces a report at the end
         '''Return the list of known, very noisy errors that should be ignored.'''
         # The list is returned unconditionally; the former "if 1:" wrapper and
         # the unreachable trailing "return []" were dead code and are removed.
         return [
             r"crm_mon:",
             r"crmadmin:",
             r"update_trace_data",
             r"async_notify: strange, client not found",
             r"Parse error: Ignoring unknown option .*nodename",
             r"error: log_operation: Operation 'reboot' .* with device 'FencingFail' returned:",
             r"Child process .* terminated with signal 9",
             r"getinfo response error: 1$",
             ]
 
     def NodeUUID(self, node):
         '''Return the UUID for a node; on these stacks the node name is its UUID.'''
         return node
 
     def ais_components(self):
         '''Build self.complist: one Process entry per pacemaker daemon,
            with patterns expected when that daemon is killed and the
            "bad news" patterns to ignore while the cluster recovers.'''
         fullcomplist = {}
         self.complist = []
         # Fallout expected whenever ANY component dies; shared by all entries below.
         self.common_ignore = [
                     "Pending action:",
                     "error: crm_log_message_adv:",
                     "error: MSG: No message to dump",
                     "resources were active at shutdown",
                     "pending LRM operations at shutdown",
                     "Lost connection to the CIB service",
                     "Connection to the CIB terminated...",
                     "Sending message to CIB service FAILED",
                     "apply_xml_diff: Diff application failed!",
                     "crmd.*Action A_RECOVER .* not supported",
                     "unconfirmed_actions: Waiting on .* unconfirmed actions",
                     "cib_native_msgready: Message pending on command channel",
                     "crmd.*do_exit: Performing A_EXIT_1 - forcefully exiting the CRMd",
                     "verify_stopped: Resource .* was active at shutdown.  You may ignore this error if it is unmanaged.",
                     "error: attrd_connection_destroy: Lost connection to attrd",
                     "info: te_fence_node: Executing .* fencing operation",
                     "crm_write_blackbox:",
 #                    "error: native_create_actions: Resource .*stonith::.* is active on 2 nodes attempting recovery",
 #                    "error: process_pe_message: Transition .* ERRORs found during PE processing",
             ]
 
         # Additional noise that is expected specifically when the CIB dies.
         cib_ignore = [
             "lrmd.*error: crm_ipc_read: Connection to stonith-ng failed",
             "lrmd.*error: mainloop_gio_callback: Connection to stonith-ng.* closed",
             "lrmd.*error: stonith_connection_destroy_cb: LRMD lost STONITH connection",
             "lrmd.*error: stonith_connection_failed: STONITH connection failed, finalizing .* pending operations",
             ]
 
         fullcomplist["cib"] = Process(self, "cib", pats = [
                     "State transition .* S_RECOVERY",
                     "Respawning .* crmd",
                     "Respawning .* attrd",
                     "error: crm_ipc_read: Connection to cib_.* failed",
                     "error: mainloop_gio_callback: Connection to cib_.* closed",
                     "Connection to the CIB terminated...",
                     "Child process crmd .* exited: Generic Pacemaker error",
                     "Child process attrd .* exited: Connection reset by peer",
                      "error: attrd_cib_destroy_cb: Lost connection to CIB service",
                     "crmd.*Input I_TERMINATE from do_recover",
                     "crmd.*I_ERROR.*crmd_cib_connection_destroy",
                     "crmd.*Could not recover from internal error",
                     ], badnews_ignore = cib_ignore, common_ignore = self.common_ignore)
 
         fullcomplist["lrmd"] = Process(self, "lrmd", pats = [
                     "State transition .* S_RECOVERY",
                     "LRM Connection failed",
                     "Respawning .* crmd",
                     "error: crm_ipc_read: Connection to lrmd failed",
                     "error: mainloop_gio_callback: Connection to lrmd.* closed",
                     "crmd.*I_ERROR.*lrm_connection_destroy",
                     "Child process crmd .* exited: Generic Pacemaker error",
                     "crmd.*Input I_TERMINATE from do_recover",
                     "crmd.* Could not recover from internal error",
                     ], badnews_ignore = self.common_ignore)
 
         fullcomplist["crmd"] = Process(self, "crmd", pats = [
 #                    "WARN: determine_online_status: Node .* is unclean",
 #                    "Scheduling Node .* for STONITH",
 #                    "Executing .* fencing operation",
 # Only if the node wasn't the DC:  "State transition S_IDLE",
                     "State transition .* -> S_IDLE",
                     ], badnews_ignore = self.common_ignore)
 
         fullcomplist["attrd"] = Process(self, "attrd", pats = [
                     ], badnews_ignore = self.common_ignore)
 
         # pengine patterns only apply on the DC (dc_pats rather than pats).
         fullcomplist["pengine"] = Process(self, "pengine", dc_pats = [
                     "State transition .* S_RECOVERY",
                     "Respawning .* crmd",
                     "Child process crmd .* exited: Generic Pacemaker error",
                     "crm_ipc_read: Connection to pengine failed",
                     "error: mainloop_gio_callback: Connection to pengine.* closed",
                     "crit: pe_ipc_destroy: Connection to the Policy Engine failed",
                     "crmd.*I_ERROR.*save_cib_contents",
                     "crmd.*Input I_TERMINATE from do_recover",
                     "crmd.* Could not recover from internal error",
                     ], badnews_ignore = self.common_ignore)
 
         stonith_ignore = [
             "LogActions: Recover Fencing",
             "update_failcount: Updating failcount for Fencing",
             "error: te_connect_stonith: Sign-in failed: triggered a retry",
             "stonith_connection_failed: STONITH connection failed, finalizing .* pending operations.",
             "process_lrm_event: LRM operation Fencing.* Error"
             ]
 
         stonith_ignore.extend(self.common_ignore)
 
         fullcomplist["stonith-ng"] = Process(self, "stonith-ng", process="stonithd", pats = [
                 "crm_ipc_read: Connection to stonith-ng failed",
                 "stonith_connection_destroy_cb: LRMD lost STONITH connection",
                 "mainloop_gio_callback: Connection to stonith-ng.* closed",
                 "tengine_stonith_connection_destroy: Fencing daemon connection failed",
                 "crmd.*stonith_api_add_notification: Callback already present",
                 ], badnews_ignore = stonith_ignore)
 
         # Filter the candidate list: skip valgrind-profiled processes (can't be
         # killed by name) and skip stonith-ng when fencing is disabled.
         vgrind = self.Env["valgrind-procs"].split()
         for key in fullcomplist.keys():
             if self.Env["valgrind-tests"]:
                 if key in vgrind:
                     # Processes running under valgrind can't be shot with "killall -9 processname"
                     self.log("Filtering %s from the component list as it is being profiled by valgrind" % key)
                     continue
             if key == "stonith-ng" and not self.Env["DoFencing"]:
                 continue
 
             self.complist.append(fullcomplist[key])
 
         #self.complist = [ fullcomplist["pengine"] ]
         return self.complist
 
 class crm_whitetank(crm_ais):
     '''
     The crm version 3 cluster manager class.
     It implements the things we need to talk to and manipulate
     crm clusters running on top of openais (the "whitetank" stack)
     '''
     def __init__(self, Environment, randseed=None):
         crm_ais.__init__(self, Environment, randseed=randseed)
 
         # Stack-specific service commands and log patterns for openais.
         self.update({
             "Name"           : "crm-whitetank",
             "StartCmd"       : "service openais start",
             "StopCmd"        : "service openais stop",
 
             "Pat:We_stopped"   : "%s.*openais.*pcmk_shutdown: Shutdown complete",
             "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost",
             "Pat:They_dead"    : "openais:.*Node %s is now: lost",
 
             "Pat:ChildKilled"  : "%s openais.*Child process %s terminated with signal 9",
             "Pat:ChildRespawn" : "%s openais.*Respawning failed child process: %s",
             "Pat:ChildExit"    : "Child process .* exited",
         })
 
     def Components(self):
         # Build the common component list, then add the aisexec process itself.
         self.ais_components()
 
         aisexec_ignore = [
                     "error: ais_dispatch: Receiving message .* failed",
                     "crmd.*I_ERROR.*crmd_cib_connection_destroy",
                     "cib.*error: cib_cs_destroy: AIS connection terminated",
                     #"crmd.*error: crm_ais_destroy: AIS connection terminated",
                     "crmd.* Could not recover from internal error",
                     "crmd.*I_TERMINATE.*do_recover",
                     "attrd.*attrd_cs_destroy: Lost connection to Corosync service!",
                     "stonithd.*error: Corosync connection terminated",
             ]
 
         aisexec_ignore.extend(self.common_ignore)
 
         self.complist.append(Process(self, "aisexec", pats = [
                     "error: ais_dispatch: AIS connection failed",
                     "crmd.*error: do_exit: Could not recover from internal error",
                     "pengine.*Scheduling Node .* for STONITH",
                     "stonithd.*requests a STONITH operation RESET on node",
                     "stonithd.*Succeeded to STONITH the node",
                     ], badnews_ignore = aisexec_ignore))
 
 class crm_cs_v0(crm_ais):
     '''
     The crm version 3 cluster manager class.
     It implements the things we need to talk to and manipulate
 
     crm clusters running against version 0 of our plugin
     '''
     def __init__(self, Environment, randseed=None):
         crm_ais.__init__(self, Environment, randseed=randseed)
 
         # Stack-specific service commands and log patterns for corosync + plugin v0.
         self.update({
             "Name"           : "crm-plugin-v0",
             "StartCmd"       : "service corosync start",
             "StopCmd"        : "service corosync stop",
 
 # The next pattern is too early
 #            "Pat:We_stopped"   : "%s.*Service engine unloaded: Pacemaker Cluster Manager",
 # The next pattern would be preferred, but it doesn't always come out
 #            "Pat:We_stopped"   : "%s.*Corosync Cluster Engine exiting with status",
             "Pat:We_stopped"  : "%s.*Service engine unloaded: corosync cluster quorum service",
             "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost",
             "Pat:They_dead"    : "corosync:.*Node %s is now: lost",
 
             "Pat:ChildKilled"  : "%s corosync.*Child process %s terminated with signal 9",
             "Pat:ChildRespawn" : "%s corosync.*Respawning failed child process: %s",
         })
 
     def Components(self):
         # Build the common component list, then add the corosync process itself.
         self.ais_components()
 
         # Expected fallout when corosync itself is killed.
         corosync_ignore = [
             r"error: pcmk_cpg_dispatch: Connection to the CPG API failed: Library error",
             r"pacemakerd.*error: pcmk_child_exit: Child process .* exited",
             r"cib.*error: cib_cs_destroy: Corosync connection lost",
             r"stonith-ng.*error: stonith_peer_cs_destroy: Corosync connection terminated",
             r"error: pcmk_child_exit: Child process cib .* exited: Invalid argument",
             r"error: pcmk_child_exit: Child process attrd .* exited: Transport endpoint is not connected",
             r"error: pcmk_child_exit: Child process crmd .* exited: Link has been severed",
             r"lrmd.*error: crm_ipc_read: Connection to stonith-ng failed",
             r"lrmd.*error: mainloop_gio_callback: Connection to stonith-ng.* closed",
             r"lrmd.*error: stonith_connection_destroy_cb: LRMD lost STONITH connection",
             r"crmd.*do_state_transition: State transition .* S_RECOVERY",
             r"crmd.*error: do_log: FSA: Input I_ERROR",
             r"crmd.*error: do_log: FSA: Input I_TERMINATE",
             r"crmd.*error: pcmk_cman_dispatch: Connection to cman failed",
             r"crmd.*error: crmd_fast_exit: Could not recover from internal error",
             r"error: crm_ipc_read: Connection to cib_shm failed",
             r"error: mainloop_gio_callback: Connection to cib_shm.* closed",
             r"error: stonith_connection_failed: STONITH connection failed",
             ]
 
         self.complist.append(Process(self, "corosync", pats = [
                     r"pacemakerd.*error: cfg_connection_destroy: Connection destroyed",
                     r"pacemakerd.*error: mcp_cpg_destroy: Connection destroyed",
                     r"crit: attrd_cpg_destroy: Lost connection to Corosync service",
                     r"stonith_peer_cs_destroy: Corosync connection terminated",
                     r"cib_cs_destroy: Corosync connection lost!  Exiting.",
                     r"crmd_(cs|quorum)_destroy: connection terminated",
                     r"pengine.*Scheduling Node .* for STONITH",
                     r"tengine_stonith_notify: Peer .* was terminated .*: OK",
                     ], badnews_ignore = corosync_ignore, common_ignore = self.common_ignore))
 
         return self.complist
 
 class crm_cs_v1(crm_cs_v0):
     '''
     The crm version 3 cluster manager class.
     It implements the things we need to talk to and manipulate
 
     crm clusters running on top of version 1 of our plugin
     '''
     def __init__(self, Environment, randseed=None):
         crm_cs_v0.__init__(self, Environment, randseed=randseed)
 
         # NOTE: "EpocheCmd"/"ParitionCmd" key names are misspelled, but the
         # same spellings are used consistently elsewhere -- do not rename.
         self.update({
             "Name"           : "crm-plugin-v1",
             "StartCmd"       : "service corosync start && service pacemaker start",
             "StopCmd"        : "service pacemaker stop; service corosync stop",
 
             "EpocheCmd"      : "crm_node -e",
             "QuorumCmd"      : "crm_node -q",
             "ParitionCmd"    : "crm_node -p",
 
             "Pat:We_stopped"  : "%s.*Service engine unloaded: corosync cluster quorum service",
             "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost",
             "Pat:They_dead"    : "crmd.*Node %s\[.*state is now lost",
 
             "Pat:ChildKilled"  : "%s pacemakerd.*Child process %s terminated with signal 9",
             "Pat:ChildRespawn" : "%s pacemakerd.*Respawning failed child process: %s",
         })
 
 class crm_mcp(crm_cs_v0):
     '''
     The crm version 4 cluster manager class.
     It implements the things we need to talk to and manipulate
     crm clusters running on top of native corosync (no plugins)
     '''
     def __init__(self, Environment, randseed=None):
         crm_cs_v0.__init__(self, Environment, randseed=randseed)
 
         # Stack-specific commands and patterns for plugin-less corosync,
         # with pacemakerd managing the pacemaker daemons.
         self.update({
             "Name"           : "crm-mcp",
             "StartCmd"       : "service corosync start && service pacemaker start",
             "StopCmd"        : "service pacemaker stop; service corosync stop",
 
             "EpocheCmd"      : "crm_node -e",
             "QuorumCmd"      : "crm_node -q",
             "ParitionCmd"    : "crm_node -p",
 
             # Close enough... "Corosync Cluster Engine exiting normally" isn't printed
             #   reliably and there's little interest in doing anything it
             "Pat:We_stopped"   : "%s.*Unloading all Corosync service engines",
             "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost",
             "Pat:They_dead"    : "crmd.*Node %s\[.*state is now lost",
 
             "Pat:ChildKilled"  : "%s pacemakerd.*Child process %s terminated with signal 9",
             "Pat:ChildRespawn" : "%s pacemakerd.*Respawning failed child process: %s",
 
             "Pat:InfraUp"      : "%s corosync.*Initializing transport",
             "Pat:PacemakerUp"  : "%s pacemakerd.*Starting Pacemaker",
         })
 
         if self.Env["have_systemd"]:
             self.update({
                 # When systemd is in use, we can look for this instead
                 "Pat:We_stopped"   : "%s.*Stopped Corosync Cluster Engine",
             })
 
 class crm_cman(crm_cs_v0):
     '''
     The crm version 3 cluster manager class.
     It implements the things we need to talk to and manipulate
     crm clusters running on top of cman (docstring previously said openais)
     '''
     def __init__(self, Environment, randseed=None):
         crm_cs_v0.__init__(self, Environment, randseed=randseed)
 
         # cman starts corosync itself, so only the pacemaker service is managed here.
         self.update({
             "Name"           : "crm-cman",
             "StartCmd"       : "service pacemaker start",
             "StopCmd"        : "service pacemaker stop",
 
             "EpocheCmd"      : "crm_node -e --cman",
             "QuorumCmd"      : "crm_node -q --cman",
             "ParitionCmd"    : "crm_node -p --cman",
 
             "Pat:We_stopped"   : "%s.*Unloading all Corosync service engines",
             "Pat:They_stopped" : "%s crmd.*Node %s\[.*state is now lost",
             "Pat:They_dead"    : "crmd.*Node %s\[.*state is now lost",
 
             "Pat:ChildKilled"  : "%s pacemakerd.*Child process %s terminated with signal 9",
             "Pat:ChildRespawn" : "%s pacemakerd.*Respawning failed child process: %s",
         })
diff --git a/doc/msg-schema.txt b/doc/msg-schema.txt
index cefa46fe3e..683e4b8cc3 100644
--- a/doc/msg-schema.txt
+++ b/doc/msg-schema.txt
@@ -1,166 +1,166 @@
 Background
 ##################
 First of all, go look at the diagram (comms.gif), read this, then 
 look at the diagram again.
 
 Next, some terminology...  
 Here I will use CRM to refer to the light blue section.  That is, 
 the entire collection of processes/daemons/modules on a node that, 
 as a whole, manage resources in the cluster.  CRMd refers to one 
 of the dark blue boxes.  It is the "master subsystem" if you like.  
 Its role is to co-ordinate the actions of all the other pieces of 
 the puzzle, including those on other nodes.
  
 Key points from the diagram:
 - All communications with the CRM are done with Heartbeat messages 
   routed through the CRMd.  These messages contain a text 
   representation of an XML document, the schema of which is outlined 
   at the end of this document.
 - All communications internal to the CRM (ie. between its subsystems) 
   is performed with IPC messages.  Again all messages are routed via 
   the CRMd and contain the same XML documents as Heartbeat messages.
 - All admin clients (eventually) end up sending Heartbeat messages 
   and are thus subject to the existing HA client security that is available.
 - The RPC layer allows the cluster to be controlled from non-member 
   hosts (subject to RPC security which is available for free).
 - The option of synchronous or asynchronous RPC calls will be provided. 
   This will probably be in the form of a flag sent as part of the 
   function call.
 
 Advantages:
 - The only source of "requests" is the CRMd which means it *never* has 
   to forward on "request" messages for any of its sub-systems.  This is 
   useful for the security of the system (see security.txt).
 - Potentially, most CRM<-->CRM communications can be replaced with RPC 
   calls.
 - We are able to re-use existing security mechanisms (IPC, HA, RPC, 
   unix_auth via RPC) to protect the system.
 
 Message scenarios:
 ##################
 There are really only 3 messaging scenarios in this system (excluding 
 broadcast vs. point-to-point).  Again this is nice as it keeps down the 
 number of "special cases".
 
 1) Sub-system <--(IPC)--> CRM <--(IPC)--> Sub-system 
 2) Sub-system <--(IPC)--> Local CRM <--(Heartbeat)--> Remote CRM <--(IPC)--> Sub-system 
 3) Admin Client <--(Heartbeat Broadcast)--> Remote CRM <--(IPC)--> Sub-system 
 
 Message examples:
 ##################
 1.1) the DC telling the local LRM to start a resource
 1.2) the LRM asking the CIB about a resource
 
 2.1) the DC telling a remote LRM to start a resource
 2.2) the DC asking (all) the CIB(s) to provide their view of the world
 
 3.1) an admin request to add/remove/modify a resource
 3.2) an admin request to force a failover of a resource or a
 recomputation of the resource dependencies.
 
 Message Notes:
 ##################
 Messages may be sent to the CRM from local sub-systems via IPC or from
 other HA clients via Heartbeat.  It is then the responsibility of the
 CRM to unpack the message and pass it on to the correct sub-system.  If
 the destination sub-system is the DC and the DC is not running on the
 current node, the message is discarded without error.
 
-Where the DC recieves a message from another node, it will also keep
+Where the DC receives a message from another node, it will also keep
 track of the sending host and the reference number so that it can direct
 the replies appropriately.  The exception to this is where the message
 is from the DC.  
 
 Messages to the DC are *always* sent as broadcast messages and the DC
 *must always* acknowledge the message with either the results of the
 message or a "thankyou" message.  The reason for this is that the DC may
 change or a DC election may be in progress.  The implication of this is
 that the sender should always set a timer and resend dc_messages if they
 have not been acknowledged.  The DC will be able to detect duplicates by
 examining the destination sub-system and the reference number and we
 will rely on HA to ensure the delivery of DC responses.
 
 All messages are full crm_messages.  I toyed with only sending the *_request
 or *_response piece of the message to and/or from the relevant sub-systems, 
 but it just got messy.  This way, the routing role of the CRM is much easier.
 And easier equals lower complexity, which means less bugs, which is good for
 everyone.
 
 Schema Notes:
 ###################
 
 Key Attributes
 ===============
 
 reference:	provides the ability to track which request a response
 		is in relation to and where the local CRM should send it.
 *_filter:	allow the operation to be limited to a particular type,
 		id and/or priority
 timeout:	allows the receiver to know how long the sender is
 		expecting the task to take so we can act and report back 
 		accordingly.
 
 
 Attribute values
 =================
 Where the list ends with |... , the complete list of possibilities will be 
 fleshed out at a later date.
 
 Message Schema:
 ###################
 
 <!ELEMENT crm_message (options, data?)>
 <!ATTLIST crm_message
           version          #CDATA                       '1'
 	  message_type	   (none|request|response)	'none'
           sys_from         (none|crmd|cib|lrm|admin)	'none'
           sys_to           (none|crmd|cib|lrm|admin)	'none'
 	  host_from	   #CDATA
 	  host_to	   #CDATA
 	  reference	   #CDATA
           timestamp        #CDATA                       '0'>
 
 
 <!ELEMENT options>
 <!ATTLIST options
 	  operation		#CDATA
 	  result?		(ok|failed|...)		'ok'
           verbose?		(true|false)		'false'
           timeout?		#CDATA			'0'
 	  filter_priority?	#CDATA	<!-- might be useful later -->
           filter_type?		#CDATA
 	  filter_id?		#CDATA>
 
 <!-- data is one of ping_item, cib_fragment, lrm_status -->
 
 <!ELEMENT ping_item>
 <!ATTLIST ping_item
 	  crm_subsystem    (none|crmd|dc|cib|lrm)		     'none'
 	  ping_status	   (error|timeout|stopped|running|sick)	     'timeout'>
 
 <!ELEMENT lrm_status (resource_info)*>
 <!ATTLIST lrm_status>
 
 <!ELEMENT resource_info>
 <!ATTLIST resource_info 
 	  res_id		#CDATA
           last_op		(noop|start|stop|restart)	     'noop'
           last_op_result        (fail|pass|unknown|...)		     'unknown'
 	  status		#CDATA>
 
 
 <!-- always describe which part of the cib are being returned -->
 <!ELEMENT cib_fragment (cib, obj_failed?)>
 <!ATTLIST cib_fragment
           cib_section  (none|all|nodes|resources|constraints|status) 'none'>
 
 <!ELEMENT obj_failed (failed_update)*>
 <!ATTLIST obj_failed>
 
 <!ELEMENT failed_update>
 <!ATTLIST failed_update 
 	  id			#CDATA
           object_type		(none|node|resource|constraint|state) 'none'
           operation		(none|add|update|delete|replace)      'none'
 	  reason?		(unknown|...)			      'unknown'>
 
diff --git a/fencing/commands.c b/fencing/commands.c
index 73a7fc1f80..c090eeaab7 100644
--- a/fencing/commands.c
+++ b/fencing/commands.c
@@ -1,2067 +1,2067 @@
 /*
  * Copyright (C) 2009 Andrew Beekhof <andrew@beekhof.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  *
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <ctype.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipcs.h>
 #include <crm/cluster/internal.h>
 #include <crm/common/mainloop.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 
 #if SUPPORT_CIBSECRETS
 #  include <crm/common/cib_secrets.h>
 #endif
 
 #include <internal.h>
 
/* All registered fencing devices, keyed by device id (values: stonith_device_t *) */
GHashTable *device_list = NULL;
/* Fencing topology entries, keyed by node name (values: stonith_topology_t *) */
GHashTable *topology = NULL;
/* Every in-flight async_command_t, so commands can be purged on device removal */
GList *cmd_list = NULL;

static int active_children = 0;

/* State for an asynchronous "which devices can fence this host?" query.
 * One reply is recorded per device; when replies_received reaches
 * replies_needed, 'callback' is invoked with the 'capable' device list
 * and the search is freed (see search_devices_record_result()). */
struct device_search_s {
    char *host;                 /* node to be fenced (NULL = any) */
    char *action;               /* fencing action being searched for */
    int per_device_timeout;     /* timeout handed to each device probe */
    int replies_needed;         /* number of devices queried */
    int replies_received;       /* devices that have answered so far */

    void *user_data;            /* opaque context passed through to 'callback' */
    void (*callback) (GList * devices, void *user_data);
    GListPtr capable;           /* device ids (strdup'd) that can fence 'host' */
};

static gboolean stonith_device_dispatch(gpointer user_data);
static void st_child_done(GPid pid, int rc, const char *output, gpointer user_data);
static void stonith_send_reply(xmlNode * reply, int call_options, const char *remote_peer,
                               const char *client_id);

static void search_devices_record_result(struct device_search_s *search, const char *device,
                                         gboolean can_fence);
 
/* A single queued/executing fencing command.  Created from an incoming XML
 * request (create_async_command()) or internally (schedule_internal_command()),
 * tracked in cmd_list, and released via free_async_command(). */
typedef struct async_command_s {

    int id;                     /* client call id (-1 for internal commands) */
    int pid;
    int fd_stdout;
    int options;                /* st_opt_* call options from the request */
    int default_timeout;        /* requested timeout, before per-action overrides */
    int timeout;                /* effective timeout (see get_action_timeout()) */

    char *op;                   /* stonith operation name */
    char *origin;               /* node the request originated from */
    char *client;               /* requesting client id */
    char *client_name;
    char *remote_op_id;         /* set when relayed from a peer stonithd */

    char *victim;               /* node to be fenced (may be NULL) */
    uint32_t victim_nodeid;     /* filled in only if the agent needs a nodeid */
    char *action;
    char *device;               /* id of the device this command is scheduled on */
    char *mode;

    GListPtr device_list;
    GListPtr device_next;

    void *internal_user_data;   /* context for internally scheduled commands */
    void (*done_cb) (GPid pid, int rc, const char *output, gpointer user_data);
    guint timer_sigterm;
    guint timer_sigkill;
    /*! If the operation timed out, this is the last signal
     *  we sent to the process to get it to terminate */
    int last_timeout_signo;
} async_command_t;

static xmlNode *stonith_construct_async_reply(async_command_t * cmd, const char *output,
                                              xmlNode * data, int rc);
 
 static int
 get_action_timeout(stonith_device_t * device, const char *action, int default_timeout)
 {
     char buffer[512] = { 0, };
     char *value = NULL;
 
     CRM_CHECK(action != NULL, return default_timeout);
 
     if (!device->params) {
         return default_timeout;
     }
 
     snprintf(buffer, sizeof(buffer) - 1, "pcmk_%s_timeout", action);
     value = g_hash_table_lookup(device->params, buffer);
 
     if (!value) {
         return default_timeout;
     }
 
     return atoi(value);
 }
 
 static void
 free_async_command(async_command_t * cmd)
 {
     if (!cmd) {
         return;
     }
     cmd_list = g_list_remove(cmd_list, cmd);
 
     g_list_free_full(cmd->device_list, free);
     free(cmd->device);
     free(cmd->action);
     free(cmd->victim);
     free(cmd->remote_op_id);
     free(cmd->client);
     free(cmd->client_name);
     free(cmd->origin);
     free(cmd->mode);
     free(cmd->op);
     free(cmd);
 }
 
/* Build an async_command_t from an incoming fencing request.
 * Returns NULL (after logging) if the message lacks an action or operation;
 * otherwise the command is appended to cmd_list and returned.  The caller
 * is responsible for scheduling it; free_async_command() releases it. */
static async_command_t *
create_async_command(xmlNode * msg)
{
    async_command_t *cmd = NULL;
    xmlNode *op = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_ERR);
    const char *action = crm_element_value(op, F_STONITH_ACTION);

    /* An action is mandatory; bail out before allocating anything */
    CRM_CHECK(action != NULL, crm_log_xml_warn(msg, "NoAction"); return NULL);

    crm_log_xml_trace(msg, "Command");
    cmd = calloc(1, sizeof(async_command_t));
    crm_element_value_int(msg, F_STONITH_CALLID, &(cmd->id));
    crm_element_value_int(msg, F_STONITH_CALLOPTS, &(cmd->options));
    crm_element_value_int(msg, F_STONITH_TIMEOUT, &(cmd->default_timeout));
    /* May later be overridden per-action by schedule_stonith_command() */
    cmd->timeout = cmd->default_timeout;

    cmd->origin = crm_element_value_copy(msg, F_ORIG);
    cmd->remote_op_id = crm_element_value_copy(msg, F_STONITH_REMOTE_OP_ID);
    cmd->client = crm_element_value_copy(msg, F_STONITH_CLIENTID);
    cmd->client_name = crm_element_value_copy(msg, F_STONITH_CLIENTNAME);
    cmd->op = crm_element_value_copy(msg, F_STONITH_OPERATION);
    cmd->action = strdup(action);
    cmd->victim = crm_element_value_copy(op, F_STONITH_TARGET);
    cmd->mode = crm_element_value_copy(op, F_STONITH_MODE);
    cmd->device = crm_element_value_copy(op, F_STONITH_DEVICE);

    /* A missing operation is fatal; a missing client id is only logged */
    CRM_CHECK(cmd->op != NULL, crm_log_xml_warn(msg, "NoOp"); free_async_command(cmd); return NULL);
    CRM_CHECK(cmd->client != NULL, crm_log_xml_warn(msg, "NoClient"));

    cmd->done_cb = st_child_done;
    cmd_list = g_list_append(cmd_list, cmd);
    return cmd;
}
 
/* Run the next queued command on 'device', if the device is idle.
 * Pops one command from device->pending_ops and launches the agent
 * asynchronously; on launch failure the command's done_cb is invoked
 * immediately with the error code.  Always returns TRUE (the mainloop
 * trigger convention for "keep the trigger"). */
static gboolean
stonith_device_execute(stonith_device_t * device)
{
    int exec_rc = 0;
    async_command_t *cmd = NULL;
    stonith_action_t *action = NULL;

    CRM_CHECK(device != NULL, return FALSE);

    /* One agent invocation per device at a time */
    if (device->active_pid) {
        crm_trace("%s is still active with pid %u", device->id, device->active_pid);
        return TRUE;
    }

    if (device->pending_ops) {
        GList *first = device->pending_ops;

        device->pending_ops = g_list_remove_link(device->pending_ops, first);
        cmd = first->data;
        g_list_free_1(first);
    }

    if (cmd == NULL) {
        crm_trace("Nothing further to do for %s", device->id);
        return TRUE;
    }

#if SUPPORT_CIBSECRETS
    if (replace_secret_params(device->id, device->params) < 0) {
        /* replacing secrets failed! */
        if (safe_str_eq(cmd->action,"stop")) {
            /* don't fail on stop! */
            crm_info("proceeding with the stop operation for %s", device->id);

        } else {
            crm_err("failed to get secrets for %s, "
                    "considering resource not configured", device->id);
            exec_rc = PCMK_OCF_NOT_CONFIGURED;
            cmd->done_cb(0, exec_rc, NULL, cmd);
            return TRUE;
        }
    }
#endif

    action = stonith_action_create(device->agent,
                                   cmd->action,
                                   cmd->victim,
                                   cmd->victim_nodeid,
                                   cmd->timeout, device->params, device->aliases);

    /* for async exec, exec_rc is pid if positive and error code if negative/zero */
    exec_rc = stonith_action_execute_async(action, (void *)cmd, cmd->done_cb);

    if (exec_rc > 0) {
        crm_debug("Operation %s%s%s on %s now running with pid=%d, timeout=%ds",
                  cmd->action, cmd->victim ? " for node " : "", cmd->victim ? cmd->victim : "",
                  device->id, exec_rc, cmd->timeout);
        /* Marks the device busy until the done callback clears active_pid */
        device->active_pid = exec_rc;

    } else {
        crm_warn("Operation %s%s%s on %s failed: %s (%d)",
                 cmd->action, cmd->victim ? " for node " : "", cmd->victim ? cmd->victim : "",
                 device->id, pcmk_strerror(exec_rc), exec_rc);
        cmd->done_cb(0, exec_rc, NULL, cmd);
    }
    return TRUE;
}
 
 static gboolean
 stonith_device_dispatch(gpointer user_data)
 {
     return stonith_device_execute(user_data);
 }
 
 static void
 schedule_stonith_command(async_command_t * cmd, stonith_device_t * device)
 {
     CRM_CHECK(cmd != NULL, return);
     CRM_CHECK(device != NULL, return);
 
     if (cmd->device) {
         free(cmd->device);
     }
 
     if (device->include_nodeid && cmd->victim) {
         crm_node_t *node = crm_get_peer(0, cmd->victim);
 
         cmd->victim_nodeid = node->id;
     }
 
     cmd->device = strdup(device->id);
     cmd->timeout = get_action_timeout(device, cmd->action, cmd->default_timeout);
 
     if (cmd->remote_op_id) {
         crm_debug("Scheduling %s on %s for remote peer %s with op id (%s) (timeout=%ds)",
                   cmd->action, device->id, cmd->origin, cmd->remote_op_id, cmd->timeout);
     } else {
         crm_debug("Scheduling %s on %s for %s (timeout=%ds)",
                   cmd->action, device->id, cmd->client, cmd->timeout);
     }
 
     device->pending_ops = g_list_append(device->pending_ops, cmd);
     mainloop_set_trigger(device->work);
 }
 
/* GHashTable value-destructor for device_list: tear down a stonith_device_t.
 * Any commands still pending on the device are failed with -ENODEV so their
 * callers get an answer before the command is freed. */
void
free_device(gpointer data)
{
    GListPtr gIter = NULL;
    stonith_device_t *device = data;

    g_hash_table_destroy(device->params);
    g_hash_table_destroy(device->aliases);

    /* Fail and release every command that never got to run */
    for (gIter = device->pending_ops; gIter != NULL; gIter = gIter->next) {
        async_command_t *cmd = gIter->data;

        crm_warn("Removal of device '%s' purged operation %s", device->id, cmd->action);
        cmd->done_cb(0, -ENODEV, NULL, cmd);
        free_async_command(cmd);
    }
    g_list_free(device->pending_ops);

    g_list_free_full(device->targets, free);

    mainloop_destroy_trigger(device->work);

    free_xml(device->agent_metadata);
    free(device->namespace);
    free(device->on_target_actions);
    free(device->agent);
    free(device->id);
    free(device);
}
 
/* Parse a pcmk_host_map string ("node1:port1;node2:port2 ...") into a
 * hash table mapping node name -> port alias.  Separators between pairs
 * are ';', space, tab or end-of-string; '=' and ':' separate key from
 * value.  If 'targets' is non-NULL, each alias value is also appended
 * (strdup'd) to that list.  Ownership of keys/values passes to the table
 * (freed by g_hash_destroy_str). */
static GHashTable *
build_port_aliases(const char *hostmap, GListPtr * targets)
{
    char *name = NULL;
    int last = 0, lpc = 0, max = 0, added = 0;
    GHashTable *aliases =
        g_hash_table_new_full(crm_str_hash, g_str_equal, g_hash_destroy_str, g_hash_destroy_str);

    if (hostmap == NULL) {
        return aliases;
    }

    /* lpc <= max so the terminating NUL is processed as a delimiter */
    max = strlen(hostmap);
    for (; lpc <= max; lpc++) {
        switch (hostmap[lpc]) {
                /* Assignment chars */
            case '=':
            case ':':
                if (lpc > last) {
                    /* Capture the key; a repeated assignment char discards
                     * the previous (incomplete) key */
                    free(name);
                    name = calloc(1, 1 + lpc - last);
                    memcpy(name, hostmap + last, lpc - last);
                }
                last = lpc + 1;
                break;

                /* Delimiter chars */
                /* case ',': Potentially used to specify multiple ports */
            case 0:
            case ';':
            case ' ':
            case '\t':
                if (name) {
                    char *value = NULL;

                    value = calloc(1, 1 + lpc - last);
                    memcpy(value, hostmap + last, lpc - last);

                    crm_debug("Adding alias '%s'='%s'", name, value);
                    /* table takes ownership of both name and value */
                    g_hash_table_replace(aliases, name, value);
                    if (targets) {
                        *targets = g_list_append(*targets, strdup(value));
                    }
                    value = NULL;
                    name = NULL;
                    added++;

                } else if (lpc > last) {
                    /* Text before a delimiter with no '='/':' seen */
                    crm_debug("Parse error at offset %d near '%s'", lpc - last, hostmap + last);
                }

                last = lpc + 1;
                break;
        }

        if (hostmap[lpc] == 0) {
            break;
        }
    }

    if (added == 0) {
        crm_info("No host mappings detected in '%s'", hostmap);
    }

    /* Free a dangling key from a trailing "name=" with no value delimiter */
    free(name);
    return aliases;
}
 
 static void
 parse_host_line(const char *line, int max, GListPtr * output)
 {
     int lpc = 0;
     int last = 0;
 
     if (max <= 0) {
         return;
     }
 
     /* Check for any complaints about additional parameters that the device doesn't understand */
     if (strstr(line, "invalid") || strstr(line, "variable")) {
         crm_debug("Skipping: %s", line);
         return;
     }
 
     crm_trace("Processing %d bytes: [%s]", max, line);
     /* Skip initial whitespace */
     for (lpc = 0; lpc <= max && isspace(line[lpc]); lpc++) {
         last = lpc + 1;
     }
 
     /* Now the actual content */
     for (lpc = 0; lpc <= max; lpc++) {
         gboolean a_space = isspace(line[lpc]);
 
         if (a_space && lpc < max && isspace(line[lpc + 1])) {
             /* fast-forward to the end of the spaces */
 
         } else if (a_space || line[lpc] == ',' || line[lpc] == 0) {
             int rc = 1;
             char *entry = NULL;
 
             if (lpc != last) {
                 entry = calloc(1, 1 + lpc - last);
                 rc = sscanf(line + last, "%[a-zA-Z0-9_-.]", entry);
             }
 
             if (entry == NULL) {
                 /* Skip */
             } else if (rc != 1) {
                 crm_warn("Could not parse (%d %d): %s", last, lpc, line + last);
             } else if (safe_str_neq(entry, "on") && safe_str_neq(entry, "off")) {
                 crm_trace("Adding '%s'", entry);
                 *output = g_list_append(*output, entry);
                 entry = NULL;
             }
 
             free(entry);
             last = lpc + 1;
         }
     }
 }
 
 static GListPtr
 parse_host_list(const char *hosts)
 {
     int lpc = 0;
     int max = 0;
     int last = 0;
     GListPtr output = NULL;
 
     if (hosts == NULL) {
         return output;
     }
 
     max = strlen(hosts);
     for (lpc = 0; lpc <= max; lpc++) {
         if (hosts[lpc] == '\n' || hosts[lpc] == 0) {
             char *line = NULL;
             int len = lpc - last;
 
             if(len > 1) {
                 line = malloc(1 + len);
             }
 
             if(line) {
                 snprintf(line, 1 + len, "%s", hosts + last);
                 line[len] = 0; /* Because it might be '\n' */
                 parse_host_line(line, len, &output);
                 free(line);
             }
 
             last = lpc + 1;
         }
     }
 
     crm_trace("Parsed %d entries from '%s'", g_list_length(output), hosts);
     return output;
 }
 
 static xmlNode *
 get_agent_metadata(const char *agent)
 {
     stonith_t *st = stonith_api_new();
     xmlNode *xml = NULL;
     char *buffer = NULL;
     int rc = 0;
 
     rc = st->cmds->metadata(st, st_opt_sync_call, agent, NULL, &buffer, 10);
     if (rc || !buffer) {
         crm_err("Could not retrieve metadata for fencing agent %s", agent);
         return NULL;
     }
     xml = string2xml(buffer);
     free(buffer);
     stonith_api_delete(st);
 
     return xml;
 }
 
 static gboolean
 is_nodeid_required(xmlNode * xml)
 {
     xmlXPathObjectPtr xpath = NULL;
 
     if (stand_alone) {
         return FALSE;
     }
 
     if (!xml) {
         return FALSE;
     }
 
     xpath = xpath_search(xml, "//parameter[@name='nodeid']");
     if (numXpathResults(xpath)  <= 0) {
         freeXpathObject(xpath);
         return FALSE;
     }
 
     freeXpathObject(xpath);
     return TRUE;
 }
 
/* Collect the names of metadata actions flagged on_target="1" into a single
 * space-separated string (at most 512 bytes including NUL; g_strlcat
 * silently truncates beyond that).  Returns a malloc'd string the caller
 * must free, or NULL if there are none / no metadata. */
static char *
get_on_target_actions(xmlNode * xml)
{
    char *actions = NULL;
    xmlXPathObjectPtr xpath = NULL;
    int max = 0;
    int lpc = 0;

    if (!xml) {
        return NULL;
    }

    xpath = xpath_search(xml, "//action");
    max = numXpathResults(xpath);

    if (max <= 0) {
        freeXpathObject(xpath);
        return NULL;
    }

    actions = calloc(1, 512);

    for (lpc = 0; lpc < max; lpc++) {
        const char *on_target = NULL;
        const char *action = NULL;
        xmlNode *match = getXpathResult(xpath, lpc);

        CRM_CHECK(match != NULL, continue);

        on_target = crm_element_value(match, "on_target");
        action = crm_element_value(match, "name");

        if (action && crm_is_true(on_target)) {
            /* Separate entries with a single space */
            if (strlen(actions)) {
                g_strlcat(actions, " ", 512);
            }
            g_strlcat(actions, action, 512);
        }
    }

    freeXpathObject(xpath);

    /* Normalize "no on-target actions" to NULL rather than "" */
    if (!strlen(actions)) {
        free(actions);
        actions = NULL;
    }

    return actions;
}
 
/* Construct a stonith_device_t from a device-registration XML message:
 * copies id/agent/namespace, converts nvpairs to the params table, derives
 * static targets from pcmk_host_list / pcmk_host_map, fetches agent
 * metadata, and wires up the execution trigger.  Caller owns the result
 * (released via free_device()). */
static stonith_device_t *
build_device_from_xml(xmlNode * msg)
{
    const char *value = NULL;
    xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, msg, LOG_ERR);
    stonith_device_t *device = NULL;

    device = calloc(1, sizeof(stonith_device_t));
    device->id = crm_element_value_copy(dev, XML_ATTR_ID);
    device->agent = crm_element_value_copy(dev, "agent");
    device->namespace = crm_element_value_copy(dev, "namespace");
    device->params = xml2list(dev);

    value = g_hash_table_lookup(device->params, STONITH_ATTR_HOSTLIST);
    if (value) {
        device->targets = parse_host_list(value);
    }

    /* The host map can add further targets on top of the host list */
    value = g_hash_table_lookup(device->params, STONITH_ATTR_HOSTMAP);
    device->aliases = build_port_aliases(value, &(device->targets));

    device->agent_metadata = get_agent_metadata(device->agent);
    device->on_target_actions = get_on_target_actions(device->agent_metadata);

    /* Only consult metadata when the user did not supply a nodeid */
    value = g_hash_table_lookup(device->params, "nodeid");
    if (!value) {
        device->include_nodeid = is_nodeid_required(device->agent_metadata);
    }

    if (device->on_target_actions) {
        crm_info("The fencing device '%s' requires actions (%s) to be executed on the target node",
                 device->id, device->on_target_actions);
    }

    device->work = mainloop_add_trigger(G_PRIORITY_HIGH, stonith_device_dispatch, device);
    /* TODO: Hook up priority */

    return device;
}
 
 static const char *
 target_list_type(stonith_device_t * dev)
 {
     const char *check_type = NULL;
 
     check_type = g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTCHECK);
 
     if (check_type == NULL) {
 
         if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTLIST)) {
             check_type = "static-list";
         } else if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTMAP)) {
             check_type = "static-list";
         } else {
             check_type = "dynamic-list";
         }
     }
 
     return check_type;
 }
 
 void
 schedule_internal_command(const char *origin,
                           stonith_device_t * device,
                           const char *action,
                           const char *victim,
                           int timeout,
                           void *internal_user_data,
                           void (*done_cb) (GPid pid, int rc, const char *output,
                                            gpointer user_data))
 {
     async_command_t *cmd = NULL;
 
     cmd = calloc(1, sizeof(async_command_t));
 
     cmd->id = -1;
     cmd->default_timeout = timeout ? timeout : 60;
     cmd->timeout = cmd->default_timeout;
     cmd->action = strdup(action);
     cmd->victim = victim ? strdup(victim) : NULL;
     cmd->device = strdup(device->id);
     cmd->origin = strdup(origin);
     cmd->client = strdup(crm_system_name);
     cmd->client_name = strdup(crm_system_name);
 
     cmd->internal_user_data = internal_user_data;
     cmd->done_cb = done_cb; /* cmd, not internal_user_data, is passed to 'done_cb' as the userdata */
 
     schedule_stonith_command(cmd, device);
 }
 
 static gboolean
 string_in_list(GListPtr list, const char *item)
 {
     int lpc = 0;
     int max = g_list_length(list);
 
     for (lpc = 0; lpc < max; lpc++) {
         const char *value = g_list_nth_data(list, lpc);
 
         if (safe_str_eq(item, value)) {
             return TRUE;
         } else {
             crm_trace("%d: '%s' != '%s'", lpc, item, value);
         }
     }
     return FALSE;
 }
 
 static void
 status_search_cb(GPid pid, int rc, const char *output, gpointer user_data)
 {
     async_command_t *cmd = user_data;
     struct device_search_s *search = cmd->internal_user_data;
     stonith_device_t *dev = cmd->device ? g_hash_table_lookup(device_list, cmd->device) : NULL;
     gboolean can = FALSE;
 
     free_async_command(cmd);
 
     if (!dev) {
         search_devices_record_result(search, NULL, FALSE);
         return;
     }
 
     dev->active_pid = 0;
     mainloop_set_trigger(dev->work);
 
     if (rc == 1 /* unkown */ ) {
         crm_trace("Host %s is not known by %s", search->host, dev->id);
 
     } else if (rc == 0 /* active */  || rc == 2 /* inactive */ ) {
         can = TRUE;
 
     } else {
         crm_notice("Unkown result when testing if %s can fence %s: rc=%d", dev->id, search->host,
                    rc);
     }
     search_devices_record_result(search, dev->id, can);
 }
 
/* Completion callback for a dynamic "list" probe: refresh the device's
 * cached target list from the agent output (or fall back to status checks
 * if the query failed), then answer the pending search. */
static void
dynamic_list_search_cb(GPid pid, int rc, const char *output, gpointer user_data)
{
    async_command_t *cmd = user_data;
    struct device_search_s *search = cmd->internal_user_data;
    stonith_device_t *dev = cmd->device ? g_hash_table_lookup(device_list, cmd->device) : NULL;
    gboolean can_fence = FALSE;

    free_async_command(cmd);

    /* Host/alias must be in the list output to be eligible to be fenced
     *
     * Will cause problems if down'd nodes aren't listed or (for virtual nodes)
     *  if the guest is still listed despite being moved to another machine
     */
    if (!dev) {
        search_devices_record_result(search, NULL, FALSE);
        return;
    }

    /* Device is idle again; let any queued command run */
    dev->active_pid = 0;
    mainloop_set_trigger(dev->work);

    /* If we successfully got the targets earlier, don't disable. */
    if (rc != 0 && !dev->targets) {
        crm_notice("Disabling port list queries for %s (%d): %s", dev->id, rc, output);
        /* Fall back to status */
        g_hash_table_replace(dev->params, strdup(STONITH_ATTR_HOSTCHECK), strdup("status"));

        g_list_free_full(dev->targets, free);
        dev->targets = NULL;
    } else if (!rc) {
        crm_info("Refreshing port list for %s", dev->id);
        g_list_free_full(dev->targets, free);
        dev->targets = parse_host_list(output);
        /* Timestamp used for the 60s cache in can_fence_host_with_device() */
        dev->targets_age = time(NULL);
    }

    if (dev->targets) {
        /* Match the host's alias if one is mapped, else the host itself */
        const char *alias = g_hash_table_lookup(dev->aliases, search->host);

        if (!alias) {
            alias = search->host;
        }
        if (string_in_list(dev->targets, alias)) {
            can_fence = TRUE;
        }
    }
    search_devices_record_result(search, dev->id, can_fence);
}
 
 /*!
  * \internal
  * \brief Checks to see if an identical device already exists in the device_list
  */
 static stonith_device_t *
 device_has_duplicate(stonith_device_t * device)
 {
     char *key = NULL;
     char *value = NULL;
     GHashTableIter gIter;
     stonith_device_t *dup = g_hash_table_lookup(device_list, device->id);
 
     if (!dup) {
         crm_trace("No match for %s", device->id);
         return NULL;
 
     } else if (safe_str_neq(dup->agent, device->agent)) {
         crm_trace("Different agent: %s != %s", dup->agent, device->agent);
         return NULL;
     }
 
     /* Use calculate_operation_digest() here? */
     g_hash_table_iter_init(&gIter, device->params);
     while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&value)) {
 
         if(strstr(key, "CRM_meta") == key) {
             continue;
         } else if(strcmp(key, "crm_feature_set") == 0) {
             continue;
         } else {
             char *other_value = g_hash_table_lookup(dup->params, key);
 
             if (!other_value || safe_str_neq(other_value, value)) {
                 crm_trace("Different value for %s: %s != %s", key, other_value, value);
                 return NULL;
             }
         }
     }
 
     crm_trace("Match");
     return dup;
 }
 
/* Register (or re-register) a fencing device from a request message.
 * If an identical device already exists it is reused; otherwise the new
 * device replaces any old entry, inheriting its pending operations when
 * the CIB overwrites an API-registered device.  'desc' (if non-NULL)
 * receives a pointer to the registered device's id (owned by the device,
 * not the caller).  Returns pcmk_ok. */
int
stonith_device_register(xmlNode * msg, const char **desc, gboolean from_cib)
{
    stonith_device_t *dup = NULL;
    stonith_device_t *device = build_device_from_xml(msg);

    dup = device_has_duplicate(device);
    if (dup) {
        crm_notice("Device '%s' already existed in device list (%d active devices)", device->id,
                   g_hash_table_size(device_list));
        /* Keep the existing (identical) entry; discard the new copy */
        free_device(device);
        device = dup;

    } else {
        stonith_device_t *old = g_hash_table_lookup(device_list, device->id);

        if (from_cib && old && old->api_registered) {
            /* If the cib is writing over an entry that is shared with a stonith client,
             * copy any pending ops that currently exist on the old entry to the new one.
             * Otherwise the pending ops will be reported as failures
             */
            crm_trace("Overwriting an existing entry for %s from the cib", device->id);
            device->pending_ops = old->pending_ops;
            device->api_registered = TRUE;
            old->pending_ops = NULL;
            if (device->pending_ops) {
                mainloop_set_trigger(device->work);
            }
        }
        /* replace() frees 'old' via the table's value destructor */
        g_hash_table_replace(device_list, device->id, device);

        crm_notice("Added '%s' to the device list (%d active devices)", device->id,
                   g_hash_table_size(device_list));
    }
    if (desc) {
        *desc = device->id;
    }

    /* Track who registered the device; it persists until both flags clear */
    if (from_cib) {
        device->cib_registered = TRUE;
    } else {
        device->api_registered = TRUE;
    }

    return pcmk_ok;
}
 
 int
 stonith_device_remove(const char *id, gboolean from_cib)
 {
     stonith_device_t *device = g_hash_table_lookup(device_list, id);
 
     if (!device) {
         crm_info("Device '%s' not found (%d active devices)", id, g_hash_table_size(device_list));
         return pcmk_ok;
     }
 
     if (from_cib) {
         device->cib_registered = FALSE;
     } else {
         device->verified = FALSE;
         device->api_registered = FALSE;
     }
 
     if (!device->cib_registered && !device->api_registered) {
         g_hash_table_remove(device_list, id);
         crm_info("Removed '%s' from the device list (%d active devices)",
                  id, g_hash_table_size(device_list));
     }
     return pcmk_ok;
 }
 
 static int
 count_active_levels(stonith_topology_t * tp)
 {
     int lpc = 0;
     int count = 0;
 
     for (lpc = 0; lpc < ST_LEVEL_MAX; lpc++) {
         if (tp->levels[lpc] != NULL) {
             count++;
         }
     }
     return count;
 }
 
 void
 free_topology_entry(gpointer data)
 {
     stonith_topology_t *tp = data;
 
     int lpc = 0;
 
     for (lpc = 0; lpc < ST_LEVEL_MAX; lpc++) {
         if (tp->levels[lpc] != NULL) {
             g_list_free_full(tp->levels[lpc], free);
         }
     }
     free(tp->node);
     free(tp);
 }
 
/* Register a fencing-topology level for a node from a request message.
 * The level id must be in (0, ST_LEVEL_MAX); devices listed in the message
 * are appended to that level (an existing level is added to, not replaced).
 * 'desc' (if non-NULL) receives a g_strdup'd "node[id]" label. */
int
stonith_level_register(xmlNode * msg, char **desc)
{
    int id = 0;
    int rc = pcmk_ok;
    xmlNode *child = NULL;

    xmlNode *level = get_xpath_object("//" F_STONITH_LEVEL, msg, LOG_ERR);
    const char *node = crm_element_value(level, F_STONITH_TARGET);
    stonith_topology_t *tp = g_hash_table_lookup(topology, node);

    crm_element_value_int(level, XML_ATTR_ID, &id);
    if (desc) {
        *desc = g_strdup_printf("%s[%d]", node, id);
    }
    /* Level 0 is reserved for "remove all"; ids must fit the levels array */
    if (id <= 0 || id >= ST_LEVEL_MAX) {
        return -EINVAL;
    }

    if (tp == NULL) {
        /* First level registered for this node: create its topology entry */
        tp = calloc(1, sizeof(stonith_topology_t));
        tp->node = strdup(node);
        g_hash_table_replace(topology, tp->node, tp);
        crm_trace("Added %s to the topology (%d active entries)", node,
                  g_hash_table_size(topology));
    }

    if (tp->levels[id] != NULL) {
        crm_info("Adding to the existing %s[%d] topology entry (%d active entries)", node, id,
                 count_active_levels(tp));
    }

    for (child = __xml_first_child(level); child != NULL; child = __xml_next(child)) {
        const char *device = ID(child);

        crm_trace("Adding device '%s' for %s (%d)", device, node, id);
        tp->levels[id] = g_list_append(tp->levels[id], strdup(device));
    }

    crm_info("Node %s has %d active fencing levels", node, count_active_levels(tp));
    return rc;
}
 
 int
 stonith_level_remove(xmlNode * msg, char **desc)
 {
     int id = 0;
     xmlNode *level = get_xpath_object("//" F_STONITH_LEVEL, msg, LOG_ERR);
     const char *node = crm_element_value(level, F_STONITH_TARGET);
     stonith_topology_t *tp = g_hash_table_lookup(topology, node);
 
     if (desc) {
         *desc = g_strdup_printf("%s[%d]", node, id);
     }
     crm_element_value_int(level, XML_ATTR_ID, &id);
 
     if (tp == NULL) {
         crm_info("Node %s not found (%d active entries)", node, g_hash_table_size(topology));
         return pcmk_ok;
 
     } else if (id < 0 || id >= ST_LEVEL_MAX) {
         return -EINVAL;
     }
 
     if (id == 0 && g_hash_table_remove(topology, node)) {
         crm_info("Removed all %s related entries from the topology (%d active entries)",
                  node, g_hash_table_size(topology));
 
     } else if (id > 0 && tp->levels[id] != NULL) {
         g_list_free_full(tp->levels[id], free);
         tp->levels[id] = NULL;
 
         crm_info("Removed entry '%d' from %s's topology (%d active entries remaining)",
                  id, node, count_active_levels(tp));
     }
     return pcmk_ok;
 }
 
 static int
 stonith_device_action(xmlNode * msg, char **output)
 {
     int rc = pcmk_ok;
     xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, msg, LOG_ERR);
     const char *id = crm_element_value(dev, F_STONITH_DEVICE);
 
     async_command_t *cmd = NULL;
     stonith_device_t *device = NULL;
 
     if (id) {
         crm_trace("Looking for '%s'", id);
         device = g_hash_table_lookup(device_list, id);
     }
 
     if (device) {
         cmd = create_async_command(msg);
         if (cmd == NULL) {
             free_device(device);
             return -EPROTO;
         }
 
         schedule_stonith_command(cmd, device);
         rc = -EINPROGRESS;
 
     } else {
         crm_info("Device %s not found", id ? id : "<none>");
         rc = -ENODEV;
     }
     return rc;
 }
 
/* Record one device's answer to a capability search.  When every queried
 * device has replied, invoke the search callback with the capable-device
 * list and free the search (the callback takes ownership of 'capable'). */
static void
search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence)
{
    search->replies_received++;

    if (can_fence && device) {
        search->capable = g_list_append(search->capable, strdup(device));
    }

    if (search->replies_needed == search->replies_received) {

        crm_debug("Finished Search. %d devices can perform action (%s) on node %s",
                  g_list_length(search->capable),
                  search->action ? search->action : "<unknown>",
                  search->host ? search->host : "<anyone>");

        search->callback(search->capable, search->user_data);
        /* The search is complete; only 'capable' survives (via the callback) */
        free(search->host);
        free(search->action);
        free(search);
    }
}
 
/* Decide whether 'dev' can fence the host named in 'search' and record the
 * answer via search_devices_record_result().  Depending on the device's
 * pcmk_host_check mode this may answer immediately (none / static-list /
 * cached dynamic-list) or schedule an asynchronous agent probe ("list" or
 * "status") whose callback records the result later. */
static void
can_fence_host_with_device(stonith_device_t * dev, struct device_search_s *search)
{
    gboolean can = FALSE;
    const char *check_type = NULL;
    const char *host = search->host;
    const char *alias = NULL;

    CRM_LOG_ASSERT(dev != NULL);

    if (dev == NULL) {
        goto search_report_results;
    } else if (host == NULL) {
        /* No specific target: any device qualifies */
        can = TRUE;
        goto search_report_results;
    }

    /* On-target actions can only run on the victim itself */
    if (dev->on_target_actions &&
        search->action &&
        strstr(dev->on_target_actions, search->action) && safe_str_neq(host, stonith_our_uname)) {
        /* this device can only execute this action on the target node */
        goto search_report_results;
    }

    alias = g_hash_table_lookup(dev->aliases, host);
    if (alias == NULL) {
        alias = host;
    }

    check_type = target_list_type(dev);

    if (safe_str_eq(check_type, "none")) {
        can = TRUE;

    } else if (safe_str_eq(check_type, "static-list")) {

        /* Presence in the hostmap is sufficient
         * Only use if all hosts on which the device can be active can always fence all listed hosts
         */

        if (string_in_list(dev->targets, host)) {
            can = TRUE;
        } else if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTMAP)
                   && g_hash_table_lookup(dev->aliases, host)) {
            can = TRUE;
        }

    } else if (safe_str_eq(check_type, "dynamic-list")) {
        time_t now = time(NULL);

        /* Re-query the agent when the cached target list is missing or
         * older than 60 seconds */
        if (dev->targets == NULL || dev->targets_age + 60 < now) {
            schedule_internal_command(__FUNCTION__, dev, "list", NULL,
                                      search->per_device_timeout, search, dynamic_list_search_cb);

            /* we'll respond to this search request async in the cb */
            return;
        }

        if (string_in_list(dev->targets, alias)) {
            can = TRUE;
        }

    } else if (safe_str_eq(check_type, "status")) {
        schedule_internal_command(__FUNCTION__, dev, "status", search->host,
                                  search->per_device_timeout, search, status_search_cb);
        /* we'll respond to this search request async in the cb */
        return;
    } else {
        crm_err("Unknown check type: %s", check_type);
    }

    if (safe_str_eq(host, alias)) {
        crm_notice("%s can%s fence %s: %s", dev->id, can ? "" : " not", host, check_type);
    } else {
        crm_notice("%s can%s fence %s (aka. '%s'): %s", dev->id, can ? "" : " not", host, alias,
                   check_type);
    }

  search_report_results:
    search_devices_record_result(search, dev ? dev->id : NULL, can);
}
 
 static void
 search_devices(gpointer key, gpointer value, gpointer user_data)
 {
     stonith_device_t *dev = value;
     struct device_search_s *search = user_data;
 
     can_fence_host_with_device(dev, search);
 }
 
#define DEFAULT_QUERY_TIMEOUT 20
/* Kick off a search for all devices capable of performing 'action' on 'host'.
 * The result is delivered asynchronously via 'callback' once every registered
 * device has reported in (see search_devices_record_result()). */
static void
get_capable_devices(const char *host, const char *action, int timeout, void *user_data,
                    void (*callback) (GList * devices, void *user_data))
{
    struct device_search_s *search;
    int per_device_timeout = DEFAULT_QUERY_TIMEOUT;
    int devices_needing_async_query = 0;
    char *key = NULL;
    const char *check_type = NULL;
    GHashTableIter gIter;
    stonith_device_t *device = NULL;

    if (!g_hash_table_size(device_list)) {
        /* No devices registered: report an empty result immediately */
        callback(NULL, user_data);
        return;
    }

    search = calloc(1, sizeof(struct device_search_s));
    if (!search) {
        /* Allocation failed: report an empty result rather than crash */
        callback(NULL, user_data);
        return;
    }

    /* Count devices whose host-check requires running the agent ("status" or
     * "dynamic-list"), since those queries need a per-device timeout */
    g_hash_table_iter_init(&gIter, device_list);
    while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&device)) {
        check_type = target_list_type(device);
        if (safe_str_eq(check_type, "status") || safe_str_eq(check_type, "dynamic-list")) {
            devices_needing_async_query++;
        }
    }

    /* If we have devices that require an async event in order to know what
     * nodes they can fence, we have to give the events a timeout. The total
     * query timeout is divided among those events. */
    if (devices_needing_async_query) {
        per_device_timeout = timeout / devices_needing_async_query;
        if (!per_device_timeout) {
            crm_err("stonith-timeout duration %d is too low, raise the duration to %d seconds",
                    timeout, DEFAULT_QUERY_TIMEOUT * devices_needing_async_query);
            per_device_timeout = DEFAULT_QUERY_TIMEOUT;
        } else if (per_device_timeout < DEFAULT_QUERY_TIMEOUT) {
            crm_notice
                ("stonith-timeout duration %d is low for the current configuration. Consider raising it to %d seconds",
                 timeout, DEFAULT_QUERY_TIMEOUT * devices_needing_async_query);
        }
    }

    search->host = host ? strdup(host) : NULL;
    search->action = action ? strdup(action) : NULL;
    search->per_device_timeout = per_device_timeout;
    /* We are guaranteed this many replies. Even if a device gets
     * unregistered some how during the async search, we will get
     * the correct number of replies. */
    search->replies_needed = g_hash_table_size(device_list);
    search->callback = callback;
    search->user_data = user_data;
    /* kick off the search */

    crm_debug("Searching through %d devices to see what is capable of action (%s) for target %s",
              search->replies_needed,
              search->action ? search->action : "<unknown>",
              search->host ? search->host : "<anyone>");
    g_hash_table_foreach(device_list, search_devices, search);
}
 
/* Context carried from stonith_query() to stonith_query_capable_device_cb();
 * all fields are owned by the struct and freed by the callback. */
struct st_query_data {
    xmlNode *reply;     /* pre-built reply skeleton the callback completes */
    char *remote_peer;  /* requesting peer node, or NULL for a local client */
    char *client_id;    /* local IPC client id, if any */
    char *target;       /* node to be fenced, or NULL for "list all devices" */
    char *action;       /* fencing action being queried */
    int call_options;   /* st_opt_* flags from the request */
};
 
/* Callback for get_capable_devices(): packages the capable-device list into
 * the prepared reply XML, sends the reply, and releases the query context. */
static void
stonith_query_capable_device_cb(GList * devices, void *user_data)
{
    struct st_query_data *query = user_data;
    int available_devices = 0;
    xmlNode *dev = NULL;
    xmlNode *list = NULL;
    GListPtr lpc = NULL;

    /* Pack the results into data */
    list = create_xml_node(NULL, __FUNCTION__);
    crm_xml_add(list, F_STONITH_TARGET, query->target);
    for (lpc = devices; lpc != NULL; lpc = lpc->next) {
        stonith_device_t *device = g_hash_table_lookup(device_list, lpc->data);
        int action_specific_timeout;

        if (!device) {
            /* It is possible the device got unregistered while
             * determining who can fence the target */
            continue;
        }

        available_devices++;

        action_specific_timeout = get_action_timeout(device, query->action, 0);
        dev = create_xml_node(list, F_STONITH_DEVICE);
        crm_xml_add(dev, XML_ATTR_ID, device->id);
        crm_xml_add(dev, "namespace", device->namespace);
        crm_xml_add(dev, "agent", device->agent);
        crm_xml_add_int(dev, F_STONITH_DEVICE_VERIFIED, device->verified);
        if (action_specific_timeout) {
            crm_xml_add_int(dev, F_STONITH_ACTION_TIMEOUT, action_specific_timeout);
        }
        if (query->target == NULL) {
            /* A target-less query is a device listing; include each device's
             * parameters so the caller sees the full configuration */
            xmlNode *attrs = create_xml_node(dev, XML_TAG_ATTRS);

            g_hash_table_foreach(device->params, hash2field, attrs);
        }
    }

    crm_xml_add_int(list, "st-available-devices", available_devices);
    if (query->target) {
        crm_debug("Found %d matching devices for '%s'", available_devices, query->target);
    } else {
        crm_debug("%d devices installed", available_devices);
    }

    /* NOTE(review): 'list' is always non-NULL here (created above), so this
     * check is redundant but harmless */
    if (list != NULL) {
        crm_trace("Attaching query list output");
        add_message_xml(query->reply, F_STONITH_CALLDATA, list);
    }
    stonith_send_reply(query->reply, query->call_options, query->remote_peer, query->client_id);

    /* The callback owns the query context and the device list; free both */
    free_xml(query->reply);
    free(query->remote_peer);
    free(query->client_id);
    free(query->target);
    free(query->action);
    free(query);
    free_xml(list);
    g_list_free_full(devices, free);
}
 
 static void
 stonith_query(xmlNode * msg, const char *remote_peer, const char *client_id, int call_options)
 {
     struct st_query_data *query = NULL;
     const char *action = NULL;
     const char *target = NULL;
     int timeout = 0;
     xmlNode *dev = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_DEBUG_3);
 
     crm_element_value_int(msg, F_STONITH_TIMEOUT, &timeout);
     if (dev) {
         const char *device = crm_element_value(dev, F_STONITH_DEVICE);
 
         target = crm_element_value(dev, F_STONITH_TARGET);
         action = crm_element_value(dev, F_STONITH_ACTION);
         if (device && safe_str_eq(device, "manual_ack")) {
             /* No query or reply necessary */
             return;
         }
     }
 
     crm_log_xml_debug(msg, "Query");
     query = calloc(1, sizeof(struct st_query_data));
 
     query->reply = stonith_construct_reply(msg, NULL, NULL, pcmk_ok);
     query->remote_peer = remote_peer ? strdup(remote_peer) : NULL;
     query->client_id = client_id ? strdup(client_id) : NULL;
     query->target = target ? strdup(target) : NULL;
     query->action = action ? strdup(action) : NULL;
     query->call_options = call_options;
 
     get_capable_devices(target, action, timeout, query, stonith_query_capable_device_cb);
 }
 
 #define ST_LOG_OUTPUT_MAX 512
 static void
 log_operation(async_command_t * cmd, int rc, int pid, const char *next, const char *output)
 {
     if (rc == 0) {
         next = NULL;
     }
 
     if (cmd->victim != NULL) {
         do_crm_log(rc == 0 ? LOG_NOTICE : LOG_ERR,
                    "Operation '%s' [%d] (call %d from %s) for host '%s' with device '%s' returned: %d (%s)%s%s",
                    cmd->action, pid, cmd->id, cmd->client_name, cmd->victim, cmd->device, rc,
                    pcmk_strerror(rc), next ? ". Trying: " : "", next ? next : "");
     } else {
         do_crm_log_unlikely(rc == 0 ? LOG_DEBUG : LOG_NOTICE,
                             "Operation '%s' [%d] for device '%s' returned: %d (%s)%s%s",
                             cmd->action, pid, cmd->device, rc, pcmk_strerror(rc),
                             next ? ". Trying: " : "", next ? next : "");
     }
 
     if (output) {
         /* Logging the whole string confuses syslog when the string is xml */
         char *prefix = g_strdup_printf("%s:%d", cmd->device, pid);
 
         crm_log_output(rc == 0 ? LOG_INFO : LOG_WARNING, prefix, output);
         g_free(prefix);
     }
 }
 
/* Build and deliver the reply for a completed asynchronous command: either
 * broadcast it cluster-wide (when the fence target was the originator) or
 * direct it back to the requesting peer/client. */
static void
stonith_send_async_reply(async_command_t * cmd, const char *output, int rc, GPid pid)
{
    xmlNode *reply = NULL;
    gboolean bcast = FALSE;

    reply = stonith_construct_async_reply(cmd, output, NULL, rc);

    if (safe_str_eq(cmd->action, "metadata")) {
        /* Too verbose to log */
        crm_trace("Metadata query for %s", cmd->device);
        output = NULL;

    } else if (crm_str_eq(cmd->action, "monitor", TRUE) ||
               crm_str_eq(cmd->action, "list", TRUE) || crm_str_eq(cmd->action, "status", TRUE)) {
        crm_trace("Never broadcast %s replies", cmd->action);

    } else if (!stand_alone && safe_str_eq(cmd->origin, cmd->victim)) {
        /* The target requested its own fencing; everyone must hear the result */
        crm_trace("Broadcast %s reply for %s", cmd->action, cmd->victim);
        crm_xml_add(reply, F_SUBTYPE, "broadcast");
        bcast = TRUE;
    }

    log_operation(cmd, rc, pid, NULL, output);
    crm_log_xml_trace(reply, "Reply");

    if (bcast) {
        /* NULL destination == send to all cluster nodes */
        crm_xml_add(reply, F_STONITH_OPERATION, T_STONITH_NOTIFY);
        send_cluster_message(NULL, crm_msg_stonith_ng, reply, FALSE);

    } else if (cmd->origin) {
        crm_trace("Directed reply to %s", cmd->origin);
        send_cluster_message(crm_get_peer(0, cmd->origin), crm_msg_stonith_ng, reply, FALSE);

    } else {
        crm_trace("Directed local %ssync reply to %s",
                  (cmd->options & st_opt_sync_call) ? "" : "a-", cmd->client_name);
        do_local_reply(reply, cmd->client, cmd->options & st_opt_sync_call, FALSE);
    }

    if (stand_alone) {
        /* Do notification with a clean data object */
        xmlNode *notify_data = create_xml_node(NULL, T_STONITH_NOTIFY_FENCE);

        crm_xml_add_int(notify_data, F_STONITH_RC, rc);
        crm_xml_add(notify_data, F_STONITH_TARGET, cmd->victim);
        crm_xml_add(notify_data, F_STONITH_OPERATION, cmd->op);
        crm_xml_add(notify_data, F_STONITH_DELEGATE, cmd->device);
        crm_xml_add(notify_data, F_STONITH_REMOTE_OP_ID, cmd->remote_op_id);
        crm_xml_add(notify_data, F_STONITH_ORIGIN, cmd->client);

        do_stonith_notify(0, T_STONITH_NOTIFY_FENCE, rc, notify_data);
    }

    free_xml(reply);
}
 
 void
 unfence_cb(GPid pid, int rc, const char *output, gpointer user_data)
 {
     async_command_t * cmd = user_data;
     stonith_device_t *dev = g_hash_table_lookup(device_list, cmd->device);
 
     log_operation(cmd, rc, pid, NULL, output);
 
     if(dev) {
         dev->active_pid = 0;
         mainloop_set_trigger(dev->work);
     } else {
         crm_trace("Device %s does not exist", cmd->device);
     }
 
     if(rc != 0) {
         crm_exit(DAEMON_RESPAWN_STOP);
     }
 }
 
 static void
 cancel_stonith_command(async_command_t * cmd)
 {
     stonith_device_t *device;
 
     CRM_CHECK(cmd != NULL, return);
 
     if (!cmd->device) {
         return;
     }
 
     device = g_hash_table_lookup(device_list, cmd->device);
 
     if (device) {
         crm_trace("Cancel scheduled %s on %s", cmd->action, device->id);
         device->pending_ops = g_list_remove(device->pending_ops, cmd);
     }
 }
 
#define READ_MAX 500
/* Callback run when a device agent child process exits.
 * Responsibilities:
 *  - release the device so its next queued operation can run
 *  - on failure, fail over to the next candidate device if one remains
 *  - map raw positive agent exit codes to pcmk error codes
 *  - send the async reply, and share the result with any identical pending
 *    commands so the same fencing operation isn't executed twice
 */
static void
st_child_done(GPid pid, int rc, const char *output, gpointer user_data)
{
    stonith_device_t *device = NULL;
    async_command_t *cmd = user_data;

    GListPtr gIter = NULL;
    GListPtr gIterNext = NULL;

    CRM_CHECK(cmd != NULL, return);

    active_children--;

    /* The device is ready to do something else now */
    device = g_hash_table_lookup(device_list, cmd->device);
    if (device) {
        device->active_pid = 0;
        if (rc == pcmk_ok &&
            (safe_str_eq(cmd->action, "list") ||
             safe_str_eq(cmd->action, "monitor") || safe_str_eq(cmd->action, "status"))) {

            /* A successful passive action proves the device is usable */
            device->verified = TRUE;
        }

        mainloop_set_trigger(device->work);
    }

    crm_trace("Operation %s on %s completed with rc=%d (%d remaining)",
              cmd->action, cmd->device, rc, g_list_length(cmd->device_next));

    if (rc != 0 && cmd->device_next) {
        /* This device failed but other candidates remain: try the next one */
        stonith_device_t *dev = g_hash_table_lookup(device_list, cmd->device_next->data);

        if (dev) {
            log_operation(cmd, rc, pid, dev->id, output);

            cmd->device_next = cmd->device_next->next;
            schedule_stonith_command(cmd, dev);
            /* Prevent cmd from being freed */
            cmd = NULL;
            goto done;
        }
    }

    if (rc > 0) {
        /* Try to provide _something_ useful */
        if(output == NULL) {
            rc = -ENODATA;

        } else if(strstr(output, "imed out")) {
            /* matches both "Timed out" and "timed out" */
            rc = -ETIMEDOUT;

        } else if(strstr(output, "Unrecognised action")) {
            rc = -EOPNOTSUPP;

        } else {
            rc = -pcmk_err_generic;
        }
    }

    stonith_send_async_reply(cmd, output, rc, pid);

    if (rc != 0) {
        goto done;
    }

    /* Check to see if any operations are scheduled to do the exact
     * same thing that just completed.  If so, rather than
     * performing the same fencing operation twice, return the result
     * of this operation for all pending commands it matches. */
    for (gIter = cmd_list; gIter != NULL; gIter = gIterNext) {
        async_command_t *cmd_other = gIter->data;

        gIterNext = gIter->next;

        if (cmd == cmd_other) {
            continue;
        }

        /* A pending scheduled command matches the command that just finished if.
         * 1. The client connections are different.
         * 2. The node victim is the same.
         * 3. The fencing action is the same.
         * 4. The device scheduled to execute the action is the same.
         */
        if (safe_str_eq(cmd->client, cmd_other->client) ||
            safe_str_neq(cmd->victim, cmd_other->victim) ||
            safe_str_neq(cmd->action, cmd_other->action) ||
            safe_str_neq(cmd->device, cmd_other->device)) {

            continue;
        }

        crm_notice
            ("Merging stonith action %s for node %s originating from client %s with identical stonith request from client %s",
             cmd_other->action, cmd_other->victim, cmd_other->client_name, cmd->client_name);

        /* Detach the matching command, answer it with our result, and drop it */
        cmd_list = g_list_remove_link(cmd_list, gIter);

        stonith_send_async_reply(cmd_other, output, rc, pid);
        cancel_stonith_command(cmd_other);

        free_async_command(cmd_other);
        g_list_free_1(gIter);
    }

  done:
    free_async_command(cmd);
}
 
 static gint
 sort_device_priority(gconstpointer a, gconstpointer b)
 {
     const stonith_device_t *dev_a = a;
     const stonith_device_t *dev_b = b;
 
     if (dev_a->priority > dev_b->priority) {
         return -1;
     } else if (dev_a->priority < dev_b->priority) {
         return 1;
     }
     return 0;
 }
 
/* Callback for get_capable_devices() on the fencing path: picks the
 * highest-priority capable device and schedules the fence operation,
 * or replies with -ENODEV when nothing can fence the target. */
static void
stonith_fence_get_devices_cb(GList * devices, void *user_data)
{
    async_command_t *cmd = user_data;
    stonith_device_t *device = NULL;

    crm_info("Found %d matching devices for '%s'", g_list_length(devices), cmd->victim);

    if (g_list_length(devices) > 0) {
        /* Order based on priority */
        devices = g_list_sort(devices, sort_device_priority);
        device = g_hash_table_lookup(device_list, devices->data);

        if (device) {
            /* Keep the remaining candidates so st_child_done() can fail over */
            cmd->device_list = devices;
            cmd->device_next = devices->next;
            devices = NULL;     /* list owned by cmd now */
        }
    }

    /* we have a device, schedule it for fencing. */
    if (device) {
        schedule_stonith_command(cmd, device);
        /* in progress */
        return;
    }

    /* no device found! */
    stonith_send_async_reply(cmd, NULL, -ENODEV, 0);

    free_async_command(cmd);
    g_list_free_full(devices, free);
}
 
/* Handle a STONITH_OP_FENCE request that must be executed locally: either run
 * it on an explicitly requested device, or search for capable devices and
 * continue asynchronously in stonith_fence_get_devices_cb().
 * Returns -EINPROGRESS on success, or a negative errno-style code. */
static int
stonith_fence(xmlNode * msg)
{
    const char *device_id = NULL;
    stonith_device_t *device = NULL;
    async_command_t *cmd = create_async_command(msg);
    xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_ERR);

    if (cmd == NULL) {
        return -EPROTO;
    }

    device_id = crm_element_value(dev, F_STONITH_DEVICE);
    if (device_id) {
        device = g_hash_table_lookup(device_list, device_id);
        if (device == NULL) {
            crm_err("Requested device '%s' is not available", device_id);
            /* NOTE(review): 'cmd' is not released on this path -- confirm
             * whether create_async_command() tracks it elsewhere, otherwise
             * this looks like a leak */
            return -ENODEV;
        }
        schedule_stonith_command(cmd, device);

    } else {
        const char *host = crm_element_value(dev, F_STONITH_TARGET);

        if (cmd->options & st_opt_cs_nodeid) {
            /* Target was supplied as a corosync node id; map it to a uname */
            int nodeid = crm_atoi(host, NULL);
            crm_node_t *node = crm_get_peer(nodeid, NULL);

            if (node) {
                host = node->uname;
            }
        }
        get_capable_devices(host, cmd->action, cmd->default_timeout, cmd,
                            stonith_fence_get_devices_cb);
    }

    return -EINPROGRESS;
}
 
 xmlNode *
 stonith_construct_reply(xmlNode * request, const char *output, xmlNode * data, int rc)
 {
     int lpc = 0;
     xmlNode *reply = NULL;
 
     const char *name = NULL;
     const char *value = NULL;
 
     const char *names[] = {
         F_STONITH_OPERATION,
         F_STONITH_CALLID,
         F_STONITH_CLIENTID,
         F_STONITH_CLIENTNAME,
         F_STONITH_REMOTE_OP_ID,
         F_STONITH_CALLOPTS
     };
 
     crm_trace("Creating a basic reply");
     reply = create_xml_node(NULL, T_STONITH_REPLY);
 
     crm_xml_add(reply, "st_origin", __FUNCTION__);
     crm_xml_add(reply, F_TYPE, T_STONITH_NG);
     crm_xml_add(reply, "st_output", output);
     crm_xml_add_int(reply, F_STONITH_RC, rc);
 
     CRM_CHECK(request != NULL, crm_warn("Can't create a sane reply"); return reply);
     for (lpc = 0; lpc < DIMOF(names); lpc++) {
         name = names[lpc];
         value = crm_element_value(request, name);
         crm_xml_add(reply, name, value);
     }
 
     if (data != NULL) {
         crm_trace("Attaching reply output");
         add_message_xml(reply, F_STONITH_CALLDATA, data);
     }
     return reply;
 }
 
/* Build the reply XML for a completed asynchronous command, copying the
 * command's identifying fields so the receiver can correlate it. */
static xmlNode *
stonith_construct_async_reply(async_command_t * cmd, const char *output, xmlNode * data, int rc)
{
    xmlNode *reply = NULL;

    crm_trace("Creating a basic reply");
    reply = create_xml_node(NULL, T_STONITH_REPLY);

    crm_xml_add(reply, "st_origin", __FUNCTION__);
    crm_xml_add(reply, F_TYPE, T_STONITH_NG);

    crm_xml_add(reply, F_STONITH_OPERATION, cmd->op);
    crm_xml_add(reply, F_STONITH_DEVICE, cmd->device);
    crm_xml_add(reply, F_STONITH_REMOTE_OP_ID, cmd->remote_op_id);
    crm_xml_add(reply, F_STONITH_CLIENTID, cmd->client);
    crm_xml_add(reply, F_STONITH_CLIENTNAME, cmd->client_name);
    crm_xml_add(reply, F_STONITH_TARGET, cmd->victim);
    /* NOTE(review): F_STONITH_ACTION is populated from cmd->op rather than
     * cmd->action (the field other functions here use for the action name)
     * -- confirm this is intentional */
    crm_xml_add(reply, F_STONITH_ACTION, cmd->op);
    crm_xml_add(reply, F_STONITH_ORIGIN, cmd->origin);
    crm_xml_add_int(reply, F_STONITH_CALLID, cmd->id);
    crm_xml_add_int(reply, F_STONITH_CALLOPTS, cmd->options);

    crm_xml_add_int(reply, F_STONITH_RC, rc);

    crm_xml_add(reply, "st_output", output);

    if (data != NULL) {
        /* NOTE(review): logged at 'info' here but 'trace' in
         * stonith_construct_reply() -- possibly unintentional */
        crm_info("Attaching reply output");
        add_message_xml(reply, F_STONITH_CALLDATA, data);
    }
    return reply;
}
 
 bool fencing_peer_active(crm_node_t *peer)
 {
     if (peer == NULL) {
         return FALSE;
     } else if (peer->uname == NULL) {
         return FALSE;
     } else if(peer->processes & (crm_proc_plugin | crm_proc_heartbeat | crm_proc_cpg)) {
         return TRUE;
     }
     return FALSE;
 }
 
/*!
 * \internal
 * \brief Determine if we need to use an alternate node to
 * fence the target. If so return that node's uname
 *
 * An alternate is only needed when the target is ourself and a fencing
 * topology is configured for it (complex self-fencing).
 *
 * \param[in] target  Name of the node to be fenced
 *
 * \retval NULL, no alternate host
 * \retval uname, uname of alternate host to use
 */
static const char *
check_alternate_host(const char *target)
{
    const char *alternate_host = NULL;

    if (g_hash_table_lookup(topology, target) && safe_str_eq(target, stonith_our_uname)) {
        GHashTableIter gIter;
        crm_node_t *entry = NULL;

        /* Pick the first active fencing-capable peer that isn't the target */
        g_hash_table_iter_init(&gIter, crm_peer_cache);
        while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
            crm_trace("Checking for %s.%d != %s", entry->uname, entry->id, target);
            if (fencing_peer_active(entry)
                && safe_str_neq(entry->uname, target)) {
                alternate_host = entry->uname;
                break;
            }
        }
        if (alternate_host == NULL) {
            /* Dump the peer cache to aid debugging */
            crm_err("No alternate host available to handle complex self fencing request");
            g_hash_table_iter_init(&gIter, crm_peer_cache);
            while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
                crm_notice("Peer[%d] %s", entry->id, entry->uname);
            }
        }
    }

    return alternate_host;
}
 
 static void
 stonith_send_reply(xmlNode * reply, int call_options, const char *remote_peer,
                    const char *client_id)
 {
     if (remote_peer) {
         send_cluster_message(crm_get_peer(0, remote_peer), crm_msg_stonith_ng, reply, FALSE);
     } else {
         do_local_reply(reply, client_id, is_set(call_options, st_opt_sync_call), remote_peer != NULL);
     }
 }
 
 static int
 handle_request(crm_client_t * client, uint32_t id, uint32_t flags, xmlNode * request,
                const char *remote_peer)
 {
     int call_options = 0;
     int rc = -EOPNOTSUPP;
 
     xmlNode *data = NULL;
     xmlNode *reply = NULL;
 
     char *output = NULL;
     const char *op = crm_element_value(request, F_STONITH_OPERATION);
     const char *client_id = crm_element_value(request, F_STONITH_CLIENTID);
 
     crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
 
     if (is_set(call_options, st_opt_sync_call)) {
         CRM_ASSERT(client == NULL || client->request_id == id);
     }
 
     if (crm_str_eq(op, CRM_OP_REGISTER, TRUE)) {
         xmlNode *reply = create_xml_node(NULL, "reply");
 
         CRM_ASSERT(client);
         crm_xml_add(reply, F_STONITH_OPERATION, CRM_OP_REGISTER);
         crm_xml_add(reply, F_STONITH_CLIENTID, client->id);
         crm_ipcs_send(client, id, reply, FALSE);
         client->request_id = 0;
         free_xml(reply);
         return 0;
 
     } else if (crm_str_eq(op, STONITH_OP_EXEC, TRUE)) {
         rc = stonith_device_action(request, &output);
 
     } else if (crm_str_eq(op, STONITH_OP_TIMEOUT_UPDATE, TRUE)) {
         const char *call_id = crm_element_value(request, F_STONITH_CALLID);
         const char *client_id = crm_element_value(request, F_STONITH_CLIENTID);
         int op_timeout = 0;
 
         crm_element_value_int(request, F_STONITH_TIMEOUT, &op_timeout);
         do_stonith_async_timeout_update(client_id, call_id, op_timeout);
         return 0;
 
     } else if (crm_str_eq(op, STONITH_OP_QUERY, TRUE)) {
         if (remote_peer) {
             create_remote_stonith_op(client_id, request, TRUE); /* Record it for the future notification */
         }
         stonith_query(request, remote_peer, client_id, call_options);
         return 0;
 
     } else if (crm_str_eq(op, T_STONITH_NOTIFY, TRUE)) {
         const char *flag_name = NULL;
 
         CRM_ASSERT(client);
         flag_name = crm_element_value(request, F_STONITH_NOTIFY_ACTIVATE);
         if (flag_name) {
             crm_debug("Setting %s callbacks for %s (%s): ON", flag_name, client->name, client->id);
             client->options |= get_stonith_flag(flag_name);
         }
 
         flag_name = crm_element_value(request, F_STONITH_NOTIFY_DEACTIVATE);
         if (flag_name) {
             crm_debug("Setting %s callbacks for %s (%s): off", flag_name, client->name, client->id);
             client->options |= get_stonith_flag(flag_name);
         }
 
         if (flags & crm_ipc_client_response) {
             crm_ipcs_send_ack(client, id, "ack", __FUNCTION__, __LINE__);
             client->request_id = 0;
         }
         return 0;
 
     } else if (crm_str_eq(op, STONITH_OP_RELAY, TRUE)) {
         xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE);
 
         crm_notice("Peer %s has received a forwarded fencing request from %s to fence (%s) peer %s",
                    stonith_our_uname,
                    client ? client->name : remote_peer,
                    crm_element_value(dev, F_STONITH_ACTION),
                    crm_element_value(dev, F_STONITH_TARGET));
 
         if (initiate_remote_stonith_op(NULL, request, FALSE) != NULL) {
             rc = -EINPROGRESS;
         }
 
     } else if (crm_str_eq(op, STONITH_OP_FENCE, TRUE)) {
 
         if (remote_peer || stand_alone) {
             rc = stonith_fence(request);
 
         } else if (call_options & st_opt_manual_ack) {
             remote_fencing_op_t *rop = NULL;
             xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE);
             const char *target = crm_element_value(dev, F_STONITH_TARGET);
 
-            crm_notice("Recieved manual confirmation that %s is fenced", target);
+            crm_notice("Received manual confirmation that %s is fenced", target);
             rop = initiate_remote_stonith_op(client, request, TRUE);
             rc = stonith_manual_ack(request, rop);
 
         } else {
             const char *alternate_host = NULL;
             xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE);
             const char *target = crm_element_value(dev, F_STONITH_TARGET);
             const char *action = crm_element_value(dev, F_STONITH_ACTION);
             const char *device = crm_element_value(dev, F_STONITH_DEVICE);
 
             if (client) {
                 int tolerance = 0;
 
                 crm_notice("Client %s.%.8s wants to fence (%s) '%s' with device '%s'",
                            client->name, client->id, action, target, device ? device : "(any)");
 
                 crm_element_value_int(dev, F_STONITH_TOLERANCE, &tolerance);
 
                 if (stonith_check_fence_tolerance(tolerance, target, action)) {
                     rc = 0;
                     goto done;
                 }
 
             } else {
                 crm_notice("Peer %s wants to fence (%s) '%s' with device '%s'",
                            remote_peer, action, target, device ? device : "(any)");
             }
 
             alternate_host = check_alternate_host(target);
 
             if (alternate_host && client) {
                 const char *client_id = NULL;
 
                 crm_notice("Forwarding complex self fencing request to peer %s", alternate_host);
 
                 if (client) {
                     client_id = client->id;
                 } else {
                     client_id = crm_element_value(request, F_STONITH_CLIENTID);
                 }
                 /* Create a record of it, otherwise call_id will be 0 if we need to notify of failures */
                 create_remote_stonith_op(client_id, request, FALSE);
 
                 crm_xml_add(request, F_STONITH_OPERATION, STONITH_OP_RELAY);
                 crm_xml_add(request, F_STONITH_CLIENTID, client->id);
                 send_cluster_message(crm_get_peer(0, alternate_host), crm_msg_stonith_ng, request,
                                      FALSE);
                 rc = -EINPROGRESS;
 
             } else if (initiate_remote_stonith_op(client, request, FALSE) != NULL) {
                 rc = -EINPROGRESS;
             }
         }
 
     } else if (crm_str_eq(op, STONITH_OP_FENCE_HISTORY, TRUE)) {
         rc = stonith_fence_history(request, &data);
 
     } else if (crm_str_eq(op, STONITH_OP_DEVICE_ADD, TRUE)) {
         const char *id = NULL;
         xmlNode *notify_data = create_xml_node(NULL, op);
 
         rc = stonith_device_register(request, &id, FALSE);
 
         crm_xml_add(notify_data, F_STONITH_DEVICE, id);
         crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(device_list));
 
         do_stonith_notify(call_options, op, rc, notify_data);
         free_xml(notify_data);
 
     } else if (crm_str_eq(op, STONITH_OP_DEVICE_DEL, TRUE)) {
         xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, request, LOG_ERR);
         const char *id = crm_element_value(dev, XML_ATTR_ID);
         xmlNode *notify_data = create_xml_node(NULL, op);
 
         rc = stonith_device_remove(id, FALSE);
 
         crm_xml_add(notify_data, F_STONITH_DEVICE, id);
         crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(device_list));
 
         do_stonith_notify(call_options, op, rc, notify_data);
         free_xml(notify_data);
 
     } else if (crm_str_eq(op, STONITH_OP_LEVEL_ADD, TRUE)) {
         char *id = NULL;
         xmlNode *notify_data = create_xml_node(NULL, op);
 
         rc = stonith_level_register(request, &id);
 
         crm_xml_add(notify_data, F_STONITH_DEVICE, id);
         crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(topology));
 
         do_stonith_notify(call_options, op, rc, notify_data);
         free_xml(notify_data);
 
     } else if (crm_str_eq(op, STONITH_OP_LEVEL_DEL, TRUE)) {
         char *id = NULL;
         xmlNode *notify_data = create_xml_node(NULL, op);
 
         rc = stonith_level_remove(request, &id);
 
         crm_xml_add(notify_data, F_STONITH_DEVICE, id);
         crm_xml_add_int(notify_data, F_STONITH_ACTIVE, g_hash_table_size(topology));
 
         do_stonith_notify(call_options, op, rc, notify_data);
         free_xml(notify_data);
 
     } else if (crm_str_eq(op, STONITH_OP_CONFIRM, TRUE)) {
         async_command_t *cmd = create_async_command(request);
         xmlNode *reply = stonith_construct_async_reply(cmd, NULL, NULL, 0);
 
         crm_xml_add(reply, F_STONITH_OPERATION, T_STONITH_NOTIFY);
         crm_notice("Broadcasting manual fencing confirmation for node %s", cmd->victim);
         send_cluster_message(NULL, crm_msg_stonith_ng, reply, FALSE);
 
         free_async_command(cmd);
         free_xml(reply);
 
     } else {
         crm_err("Unknown %s from %s", op, client ? client->name : remote_peer);
         crm_log_xml_warn(request, "UnknownOp");
     }
 
   done:
 
     /* Always reply unles the request is in process still.
      * If in progress, a reply will happen async after the request
      * processing is finished */
     if (rc != -EINPROGRESS) {
         crm_trace("Reply handling: %p %u %u %d %d %s", client, client?client->request_id:0,
                   id, is_set(call_options, st_opt_sync_call), call_options,
                   crm_element_value(request, F_STONITH_CALLOPTS));
 
         if (is_set(call_options, st_opt_sync_call)) {
             CRM_ASSERT(client == NULL || client->request_id == id);
         }
         reply = stonith_construct_reply(request, output, data, rc);
         stonith_send_reply(reply, call_options, remote_peer, client_id);
     }
 
     free(output);
     free_xml(data);
     free_xml(reply);
 
     return rc;
 }
 
 static void
 handle_reply(crm_client_t * client, xmlNode * request, const char *remote_peer)
 {
     const char *op = crm_element_value(request, F_STONITH_OPERATION);
 
     if (crm_str_eq(op, STONITH_OP_QUERY, TRUE)) {
         process_remote_stonith_query(request);
     } else if (crm_str_eq(op, T_STONITH_NOTIFY, TRUE)) {
         process_remote_stonith_exec(request);
     } else if (crm_str_eq(op, STONITH_OP_FENCE, TRUE)) {
         /* Reply to a complex fencing op */
         process_remote_stonith_exec(request);
     } else {
         crm_err("Unknown %s reply from %s", op, client ? client->name : remote_peer);
         crm_log_xml_warn(request, "UnknownOp");
     }
 }
 
/* External entry point for all stonith-ng messages (local IPC or cluster).
 * Distinguishes replies from requests and dispatches to handle_reply() or
 * handle_request() respectively. */
void
stonith_command(crm_client_t * client, uint32_t id, uint32_t flags, xmlNode * request,
                const char *remote_peer)
{
    int call_options = 0;
    int rc = 0;
    gboolean is_reply = FALSE;
    /* 'op' is copied (not referenced) -- see the valgrind trace below */
    char *op = crm_element_value_copy(request, F_STONITH_OPERATION);

    /* F_STONITH_OPERATION can be overwritten in remote_op_done() with crm_xml_add()
     *
     * by 0x4C2E934: crm_xml_add (xml.c:377)
     * by 0x40C5E9: remote_op_done (remote.c:178)
     * by 0x40F1D3: process_remote_stonith_exec (remote.c:1084)
     * by 0x40AD4F: stonith_command (commands.c:1891)
     *
     */

    /* A reply is identified by the presence of a T_STONITH_REPLY element */
    if (get_xpath_object("//" T_STONITH_REPLY, request, LOG_DEBUG_3)) {
        is_reply = TRUE;
    }

    crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
    crm_debug("Processing %s%s %u from %s (%16x)", op, is_reply ? " reply" : "",
              id, client ? client->name : remote_peer, call_options);

    if (is_set(call_options, st_opt_sync_call)) {
        /* Synchronous callers must be waiting on exactly this message id */
        CRM_ASSERT(client == NULL || client->request_id == id);
    }

    if (is_reply) {
        handle_reply(client, request, remote_peer);
    } else {
        rc = handle_request(client, id, flags, request, remote_peer);
    }

    do_crm_log_unlikely(rc > 0 ? LOG_DEBUG : LOG_INFO, "Processed %s%s from %s: %s (%d)", op,
                        is_reply ? " reply" : "", client ? client->name : remote_peer,
                        rc > 0 ? "" : pcmk_strerror(rc), rc);

    free(op);
}
diff --git a/lib/cib/cib_native.c b/lib/cib/cib_native.c
index 9553ba2669..f03d050605 100644
--- a/lib/cib/cib_native.c
+++ b/lib/cib/cib_native.c
@@ -1,512 +1,512 @@
 /*
  * Copyright (c) 2004 International Business Machines
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
  * This library is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  *
  */
 #include <crm_internal.h>
 #include <unistd.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <stdarg.h>
 #include <string.h>
 
 #include <glib.h>
 
 #include <crm/crm.h>
 #include <crm/cib/internal.h>
 
 #include <crm/msg_xml.h>
 #include <crm/common/mainloop.h>
 
 typedef struct cib_native_opaque_s {
     char *token;
     crm_ipc_t *ipc;
     void (*dnotify_fn) (gpointer user_data);
     mainloop_io_t *source;
 
 } cib_native_opaque_t;
 
 int cib_native_perform_op(cib_t * cib, const char *op, const char *host, const char *section,
                           xmlNode * data, xmlNode ** output_data, int call_options);
 
 int cib_native_perform_op_delegate(cib_t * cib, const char *op, const char *host,
                                    const char *section, xmlNode * data, xmlNode ** output_data,
                                    int call_options, const char *user_name);
 
 int cib_native_free(cib_t * cib);
 int cib_native_signoff(cib_t * cib);
 int cib_native_signon(cib_t * cib, const char *name, enum cib_conn_type type);
 int cib_native_signon_raw(cib_t * cib, const char *name, enum cib_conn_type type, int *event_fd);
 
 bool cib_native_dispatch(cib_t * cib);
 
 int cib_native_set_connection_dnotify(cib_t * cib, void (*dnotify) (gpointer user_data));
 
 cib_t *
 cib_native_new(void)
 {
     cib_native_opaque_t *native = NULL;
     cib_t *cib = cib_new_variant();
 
     native = calloc(1, sizeof(cib_native_opaque_t));
 
     cib->variant = cib_native;
     cib->variant_opaque = native;
 
     native->ipc = NULL;
     native->source = NULL;
     native->dnotify_fn = NULL;
 
     /* assign variant specific ops */
     cib->delegate_fn = cib_native_perform_op_delegate;
     cib->cmds->signon = cib_native_signon;
     cib->cmds->signon_raw = cib_native_signon_raw;
     cib->cmds->signoff = cib_native_signoff;
     cib->cmds->free = cib_native_free;
 
     cib->cmds->register_notification = cib_native_register_notification;
     cib->cmds->set_connection_dnotify = cib_native_set_connection_dnotify;
 
     return cib;
 }
 
 int
 cib_native_signon(cib_t * cib, const char *name, enum cib_conn_type type)
 {
     return cib_native_signon_raw(cib, name, type, NULL);
 }
 
 static int
 cib_native_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata)
 {
     const char *type = NULL;
     xmlNode *msg = NULL;
 
     cib_t *cib = userdata;
 
     crm_trace("dispatching %p", userdata);
 
     if (cib == NULL) {
         crm_err("No CIB!");
         return 0;
     }
 
     msg = string2xml(buffer);
 
     if (msg == NULL) {
         crm_warn("Received a NULL msg from CIB service.");
         return 0;
     }
 
     /* do callbacks */
     type = crm_element_value(msg, F_TYPE);
     crm_trace("Activating %s callbacks...", type);
     crm_log_xml_explicit(msg, "cib-reply");
 
     if (safe_str_eq(type, T_CIB)) {
         cib_native_callback(cib, msg, 0, 0);
 
     } else if (safe_str_eq(type, T_CIB_NOTIFY)) {
         g_list_foreach(cib->notify_list, cib_native_notify, msg);
 
     } else {
         crm_err("Unknown message type: %s", type);
     }
 
     free_xml(msg);
     return 0;
 }
 
 bool
 cib_native_dispatch(cib_t * cib)
 {
     gboolean stay_connected = TRUE;
     cib_native_opaque_t *native;
 
     if (cib == NULL) {
         crm_err("No CIB!");
         return FALSE;
     }
 
     crm_trace("dispatching %p", cib);
     native = cib->variant_opaque;
     while (crm_ipc_ready(native->ipc)) {
 
         if (crm_ipc_read(native->ipc) > 0) {
             const char *msg = crm_ipc_buffer(native->ipc);
 
             cib_native_dispatch_internal(msg, strlen(msg), cib);
         }
 
         if (crm_ipc_connected(native->ipc) == FALSE) {
             crm_err("Connection closed");
             stay_connected = FALSE;
         }
     }
 
     return stay_connected;
 }
 
 static void
 cib_native_destroy(void *userdata)
 {
     cib_t *cib = userdata;
     cib_native_opaque_t *native = cib->variant_opaque;
 
     crm_trace("destroying %p", userdata);
     cib->state = cib_disconnected;
     native->source = NULL;
     native->ipc = NULL;
 
     if (native->dnotify_fn) {
         native->dnotify_fn(userdata);
     }
 }
 
 int
 cib_native_signon_raw(cib_t * cib, const char *name, enum cib_conn_type type, int *async_fd)
 {
     int rc = pcmk_ok;
     const char *channel = NULL;
     cib_native_opaque_t *native = cib->variant_opaque;
 
     static struct ipc_client_callbacks cib_callbacks = {
         .dispatch = cib_native_dispatch_internal,
         .destroy = cib_native_destroy
     };
 
     cib->call_timeout = MAX_IPC_DELAY;
 
     if (type == cib_command) {
         cib->state = cib_connected_command;
         channel = cib_channel_rw;
 
     } else if (type == cib_command_nonblocking) {
         cib->state = cib_connected_command;
         channel = cib_channel_shm;
 
     } else if (type == cib_query) {
         cib->state = cib_connected_query;
         channel = cib_channel_ro;
 
     } else {
         return -ENOTCONN;
     }
 
     crm_trace("Connecting %s channel", channel);
 
     if (async_fd != NULL) {
         native->ipc = crm_ipc_new(channel, 0);
 
         if (native->ipc && crm_ipc_connect(native->ipc)) {
             *async_fd = crm_ipc_get_fd(native->ipc);
 
         } else if (native->ipc) {
             rc = -ENOTCONN;
         }
 
     } else {
         native->source =
             mainloop_add_ipc_client(channel, G_PRIORITY_HIGH, 512 * 1024 /* 512k */ , cib,
                                     &cib_callbacks);
         native->ipc = mainloop_get_ipc_client(native->source);
     }
 
     if (rc != pcmk_ok || native->ipc == NULL || crm_ipc_connected(native->ipc) == FALSE) {
         crm_debug("Connection unsuccessful (%d %p)", rc, native->ipc);
         rc = -ENOTCONN;
     }
 
     if (rc == pcmk_ok) {
         xmlNode *reply = NULL;
         xmlNode *hello = create_xml_node(NULL, "cib_command");
 
         crm_xml_add(hello, F_TYPE, T_CIB);
         crm_xml_add(hello, F_CIB_OPERATION, CRM_OP_REGISTER);
         crm_xml_add(hello, F_CIB_CLIENTNAME, name);
         crm_xml_add_int(hello, F_CIB_CALLOPTS, cib_sync_call);
 
         if (crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1, &reply) > 0) {
             const char *msg_type = crm_element_value(reply, F_CIB_OPERATION);
 
             rc = pcmk_ok;
             crm_log_xml_trace(reply, "reg-reply");
 
             if (safe_str_neq(msg_type, CRM_OP_REGISTER)) {
                 crm_err("Invalid registration message: %s", msg_type);
                 rc = -EPROTO;
 
             } else {
                 native->token = crm_element_value_copy(reply, F_CIB_CLIENTID);
                 if (native->token == NULL) {
                     rc = -EPROTO;
                 }
             }
             free_xml(reply);
 
         } else {
             rc = -ECOMM;
         }
 
         free_xml(hello);
     }
 
     if (rc == pcmk_ok) {
         crm_debug("Connection to CIB successful");
         return pcmk_ok;
     }
 
     crm_debug("Connection to CIB failed: %s", pcmk_strerror(rc));
     cib_native_signoff(cib);
     return rc;
 }
 
 int
 cib_native_signoff(cib_t * cib)
 {
     cib_native_opaque_t *native = cib->variant_opaque;
 
     crm_debug("Signing out of the CIB Service");
 
     if (native->source != NULL) {
         /* Attached to mainloop */
         mainloop_del_ipc_client(native->source);
         native->source = NULL;
         native->ipc = NULL;
 
     } else if (native->ipc) {
         /* Not attached to mainloop */
         crm_ipc_t *ipc = native->ipc;
 
         native->ipc = NULL;
         crm_ipc_close(ipc);
         crm_ipc_destroy(ipc);
     }
 
     cib->state = cib_disconnected;
     cib->type = cib_none;
 
     return pcmk_ok;
 }
 
 int
 cib_native_free(cib_t * cib)
 {
     int rc = pcmk_ok;
 
     if (cib->state != cib_disconnected) {
         rc = cib_native_signoff(cib);
     }
 
     if (cib->state == cib_disconnected) {
         cib_native_opaque_t *native = cib->variant_opaque;
 
         free(native->token);
         free(cib->variant_opaque);
         free(cib->cmds);
         free(cib);
     }
 
     return rc;
 }
 
 int
 cib_native_perform_op(cib_t * cib, const char *op, const char *host, const char *section,
                       xmlNode * data, xmlNode ** output_data, int call_options)
 {
     return cib_native_perform_op_delegate(cib, op, host, section,
                                           data, output_data, call_options, NULL);
 }
 
 int
 cib_native_perform_op_delegate(cib_t * cib, const char *op, const char *host, const char *section,
                                xmlNode * data, xmlNode ** output_data, int call_options,
                                const char *user_name)
 {
     int rc = pcmk_ok;
     int reply_id = 0;
     enum crm_ipc_flags ipc_flags = crm_ipc_client_none;
 
     xmlNode *op_msg = NULL;
     xmlNode *op_reply = NULL;
 
     cib_native_opaque_t *native = cib->variant_opaque;
 
     if (cib->state == cib_disconnected) {
         return -ENOTCONN;
     }
 
     if (output_data != NULL) {
         *output_data = NULL;
     }
 
     if (op == NULL) {
         crm_err("No operation specified");
         return -EINVAL;
     }
 
     if (call_options & cib_sync_call) {
         ipc_flags |= crm_ipc_client_response;
     }
 
     cib->call_id++;
     /* prevent call_id from being negative (or zero) and conflicting
      *    with the cib_errors enum
      * use 2 because we use it as (cib->call_id - 1) below
      */
     if (cib->call_id < 1) {
         cib->call_id = 1;
     }
 
     CRM_CHECK(native->token != NULL,;
         );
     op_msg =
         cib_create_op(cib->call_id, native->token, op, host, section, data, call_options,
                       user_name);
     if (op_msg == NULL) {
         return -EPROTO;
     }
 
     crm_trace("Sending %s message to CIB service (timeout=%ds)", op, cib->call_timeout);
     rc = crm_ipc_send(native->ipc, op_msg, ipc_flags, cib->call_timeout * 1000, &op_reply);
     free_xml(op_msg);
 
     if (rc < 0) {
         crm_err("Couldn't perform %s operation (timeout=%ds): %s (%d)", op,
                 cib->call_timeout, pcmk_strerror(rc), rc);
         rc = -ECOMM;
         goto done;
     }
 
     crm_log_xml_trace(op_reply, "Reply");
 
     if (!(call_options & cib_sync_call)) {
         crm_trace("Async call, returning %d", cib->call_id);
         CRM_CHECK(cib->call_id != 0, return -ENOMSG);
         free_xml(op_reply);
         return cib->call_id;
     }
 
     rc = pcmk_ok;
     crm_element_value_int(op_reply, F_CIB_CALLID, &reply_id);
     if (reply_id == cib->call_id) {
         xmlNode *tmp = get_message_xml(op_reply, F_CIB_CALLDATA);
 
         crm_trace("Syncronous reply %d received", reply_id);
         if (crm_element_value_int(op_reply, F_CIB_RC, &rc) != 0) {
             rc = -EPROTO;
         }
 
         if (output_data == NULL || (call_options & cib_discard_reply)) {
             crm_trace("Discarding reply");
 
         } else if (tmp != NULL) {
             *output_data = copy_xml(tmp);
         }
 
     } else if (reply_id <= 0) {
-        crm_err("Recieved bad reply: No id set");
+        crm_err("Received bad reply: No id set");
         crm_log_xml_err(op_reply, "Bad reply");
         rc = -ENOMSG;
         goto done;
 
     } else {
-        crm_err("Recieved bad reply: %d (wanted %d)", reply_id, cib->call_id);
+        crm_err("Received bad reply: %d (wanted %d)", reply_id, cib->call_id);
         crm_log_xml_err(op_reply, "Old reply");
         rc = -ENOMSG;
         goto done;
     }
 
     if (op_reply == NULL && cib->state == cib_disconnected) {
         rc = -ENOTCONN;
 
     } else if (rc == pcmk_ok && op_reply == NULL) {
         rc = -ETIME;
     }
 
     switch (rc) {
         case pcmk_ok:
         case -EPERM:
             break;
 
             /* This is an internal value that clients do not and should not care about */
         case -pcmk_err_diff_resync:
             rc = pcmk_ok;
             break;
 
             /* These indicate internal problems */
         case -EPROTO:
         case -ENOMSG:
             crm_err("Call failed: %s", pcmk_strerror(rc));
             if (op_reply) {
                 crm_log_xml_err(op_reply, "Invalid reply");
             }
             break;
 
         default:
             if (safe_str_neq(op, CIB_OP_QUERY)) {
                 crm_warn("Call failed: %s", pcmk_strerror(rc));
             }
     }
 
   done:
     if (crm_ipc_connected(native->ipc) == FALSE) {
         crm_err("CIB disconnected");
         cib->state = cib_disconnected;
     }
 
     free_xml(op_reply);
     return rc;
 }
 
 int
 cib_native_set_connection_dnotify(cib_t * cib, void (*dnotify) (gpointer user_data))
 {
     cib_native_opaque_t *native = NULL;
 
     if (cib == NULL) {
         crm_err("No CIB!");
         return FALSE;
     }
 
     native = cib->variant_opaque;
     native->dnotify_fn = dnotify;
 
     return pcmk_ok;
 }
 
 int
 cib_native_register_notification(cib_t * cib, const char *callback, int enabled)
 {
     int rc = pcmk_ok;
     xmlNode *notify_msg = create_xml_node(NULL, "cib-callback");
     cib_native_opaque_t *native = cib->variant_opaque;
 
     if (cib->state != cib_disconnected) {
         crm_xml_add(notify_msg, F_CIB_OPERATION, T_CIB_NOTIFY);
         crm_xml_add(notify_msg, F_CIB_NOTIFY_TYPE, callback);
         crm_xml_add_int(notify_msg, F_CIB_NOTIFY_ACTIVATE, enabled);
         rc = crm_ipc_send(native->ipc, notify_msg, crm_ipc_client_response,
                           1000 * cib->call_timeout, NULL);
         if (rc <= 0) {
             crm_trace("Notification not registered: %d", rc);
             rc = -ECOMM;
         }
     }
 
     free_xml(notify_msg);
     return rc;
 }
diff --git a/lib/cluster/corosync.c b/lib/cluster/corosync.c
index a7886f52aa..e0a75abab0 100644
--- a/lib/cluster/corosync.c
+++ b/lib/cluster/corosync.c
@@ -1,555 +1,555 @@
 /*
  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
  * This library is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 #include <bzlib.h>
 #include <sys/socket.h>
 #include <netinet/in.h>
 #include <arpa/inet.h>
 #include <netdb.h>
 
 #include <crm/common/ipc.h>
 #include <crm/cluster/internal.h>
 #include <crm/common/mainloop.h>
 #include <sys/utsname.h>
 
 #include <qb/qbipcc.h>
 #include <qb/qbutil.h>
 
 #include <corosync/corodefs.h>
 #include <corosync/corotypes.h>
 #include <corosync/hdb.h>
 #include <corosync/cfg.h>
 #include <corosync/cmap.h>
 #include <corosync/quorum.h>
 
 #include <crm/msg_xml.h>
 
 quorum_handle_t pcmk_quorum_handle = 0;
 
 gboolean(*quorum_app_callback) (unsigned long long seq, gboolean quorate) = NULL;
 
 /*
  * CFG functionality stolen from node_name() in corosync-quorumtool.c
  * This resolves the first address assigned to a node and returns the name or IP address.
  */
 char *
 corosync_node_name(uint64_t /*cmap_handle_t */ cmap_handle, uint32_t nodeid)
 {
     int lpc = 0;
     int rc = CS_OK;
     int retries = 0;
     char *name = NULL;
     cmap_handle_t local_handle = 0;
 
     /* nodeid == 0 == CMAN_NODEID_US */
     if (nodeid == 0) {
         nodeid = get_local_nodeid(0);
     }
 
     if (cmap_handle == 0 && local_handle == 0) {
         retries = 0;
         crm_trace("Initializing CMAP connection");
         do {
             rc = cmap_initialize(&local_handle);
             if (rc != CS_OK) {
                 retries++;
                 crm_debug("API connection setup failed: %s.  Retrying in %ds", cs_strerror(rc),
                           retries);
                 sleep(retries);
             }
 
         } while (retries < 5 && rc != CS_OK);
 
         if (rc != CS_OK) {
             crm_warn("Could not connect to Cluster Configuration Database API, error %s",
                      cs_strerror(rc));
             local_handle = 0;
         }
     }
 
     if (cmap_handle == 0) {
         cmap_handle = local_handle;
     }
 
     while (name == NULL && cmap_handle != 0) {
         uint32_t id = 0;
         char *key = NULL;
 
         key = g_strdup_printf("nodelist.node.%d.nodeid", lpc);
         rc = cmap_get_uint32(cmap_handle, key, &id);
         crm_trace("Checking %u vs %u from %s", nodeid, id, key);
         g_free(key);
 
         if (rc != CS_OK) {
             break;
         }
 
         if (nodeid == id) {
             crm_trace("Searching for node name for %u in nodelist.node.%d %s", nodeid, lpc, name);
             if (name == NULL) {
                 key = g_strdup_printf("nodelist.node.%d.ring0_addr", lpc);
                 rc = cmap_get_string(cmap_handle, key, &name);
                 crm_trace("%s = %s", key, name);
 
                 if (node_name_is_valid(key, name) == FALSE) {
                     free(name);
                     name = NULL;
                 }
                 g_free(key);
             }
 
             if (name == NULL) {
                 key = g_strdup_printf("nodelist.node.%d.name", lpc);
                 rc = cmap_get_string(cmap_handle, key, &name);
                 crm_trace("%s = %s %d", key, name, rc);
                 g_free(key);
             }
             break;
         }
 
         lpc++;
     }
 
     if(local_handle) {
         cmap_finalize(local_handle);
     }
 
     if (name == NULL) {
         crm_notice("Unable to get node name for nodeid %u", nodeid);
     }
     return name;
 }
 
 void
 terminate_cs_connection(crm_cluster_t *cluster)
 {
     crm_notice("Disconnecting from Corosync");
 
     cluster_disconnect_cpg(cluster);
 
     if (pcmk_quorum_handle) {
         crm_trace("Disconnecting quorum");
         quorum_finalize(pcmk_quorum_handle);
         pcmk_quorum_handle = 0;
 
     } else {
         crm_info("No Quorum connection");
     }
 }
 
 int ais_membership_timer = 0;
 gboolean ais_membership_force = FALSE;
 
 
 static int
 pcmk_quorum_dispatch(gpointer user_data)
 {
     int rc = 0;
 
     rc = quorum_dispatch(pcmk_quorum_handle, CS_DISPATCH_ALL);
     if (rc < 0) {
         crm_err("Connection to the Quorum API failed: %d", rc);
         pcmk_quorum_handle = 0;
         return -1;
     }
     return 0;
 }
 
 static void
 pcmk_quorum_notification(quorum_handle_t handle,
                          uint32_t quorate,
                          uint64_t ring_id, uint32_t view_list_entries, uint32_t * view_list)
 {
     int i;
     GHashTableIter iter;
     crm_node_t *node = NULL;
     static gboolean init_phase = TRUE;
 
     if (quorate != crm_have_quorum) {
         crm_notice("Membership " U64T ": quorum %s (%lu)", ring_id,
                    quorate ? "acquired" : "lost", (long unsigned int)view_list_entries);
         crm_have_quorum = quorate;
 
     } else {
         crm_info("Membership " U64T ": quorum %s (%lu)", ring_id,
                  quorate ? "retained" : "still lost", (long unsigned int)view_list_entries);
     }
 
     if (view_list_entries == 0 && init_phase) {
         crm_info("Corosync membership is still forming, ignoring");
         return;
     }
 
     init_phase = FALSE;
 
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
         node->last_seen = 0;
     }
 
     for (i = 0; i < view_list_entries; i++) {
         uint32_t id = view_list[i];
         char *name = NULL;
 
         crm_debug("Member[%d] %u ", i, id);
 
         node = crm_get_peer(id, NULL);
         if (node->uname == NULL) {
             crm_info("Obtaining name for new node %u", id);
             name = corosync_node_name(0, id);
             node = crm_get_peer(id, name);
         }
 
         crm_update_peer_state(__FUNCTION__, node, CRM_NODE_MEMBER, ring_id);
         free(name);
     }
 
     crm_trace("Reaping unseen nodes...");
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
         if (node->last_seen != ring_id && node->state) {
             crm_update_peer_state(__FUNCTION__, node, CRM_NODE_LOST, 0);
         } else if (node->last_seen != ring_id) {
             crm_info("State of node %s[%u] is still unknown", node->uname, node->id);
         }
     }
 
     if (quorum_app_callback) {
         quorum_app_callback(ring_id, quorate);
     }
 }
 
 quorum_callbacks_t quorum_callbacks = {
     .quorum_notify_fn = pcmk_quorum_notification,
 };
 
 gboolean
 cluster_connect_quorum(gboolean(*dispatch) (unsigned long long, gboolean),
                        void (*destroy) (gpointer))
 {
     int rc = -1;
     int fd = 0;
     int quorate = 0;
     uint32_t quorum_type = 0;
     struct mainloop_fd_callbacks quorum_fd_callbacks;
 
     quorum_fd_callbacks.dispatch = pcmk_quorum_dispatch;
     quorum_fd_callbacks.destroy = destroy;
 
     crm_debug("Configuring Pacemaker to obtain quorum from Corosync");
 
     rc = quorum_initialize(&pcmk_quorum_handle, &quorum_callbacks, &quorum_type);
     if (rc != CS_OK) {
         crm_err("Could not connect to the Quorum API: %d\n", rc);
         goto bail;
 
     } else if (quorum_type != QUORUM_SET) {
         crm_err("Corosync quorum is not configured\n");
         goto bail;
     }
 
     rc = quorum_getquorate(pcmk_quorum_handle, &quorate);
     if (rc != CS_OK) {
         crm_err("Could not obtain the current Quorum API state: %d\n", rc);
         goto bail;
     }
 
     crm_notice("Quorum %s", quorate ? "acquired" : "lost");
     quorum_app_callback = dispatch;
     crm_have_quorum = quorate;
 
     rc = quorum_trackstart(pcmk_quorum_handle, CS_TRACK_CHANGES | CS_TRACK_CURRENT);
     if (rc != CS_OK) {
         crm_err("Could not setup Quorum API notifications: %d\n", rc);
         goto bail;
     }
 
     rc = quorum_fd_get(pcmk_quorum_handle, &fd);
     if (rc != CS_OK) {
         crm_err("Could not obtain the Quorum API connection: %d\n", rc);
         goto bail;
     }
 
     mainloop_add_fd("quorum", G_PRIORITY_HIGH, fd, dispatch, &quorum_fd_callbacks);
 
     corosync_initialize_nodelist(NULL, FALSE, NULL);
 
   bail:
     if (rc != CS_OK) {
         quorum_finalize(pcmk_quorum_handle);
         return FALSE;
     }
     return TRUE;
 }
 
 gboolean
 init_cs_connection(crm_cluster_t * cluster)
 {
     int retries = 0;
 
     while (retries < 5) {
         int rc = init_cs_connection_once(cluster);
 
         retries++;
 
         switch (rc) {
             case CS_OK:
                 return TRUE;
                 break;
             case CS_ERR_TRY_AGAIN:
             case CS_ERR_QUEUE_FULL:
                 sleep(retries);
                 break;
             default:
                 return FALSE;
         }
     }
 
     crm_err("Could not connect to corosync after %d retries", retries);
     return FALSE;
 }
 
 gboolean
 init_cs_connection_once(crm_cluster_t * cluster)
 {
     crm_node_t *peer = NULL;
     enum cluster_type_e stack = get_cluster_type();
 
     crm_peer_init();
 
     /* Here we just initialize comms */
     if (stack != pcmk_cluster_corosync) {
         crm_err("Invalid cluster type: %s (%d)", name_for_cluster_type(stack), stack);
         return FALSE;
     }
 
     if (cluster_connect_cpg(cluster) == FALSE) {
         return FALSE;
     }
     crm_info("Connection to '%s': established", name_for_cluster_type(stack));
 
     cluster->nodeid = get_local_nodeid(0);
     if(cluster->nodeid == 0) {
         crm_err("Could not establish local nodeid");
         return FALSE;
     }
 
     cluster->uname = get_node_name(0);
     if(cluster->uname == NULL) {
         crm_err("Could not establish local node name");
         return FALSE;
     }
 
     /* Ensure the local node always exists */
     peer = crm_get_peer(cluster->nodeid, cluster->uname);
     cluster->uuid = get_corosync_uuid(peer);
 
     return TRUE;
 }
 
 gboolean
 check_message_sanity(const AIS_Message * msg, const char *data)
 {
     gboolean sane = TRUE;
     int dest = msg->host.type;
     int tmp_size = msg->header.size - sizeof(AIS_Message);
 
     if (sane && msg->header.size == 0) {
         crm_warn("Message with no size");
         sane = FALSE;
     }
 
     if (sane && msg->header.error != CS_OK) {
         crm_warn("Message header contains an error: %d", msg->header.error);
         sane = FALSE;
     }
 
     if (sane && ais_data_len(msg) != tmp_size) {
         crm_warn("Message payload size is incorrect: expected %d, got %d", ais_data_len(msg),
                  tmp_size);
         sane = TRUE;
     }
 
     if (sane && ais_data_len(msg) == 0) {
         crm_warn("Message with no payload");
         sane = FALSE;
     }
 
     if (sane && data && msg->is_compressed == FALSE) {
         int str_size = strlen(data) + 1;
 
         if (ais_data_len(msg) != str_size) {
             int lpc = 0;
 
             crm_warn("Message payload is corrupted: expected %d bytes, got %d",
                      ais_data_len(msg), str_size);
             sane = FALSE;
             for (lpc = (str_size - 10); lpc < msg->size; lpc++) {
                 if (lpc < 0) {
                     lpc = 0;
                 }
                 crm_debug("bad_data[%d]: %d / '%c'", lpc, data[lpc], data[lpc]);
             }
         }
     }
 
     if (sane == FALSE) {
         crm_err("Invalid message %d: (dest=%s:%s, from=%s:%s.%u, compressed=%d, size=%d, total=%d)",
                 msg->id, ais_dest(&(msg->host)), msg_type2text(dest),
                 ais_dest(&(msg->sender)), msg_type2text(msg->sender.type),
                 msg->sender.pid, msg->is_compressed, ais_data_len(msg), msg->header.size);
 
     } else {
         crm_trace
-            ("Verfied message %d: (dest=%s:%s, from=%s:%s.%u, compressed=%d, size=%d, total=%d)",
+            ("Verified message %d: (dest=%s:%s, from=%s:%s.%u, compressed=%d, size=%d, total=%d)",
              msg->id, ais_dest(&(msg->host)), msg_type2text(dest), ais_dest(&(msg->sender)),
              msg_type2text(msg->sender.type), msg->sender.pid, msg->is_compressed,
              ais_data_len(msg), msg->header.size);
     }
 
     return sane;
 }
 
 enum cluster_type_e
 find_corosync_variant(void)
 {
     int rc = CS_OK;
     cmap_handle_t handle;
 
     rc = cmap_initialize(&handle);
 
     switch(rc) {
         case CS_OK:
             break;
         case CS_ERR_SECURITY:
             crm_debug("Failed to initialize the cmap API: Permission denied (%d)", rc);
             /* It's there, we just can't talk to it.
              * Good enough for us to identify as 'corosync'
              */
             return pcmk_cluster_corosync;
 
         default:
             crm_info("Failed to initialize the cmap API: %s (%d)",
                      ais_error2text(rc), rc);
             return pcmk_cluster_unknown;
     }
 
     cmap_finalize(handle);
     return pcmk_cluster_corosync;
 }
 
 gboolean
 crm_is_corosync_peer_active(const crm_node_t * node)
 {
     if (node == NULL) {
         crm_trace("NULL");
         return FALSE;
 
     } else if (safe_str_neq(node->state, CRM_NODE_MEMBER)) {
         crm_trace("%s: state=%s", node->uname, node->state);
         return FALSE;
 
     } else if ((node->processes & crm_proc_cpg) == 0) {
         crm_trace("%s: processes=%.16x", node->uname, node->processes);
         return FALSE;
     }
     return TRUE;
 }
 
 gboolean
 corosync_initialize_nodelist(void *cluster, gboolean force_member, xmlNode * xml_parent)
 {
     int lpc = 0;
     int rc = CS_OK;
     int retries = 0;
     gboolean any = FALSE;
     cmap_handle_t cmap_handle;
 
     do {
         rc = cmap_initialize(&cmap_handle);
         if (rc != CS_OK) {
             retries++;
             crm_debug("API connection setup failed: %s.  Retrying in %ds", cs_strerror(rc),
                       retries);
             sleep(retries);
         }
 
     } while (retries < 5 && rc != CS_OK);
 
     if (rc != CS_OK) {
         crm_warn("Could not connect to Cluster Configuration Database API, error %d", rc);
         return FALSE;
     }
 
     crm_peer_init();
     crm_trace("Initializing corosync nodelist");
     for (lpc = 0;; lpc++) {
         uint32_t nodeid = 0;
         char *name = NULL;
         char *key = NULL;
 
         key = g_strdup_printf("nodelist.node.%d.nodeid", lpc);
         rc = cmap_get_uint32(cmap_handle, key, &nodeid);
         g_free(key);
 
         if (rc != CS_OK) {
             break;
         }
 
         name = corosync_node_name(cmap_handle, nodeid);
         if (name != NULL) {
             GHashTableIter iter;
             crm_node_t *node = NULL;
 
             g_hash_table_iter_init(&iter, crm_peer_cache);
             while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
                 if(node && node->uname && strcasecmp(node->uname, name) == 0) {
                     if (node->id && node->id != nodeid) {
                         crm_crit("Nodes %u and %u share the same name '%s': shutting down", node->id,
                                  nodeid, name);
                         crm_exit(DAEMON_RESPAWN_STOP);
                     }
                 }
             }
         }
 
         if (nodeid > 0 || name != NULL) {
             crm_trace("Initializing node[%d] %u = %s", lpc, nodeid, name);
             crm_get_peer(nodeid, name);
         }
 
         if (nodeid > 0 && name != NULL) {
             any = TRUE;
 
             if (xml_parent) {
                 xmlNode *node = create_xml_node(xml_parent, XML_CIB_TAG_NODE);
 
                 crm_xml_add_int(node, XML_ATTR_ID, nodeid);
                 crm_xml_add(node, XML_ATTR_UNAME, name);
                 if (force_member) {
                     crm_xml_add(node, XML_ATTR_TYPE, CRM_NODE_MEMBER);
                 }
             }
         }
 
         free(name);
     }
     cmap_finalize(cmap_handle);
     return any;
 }
diff --git a/lib/cluster/legacy.c b/lib/cluster/legacy.c
index 0ebefa63cb..f3daee808d 100644
--- a/lib/cluster/legacy.c
+++ b/lib/cluster/legacy.c
@@ -1,946 +1,946 @@
 /*
  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
  * This library is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 #include <crm/cluster/internal.h>
 #include <bzlib.h>
 #include <crm/common/ipc.h>
 #include <crm/cluster.h>
 #include <crm/common/mainloop.h>
 #include <sys/utsname.h>
 #include <sys/socket.h>
 #include <netdb.h>
 
 #if SUPPORT_COROSYNC
 #  include <corosync/confdb.h>
 #  include <corosync/corodefs.h>
 #  include <corosync/cpg.h>
 #  include <corosync/cfg.h>
 #endif
 
 #if HAVE_CMAP
 #  include <corosync/cmap.h>
 #endif
 
 #if SUPPORT_CMAN
 #  include <libcman.h>
 cman_handle_t pcmk_cman_handle = NULL;
 #endif
 
 int ais_membership_timer = 0;
 gboolean ais_membership_force = FALSE;
 int plugin_dispatch(gpointer user_data);
 
 int ais_fd_sync = -1;
 int ais_fd_async = -1;          /* never send messages via this channel */
 void *ais_ipc_ctx = NULL;
 
 hdb_handle_t ais_ipc_handle = 0;
 
 /* Ask the Corosync plugin (over the synchronous IPC channel) for the local
  * node's id and uname.  The first successful call caches both values in
  * function-local statics; later calls copy from the cache without any IPC.
  *
  * id/uname may each be NULL if the caller does not want that value.  The
  * string returned via *uname is strdup()ed: the caller owns and must free it.
  * Returns TRUE on success, FALSE if the request could not be completed.
  */
 static gboolean
 plugin_get_details(uint32_t * id, char **uname)
 {
     struct iovec iov;
     int retries = 0;
     int rc = CS_OK;
     cs_ipc_header_response_t header;
     struct crm_ais_nodeid_resp_s answer;
 
     static uint32_t local_id = 0;
     static char *local_uname = NULL;
 
     /* Fast path: answer from the cache populated by a previous call */
     if(local_id) {
         if(id) *id = local_id;
         if(uname) *uname = strdup(local_uname);
         return TRUE;
     }
 
     header.error = CS_OK;
     header.id = crm_class_nodeid;
     header.size = sizeof(cs_ipc_header_response_t);
 
     iov.iov_base = &header;
     iov.iov_len = header.size;
 
   retry:
     errno = 0;
     rc = coroipcc_msg_send_reply_receive(ais_ipc_handle, &iov, 1, &answer, sizeof(answer));
     if (rc == CS_OK) {
         CRM_CHECK(answer.header.size == sizeof(struct crm_ais_nodeid_resp_s),
                   crm_err("Odd message: id=%d, size=%d, error=%d",
                           answer.header.id, answer.header.size, answer.header.error));
         CRM_CHECK(answer.header.id == crm_class_nodeid,
                   crm_err("Bad response id: %d", answer.header.id));
     }
 
     /* Transient congestion: retry with a linearly growing sleep, up to 20 times */
     if ((rc == CS_ERR_TRY_AGAIN || rc == CS_ERR_QUEUE_FULL) && retries < 20) {
         retries++;
         crm_info("Peer overloaded: Re-sending message (Attempt %d of 20)", retries);
         sleep(retries);         /* Proportional back off */
         goto retry;
     }
 
     if (rc != CS_OK) {
         crm_err("Sending nodeid request: FAILED (rc=%d): %s", rc, ais_error2text(rc));
         return FALSE;
 
     } else if (answer.header.error != CS_OK) {
         crm_err("Bad response from peer: (rc=%d): %s", rc, ais_error2text(rc));
         return FALSE;
     }
 
     crm_info("Server details: id=%u uname=%s cname=%s", answer.id, answer.uname, answer.cname);
 
     /* NOTE(review): assumes answer.uname is NUL-terminated by the plugin --
      * confirm against the crm_ais_nodeid_resp_s producer before relying on it */
     local_id = answer.id;
     local_uname = strdup(answer.uname);
 
     if(id) *id = local_id;
     if(uname) *uname = strdup(local_uname);
     return TRUE;
 }
 
 /* Send a pre-built plugin message (iov[0].iov_base is an AIS_Message) to
  * handler 'class' of the plugin's crm_lib_service, waiting for the IPC ack.
  *
  * Takes ownership of iov and iov[0].iov_base and frees them on every path,
  * success or failure, so callers must not free them afterwards.
  * Returns TRUE only when the plugin acknowledged the message with CS_OK.
  */
 bool
 send_plugin_text(int class, struct iovec *iov)
 {
     int rc = CS_OK;
     int retries = 0;
     int buf_len = sizeof(cs_ipc_header_response_t);
     char *buf = malloc(buf_len);
     AIS_Message *ais_msg = (AIS_Message*)iov[0].iov_base;
     cs_ipc_header_response_t *header = (cs_ipc_header_response_t *) buf;
 
     CRM_ASSERT(buf != NULL);
     /* There are only 6 handlers registered to crm_lib_service in plugin.c.
      * Release everything we own before bailing out (previously buf and the
      * iov were leaked on this path). */
     CRM_CHECK(class < 6, crm_err("Invalid message class: %d", class);
               free(iov[0].iov_base); free(iov); free(buf);
               return FALSE);
 
     do {
         if (rc == CS_ERR_TRY_AGAIN || rc == CS_ERR_QUEUE_FULL) {
             retries++;
             crm_info("Peer overloaded or membership in flux:"
                      " Re-sending message (Attempt %d of 20)", retries);
             sleep(retries);     /* Proportional back off */
         }
 
         errno = 0;
         rc = coroipcc_msg_send_reply_receive(ais_ipc_handle, iov, 1, buf, buf_len);
 
     } while ((rc == CS_ERR_TRY_AGAIN || rc == CS_ERR_QUEUE_FULL) && retries < 20);
 
     if (rc == CS_OK) {
         /* Sanity-check the ack header before trusting its error field */
         CRM_CHECK(header->size == sizeof(cs_ipc_header_response_t),
                   crm_err("Odd message: id=%d, size=%d, class=%d, error=%d",
                           header->id, header->size, class, header->error));
 
         CRM_ASSERT(buf_len >= header->size);
         CRM_CHECK(header->id == CRM_MESSAGE_IPC_ACK,
                   crm_err("Bad response id (%d) for request (%d)", header->id,
                           ais_msg->header.id));
         CRM_CHECK(header->error == CS_OK, rc = header->error);
 
     } else {
         crm_perror(LOG_ERR, "Sending plugin message %d FAILED: %s (%d)",
                    ais_msg->id, ais_error2text(rc), rc);
     }
 
     free(iov[0].iov_base);
     free(iov);
     free(buf);
 
     return (rc == CS_OK);
 }
 
 /* Tear down every Corosync-era connection this process may hold: the classic
  * plugin IPC channel, the CPG membership connection, and (on cman clusters)
  * the cman handle.  Safe to call when some connections were never made.
  */
 void
 terminate_cs_connection(crm_cluster_t *cluster)
 {
     crm_notice("Disconnecting from Corosync");
 
     if (is_classic_ais_cluster()) {
         if (ais_ipc_handle) {
             crm_trace("Disconnecting plugin");
             coroipcc_service_disconnect(ais_ipc_handle);
             ais_ipc_handle = 0;
         } else {
             crm_info("No plugin connection");
         }
     }
     cluster_disconnect_cpg(cluster);
 
 #  if SUPPORT_CMAN
     if (is_cman_cluster()) {
         if (pcmk_cman_handle) {
             crm_info("Disconnecting cman");
             /* Stop notifications first; only destroy the handle if that worked */
             if (cman_stop_notification(pcmk_cman_handle) >= 0) {
                 crm_info("Destroying cman");
                 cman_finish(pcmk_cman_handle);
             }
 
         } else {
             crm_info("No cman connection");
         }
     }
 #  endif
     /* Invalidate the cached plugin file descriptors */
     ais_fd_async = -1;
     ais_fd_sync = -1;
 }
 
 /* Process a membership or quorum update from the plugin: update the global
  * quorum state (crm_have_quorum) and membership sequence (crm_peer_seq),
  * then fold each <node> child of the XML payload into the peer cache.
  * Messages of any other class are ignored.
  */
 void
 plugin_handle_membership(AIS_Message *msg)
 {
     if (msg->header.id == crm_class_members || msg->header.id == crm_class_quorum) {
         xmlNode *member = NULL;
         const char *value = NULL;
         gboolean quorate = FALSE;
         xmlNode *xml = string2xml(msg->data);
 
         if (xml == NULL) {
             crm_err("Invalid membership update: %s", msg->data);
             return;
         }
 
         value = crm_element_value(xml, "quorate");
         /* Free the parsed document on the early-return paths too
          * (previously leaked here) */
         CRM_CHECK(value != NULL,
                   crm_log_xml_err(xml, "No quorum value:"); free_xml(xml); return);
         if (crm_is_true(value)) {
             quorate = TRUE;
         }
 
         value = crm_element_value(xml, "id");
         CRM_CHECK(value != NULL,
                   crm_log_xml_err(xml, "No membership id"); free_xml(xml); return);
         crm_peer_seq = crm_int_helper(value, NULL);
 
         if (quorate != crm_have_quorum) {
             crm_notice("Membership %s: quorum %s", value, quorate ? "acquired" : "lost");
             crm_have_quorum = quorate;
 
         } else {
             crm_info("Membership %s: quorum %s", value, quorate ? "retained" : "still lost");
         }
 
         for (member = __xml_first_child(xml); member != NULL; member = __xml_next(member)) {
             const char *id_s = crm_element_value(member, "id");
             const char *addr = crm_element_value(member, "addr");
             const char *uname = crm_element_value(member, "uname");
             const char *state = crm_element_value(member, "state");
             const char *born_s = crm_element_value(member, "born");
             const char *seen_s = crm_element_value(member, "seen");
             const char *votes_s = crm_element_value(member, "votes");
             const char *procs_s = crm_element_value(member, "processes");
 
             int votes = crm_int_helper(votes_s, NULL);
             unsigned int id = crm_int_helper(id_s, NULL);
             unsigned int procs = crm_int_helper(procs_s, NULL);
 
             /* TODO: These values will contain garbage if version < 0.7.1 */
             uint64_t born = crm_int_helper(born_s, NULL);
             uint64_t seen = crm_int_helper(seen_s, NULL);
 
             crm_update_peer(__FUNCTION__, id, born, seen, votes, procs, uname, uname, addr, state);
         }
         free_xml(xml);
     }
 }
 
 /* Fallback CPG delivery callback used when the caller registered none:
  * decode/uncompress the message (for its logging side effects) and
  * discard the payload.
  */
 static void
 plugin_default_deliver_message(cpg_handle_t handle,
                                const struct cpg_name *groupName,
                                uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len)
 {
     uint32_t kind = 0;
     const char *from = NULL;
     char *payload = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from);
 
     free(payload);
 }
 
 /* Mainloop dispatch callback for the plugin IPC fd: drain all queued
  * messages, handing each to the cluster's registered CPG deliver callback
  * (or the default discard handler).  Returns 0 to stay registered, -1 to
  * have the mainloop drop the source after an unrecoverable receive error.
  */
 int
 plugin_dispatch(gpointer user_data)
 {
     int rc = CS_OK;
     crm_cluster_t *cluster = (crm_cluster_t *) user_data;
 
     do {
         char *buffer = NULL;
 
         rc = coroipcc_dispatch_get(ais_ipc_handle, (void **)&buffer, 0);
         if (rc == CS_ERR_TRY_AGAIN || rc == CS_ERR_QUEUE_FULL) {
             /* Nothing ready right now; try again on the next poll */
             return 0;
         }
         if (rc != CS_OK) {
             crm_perror(LOG_ERR, "Receiving message body failed: (%d) %s", rc, ais_error2text(rc));
             return -1;
         }
         if (buffer == NULL) {
             /* NULL is a legal "no message afterall" value */
             return 0;
         }
         /*
         cpg_deliver_fn_t(cpg_handle_t handle, const struct cpg_name *group_name,
                          uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len);
         */
         if (cluster && cluster->cpg.cpg_deliver_fn) {
             cluster->cpg.cpg_deliver_fn(0, NULL, 0, 0, buffer, 0);
 
         } else {
             plugin_default_deliver_message(0, NULL, 0, 0, buffer, 0);
         }
 
         /* Return the dispatch buffer to the IPC layer */
         coroipcc_dispatch_put(ais_ipc_handle);
 
     } while (ais_ipc_handle);
 
     return 0;
 }
 
 /* Destroy callback for the plugin IPC fd: losing the plugin connection is
  * fatal, so invalidate the cached fd and exit (ENOTCONN lets the respawn
  * logic restart us).
  */
 static void
 plugin_destroy(gpointer user_data)
 {
     crm_err("AIS connection terminated");
     ais_fd_sync = -1;
     crm_exit(ENOTCONN);
 }
 
 #  if SUPPORT_CMAN
 
 /* Mainloop dispatch callback for the cman fd: let libcman process all
  * pending events.  Returns FALSE (deregistering the source) after a
  * dispatch failure, TRUE otherwise.
  */
 static int
 pcmk_cman_dispatch(gpointer user_data)
 {
     int rc = cman_dispatch(pcmk_cman_handle, CMAN_DISPATCH_ALL);
 
     if (rc >= 0) {
         return TRUE;
     }
 
     crm_err("Connection to cman failed: %d", rc);
     pcmk_cman_handle = 0;
     return FALSE;
 }
 
 #    define MAX_NODES 256
 
 /* libcman notification callback.  On a state change: refresh the membership
  * sequence and quorum flag from cman, fold the cman node list into the peer
  * cache, then invoke the quorum-notification function passed via privdata.
  * Shutdown requests are always refused (pacemaker must stop first) and
  * config updates are ignored.
  */
 static void
 cman_event_callback(cman_handle_t handle, void *privdata, int reason, int arg)
 {
     int rc = 0, lpc = 0, node_count = 0;
 
     cman_cluster_t cluster;
     static cman_node_t cman_nodes[MAX_NODES];
 
     /* privdata carries the caller's quorum-notification function */
     gboolean(*dispatch) (unsigned long long, gboolean) = privdata;
 
     switch (reason) {
         case CMAN_REASON_STATECHANGE:
 
             memset(&cluster, 0, sizeof(cluster));
             rc = cman_get_cluster(pcmk_cman_handle, &cluster);
             if (rc < 0) {
                 crm_err("Couldn't query cman cluster details: %d %d", rc, errno);
                 return;
             }
 
             crm_peer_seq = cluster.ci_generation;
             /* For STATECHANGE, arg is the new quorate flag */
             if (arg != crm_have_quorum) {
                 crm_notice("Membership %llu: quorum %s", crm_peer_seq, arg ? "acquired" : "lost");
                 crm_have_quorum = arg;
 
             } else {
                 crm_info("Membership %llu: quorum %s", crm_peer_seq,
                          arg ? "retained" : "still lost");
             }
 
             rc = cman_get_nodes(pcmk_cman_handle, MAX_NODES, &node_count, cman_nodes);
             if (rc < 0) {
                 crm_err("Couldn't query cman node list: %d %d", rc, errno);
                 return;
             }
 
             for (lpc = 0; lpc < node_count; lpc++) {
                 crm_node_t *peer = NULL;
 
                 if (cman_nodes[lpc].cn_nodeid == 0) {
                     /* Never allow node ID 0 to be considered a member #315711 */
                     /* Skip entirely, its a qdisk */
                     continue;
                 }
 
                 peer = crm_get_peer(cman_nodes[lpc].cn_nodeid, cman_nodes[lpc].cn_name);
                 if(cman_nodes[lpc].cn_member) {
                     crm_update_peer_state(__FUNCTION__, peer, CRM_NODE_MEMBER, crm_peer_seq);
 
                 } else if(peer->state) {
                     crm_update_peer_state(__FUNCTION__, peer, CRM_NODE_LOST, 0);
 
                 } else {
                     /* Not a member now, and we never saw it join: leave unchanged */
                     crm_info("State of node %s[%u] is still unknown", peer->uname, peer->id);
                 }
             }
 
             if (dispatch) {
                 dispatch(crm_peer_seq, crm_have_quorum);
             }
             break;
 
         case CMAN_REASON_TRY_SHUTDOWN:
             /* Always reply with a negative - pacemaker needs to be stopped first */
             crm_notice("CMAN wants to shut down: %s", arg ? "forced" : "optional");
             cman_replyto_shutdown(pcmk_cman_handle, 0);
             break;
 
         case CMAN_REASON_CONFIG_UPDATE:
             /* Ignore */
             break;
     }
 }
 #  endif
 
 /* Connect to cman as Pacemaker's quorum source: initialize the handle,
  * register cman_event_callback for notifications, replay the current
  * membership state, and add the cman fd to the mainloop.
  *
  * 'dispatch' receives (membership seq, quorate) updates; 'destroy' is
  * called when the fd source dies.  Returns TRUE on success.  In builds
  * without cman support this is fatal.
  */
 gboolean
 init_cman_connection(gboolean(*dispatch) (unsigned long long, gboolean), void (*destroy) (gpointer))
 {
 #  if SUPPORT_CMAN
     int rc = -1, fd = -1;
     cman_cluster_t cluster;
 
     struct mainloop_fd_callbacks cman_fd_callbacks = {
         .dispatch = pcmk_cman_dispatch,
         .destroy = destroy,
     };
 
     crm_info("Configuring Pacemaker to obtain quorum from cman");
 
     memset(&cluster, 0, sizeof(cluster));
 
     pcmk_cman_handle = cman_init(dispatch);
     if (pcmk_cman_handle == NULL || cman_is_active(pcmk_cman_handle) == FALSE) {
         crm_err("Couldn't connect to cman");
         goto cman_bail;
     }
 
     rc = cman_start_notification(pcmk_cman_handle, cman_event_callback);
     if (rc < 0) {
         crm_err("Couldn't register for cman notifications: %d %d", rc, errno);
         goto cman_bail;
     }
 
     /* Get the current membership state */
     cman_event_callback(pcmk_cman_handle, dispatch, CMAN_REASON_STATECHANGE,
                         cman_is_quorate(pcmk_cman_handle));
 
     fd = cman_get_fd(pcmk_cman_handle);
 
     mainloop_add_fd("cman", G_PRIORITY_MEDIUM, fd, dispatch, &cman_fd_callbacks);
 
   cman_bail:
     if (rc < 0) {
         /* Don't pass a NULL handle to cman_finish() when cman_init() failed */
         if (pcmk_cman_handle != NULL) {
             cman_finish(pcmk_cman_handle);
             pcmk_cman_handle = NULL;
         }
         return FALSE;
     }
 #  else
     crm_err("cman quorum is not supported in this build");
     crm_exit(DAEMON_RESPAWN_STOP);
 #  endif
     return TRUE;
 }
 
 #  ifdef SUPPORT_COROSYNC
 
 /* Stub for builds without the Corosync quorum API: reaching it means the
  * stack was misdetected, so exit fatally (DAEMON_RESPAWN_STOP prevents a
  * respawn loop).  The return statement only silences compiler warnings.
  */
 gboolean
 cluster_connect_quorum(gboolean(*dispatch) (unsigned long long, gboolean),
                        void (*destroy) (gpointer))
 {
     crm_err("The Corosync quorum API is not supported in this build");
     crm_exit(DAEMON_RESPAWN_STOP);
     return TRUE;
 }
 
 /* Establish the classic (plugin-based) Corosync connection: open the
  * coroipcc channel to the pacemaker plugin service, register its fd with
  * the mainloop, announce our pid, and fill in cluster->nodeid and
  * cluster->uname.  Exits fatally on a node-name mismatch.  Returns TRUE
  * once the connection is fully established.
  */
 static gboolean
 init_cs_connection_classic(crm_cluster_t * cluster)
 {
     int rc;
     int pid = 0;
     char *pid_s = NULL;
     const char *name = NULL;
     crm_node_t *peer = NULL;
     enum crm_proc_flag proc = 0;
 
     struct mainloop_fd_callbacks ais_fd_callbacks = {
         .dispatch = plugin_dispatch,
         .destroy = cluster->destroy,
     };
 
     crm_info("Creating connection to our Corosync plugin");
     rc = coroipcc_service_connect(COROSYNC_SOCKET_NAME, PCMK_SERVICE_ID,
                                   AIS_IPC_MESSAGE_SIZE, AIS_IPC_MESSAGE_SIZE, AIS_IPC_MESSAGE_SIZE,
                                   &ais_ipc_handle);
     if (ais_ipc_handle) {
         coroipcc_fd_get(ais_ipc_handle, &ais_fd_async);
     } else {
         crm_info("Connection to our Corosync plugin (%d) failed: %s (%d)",
                  PCMK_SERVICE_ID, strerror(errno), errno);
         return FALSE;
     }
     /* Connect claimed success but produced no usable fd: treat as failure */
     if (ais_fd_async <= 0 && rc == CS_OK) {
         crm_err("No context created, but connection reported 'ok'");
         rc = CS_ERR_LIBRARY;
     }
     if (rc != CS_OK) {
         crm_info("Connection to our Corosync plugin (%d) failed: %s (%d)", PCMK_SERVICE_ID,
                  ais_error2text(rc), rc);
     }
 
     if (rc != CS_OK) {
         return FALSE;
     }
 
     if (ais_fd_callbacks.destroy == NULL) {
         ais_fd_callbacks.destroy = plugin_destroy;
     }
 
     mainloop_add_fd("corosync-plugin", G_PRIORITY_MEDIUM, ais_fd_async, cluster, &ais_fd_callbacks);
     crm_info("AIS connection established");
 
     /* Tell the plugin who we are */
     pid = getpid();
     pid_s = crm_itoa(pid);
     send_cluster_text(crm_class_cluster, pid_s, TRUE, NULL, crm_msg_ais);
     free(pid_s);
 
     cluster->nodeid = get_local_nodeid(0);
 
     /* Cross-check the plugin's idea of our name against the local lookup */
     name = get_local_node_name();
     plugin_get_details(NULL, &(cluster->uname));
     if (safe_str_neq(name, cluster->uname)) {
         crm_crit("Node name mismatch!  Corosync supplied %s but our lookup returned %s",
                  cluster->uname, name);
         crm_notice
             ("Node name mismatches usually occur when assigned automatically by DHCP servers");
         crm_exit(ENOTUNIQ);
     }
 
     proc = text2proc(crm_system_name);
     peer = crm_get_peer(cluster->nodeid, cluster->uname);
     crm_update_peer_proc(__FUNCTION__, peer, proc|crm_proc_plugin, ONLINESTATUS);
 
     return TRUE;
 }
 
 /* IPC dispatch callback for updates from the MCP (pacemakerd): parse the
  * XML buffer and, on classic/plugin clusters only, merge each child node's
  * id/uname/process mask into the peer cache.  Always returns 0 so the IPC
  * source stays registered.
  */
 static int
 pcmk_mcp_dispatch(const char *buffer, ssize_t length, gpointer userdata)
 {
     xmlNode *msg = string2xml(buffer);
 
     if (msg && is_classic_ais_cluster()) {
         xmlNode *node = NULL;
 
         for (node = __xml_first_child(msg); node != NULL; node = __xml_next(node)) {
             int id = 0;
             int children = 0;
             const char *uname = crm_element_value(node, "uname");
 
             crm_element_value_int(node, "id", &id);
             /* "processes" is a bitmask of crm_proc_flag values */
             crm_element_value_int(node, "processes", &children);
             if (id == 0) {
                 crm_log_xml_err(msg, "Bad Update");
             } else {
                 crm_node_t *peer = crm_get_peer(id, uname);
 
                 crm_update_peer_proc(__FUNCTION__, peer, children, NULL);
             }
         }
     }
 
     /* free_xml(NULL) is a safe no-op when parsing failed */
     free_xml(msg);
     return 0;
 }
 
 /* Destroy callback for the MCP IPC connection: user_data smuggles the
  * caller's own destroy function; forward to it when one was supplied.
  */
 static void
 pcmk_mcp_destroy(gpointer user_data)
 {
     void (*callback) (gpointer data) = user_data;
 
     if (callback == NULL) {
         return;
     }
     callback(NULL);
 }
 
 /* Establish the Corosync connection, retrying up to 5 times on transient
  * (TRY_AGAIN/QUEUE_FULL) failures with a growing sleep.  When started by
  * the MCP (HA_mcp set) on a non-cman stack, also open an IPC channel to
  * pacemakerd and poke it so it starts sending us process updates.
  * Returns TRUE on success, FALSE on a permanent error or retry exhaustion.
  */
 gboolean
 init_cs_connection(crm_cluster_t * cluster)
 {
     int retries = 0;
 
     static struct ipc_client_callbacks mcp_callbacks = {
         .dispatch = pcmk_mcp_dispatch,
         .destroy = pcmk_mcp_destroy
     };
 
     while (retries < 5) {
         int rc = init_cs_connection_once(cluster);
 
         retries++;
         switch (rc) {
             case CS_OK:
                 if (getenv("HA_mcp") && get_cluster_type() != pcmk_cluster_cman) {
                     xmlNode *poke = create_xml_node(NULL, "poke");
                     /* The mainloop owns this IPC source for the process lifetime */
                     mainloop_io_t *ipc =
                         mainloop_add_ipc_client(CRM_SYSTEM_MCP, G_PRIORITY_MEDIUM, 0,
                                                 cluster->destroy, &mcp_callbacks);
 
                     crm_ipc_send(mainloop_get_ipc_client(ipc), poke, 0, 0, NULL);
                     free_xml(poke);
                 }
                 return TRUE;
                 break;
             case CS_ERR_TRY_AGAIN:
             case CS_ERR_QUEUE_FULL:
                 /* Transient: back off proportionally and retry */
                 sleep(retries);
                 break;
             default:
                 return FALSE;
         }
     }
 
     crm_err("Retry count exceeded: %d", retries);
     return FALSE;
 }
 
 /* The plugin stack offers no way to look up a peer's name by nodeid.
  * Returning NULL makes the caller fall back to the uname() default,
  * which is only valid for the local host.
  */
 char *
 classic_node_name(uint32_t nodeid)
 {
     return NULL;
 }
 
 /* Look up the name cman knows for 'nodeid' (0 means the local node).
  * Returns a malloc'd string the caller must free, or NULL when the name
  * could not be determined (no cman support, connection failure, or a
  * failed lookup) so callers can fall back to other sources.
  */
 char *
 cman_node_name(uint32_t nodeid)
 {
     char *name = NULL;
 
 #  if SUPPORT_CMAN
     cman_node_t us;
     cman_handle_t cman;
 
     cman = cman_init(NULL);
     if (cman != NULL && cman_is_active(cman)) {
         us.cn_name[0] = 0;
         /* Only trust cn_name when the lookup succeeded and produced a name
          * (previously an empty string was strdup'd on failure) */
         if (cman_get_node(cman, nodeid, &us) >= 0 && us.cn_name[0] != 0) {
             name = strdup(us.cn_name);
             crm_info("Using CMAN node name %s for %u", name, nodeid);
         }
     }
 
     /* cman_init() may have failed; never pass a NULL handle to cman_finish() */
     if (cman != NULL) {
         cman_finish(cman);
     }
 #  endif
 
     if (name == NULL) {
         crm_debug("Unable to get node name for nodeid %u", nodeid);
     }
     return name;
 }
 
 extern int set_cluster_type(enum cluster_type_e type);
 
 /* One connection attempt: bring up the stack-specific transport (plugin or
  * CPG), then resolve and record the local nodeid, node name and UUID in
  * 'cluster'.  Returns FALSE on any failure; the caller retries.
  */
 gboolean
 init_cs_connection_once(crm_cluster_t * cluster)
 {
     crm_node_t *peer = NULL;
     enum cluster_type_e stack = get_cluster_type();
 
     crm_peer_init();
 
     /* Here we just initialize comms */
     switch (stack) {
         case pcmk_cluster_classic_ais:
             if (init_cs_connection_classic(cluster) == FALSE) {
                 return FALSE;
             }
             break;
         case pcmk_cluster_cman:
             if (cluster_connect_cpg(cluster) == FALSE) {
                 return FALSE;
             }
             /* NOTE(review): cluster->uname is overwritten unconditionally by
              * get_node_name(0) below, so this strdup'd value appears to be
              * leaked -- confirm and consider freeing before reassignment */
             cluster->uname = cman_node_name(0 /* CMAN_NODEID_US */ );
             break;
         case pcmk_cluster_heartbeat:
             crm_info("Could not find an active corosync based cluster");
             return FALSE;
             break;
         default:
             crm_err("Invalid cluster type: %s (%d)", name_for_cluster_type(stack), stack);
             return FALSE;
             break;
     }
 
     crm_info("Connection to '%s': established", name_for_cluster_type(stack));
 
     cluster->nodeid = get_local_nodeid(0);
     if(cluster->nodeid == 0) {
         crm_err("Could not establish local nodeid");
         return FALSE;
     }
 
     cluster->uname = get_node_name(0);
     if(cluster->uname == NULL) {
         crm_err("Could not establish local node name");
         return FALSE;
     }
 
     /* Ensure the local node always exists */
     peer = crm_get_peer(cluster->nodeid, cluster->uname);
     cluster->uuid = get_corosync_uuid(peer);
 
     return TRUE;
 }
 
 /* Validate a received AIS_Message header against its payload: non-zero
  * size, clean header error code, non-empty payload and (for uncompressed
  * messages) a payload length matching strlen(data)+1.  Logs a detailed
  * summary either way and returns TRUE when the message looks sane.
  */
 gboolean
 check_message_sanity(const AIS_Message * msg, const char *data)
 {
     gboolean sane = TRUE;
     int dest = msg->host.type;
     int tmp_size = msg->header.size - sizeof(AIS_Message);
 
     if (sane && msg->header.size == 0) {
         crm_warn("Message with no size");
         sane = FALSE;
     }
 
     if (sane && msg->header.error != CS_OK) {
         crm_warn("Message header contains an error: %d", msg->header.error);
         sane = FALSE;
     }
 
     /* NOTE(review): this branch warns but leaves sane == TRUE, so a header
      * vs payload size mismatch is tolerated rather than rejected -- confirm
      * this asymmetry with the other checks is intentional */
     if (sane && ais_data_len(msg) != tmp_size) {
         crm_warn("Message payload size is incorrect: expected %d, got %d", ais_data_len(msg),
                  tmp_size);
         sane = TRUE;
     }
 
     if (sane && ais_data_len(msg) == 0) {
         crm_warn("Message with no payload");
         sane = FALSE;
     }
 
     if (sane && data && msg->is_compressed == FALSE) {
         int str_size = strlen(data) + 1;
 
         if (ais_data_len(msg) != str_size) {
             int lpc = 0;
 
             crm_warn("Message payload is corrupted: expected %d bytes, got %d",
                      ais_data_len(msg), str_size);
             sane = FALSE;
             /* Dump the tail of the payload around the divergence point */
             for (lpc = (str_size - 10); lpc < msg->size; lpc++) {
                 if (lpc < 0) {
                     lpc = 0;
                 }
                 crm_debug("bad_data[%d]: %d / '%c'", lpc, data[lpc], data[lpc]);
             }
         }
     }
 
     if (sane == FALSE) {
         crm_err("Invalid message %d: (dest=%s:%s, from=%s:%s.%d, compressed=%d, size=%d, total=%d)",
                 msg->id, ais_dest(&(msg->host)), msg_type2text(dest),
                 ais_dest(&(msg->sender)), msg_type2text(msg->sender.type),
                 msg->sender.pid, msg->is_compressed, ais_data_len(msg), msg->header.size);
 
     } else {
         crm_trace
-            ("Verfied message %d: (dest=%s:%s, from=%s:%s.%d, compressed=%d, size=%d, total=%d)",
+            ("Verified message %d: (dest=%s:%s, from=%s:%s.%d, compressed=%d, size=%d, total=%d)",
              msg->id, ais_dest(&(msg->host)), msg_type2text(dest), ais_dest(&(msg->sender)),
              msg_type2text(msg->sender.type), msg->sender.pid, msg->is_compressed,
              ais_data_len(msg), msg->header.size);
     }
 
     return sane;
 }
 #endif
 
 /* Resolve a confdb configuration option into *value, trying in order:
  * the confdb object, the HA_<key> environment variable, then 'fallback'.
  * Any previous *value is freed first.  *value is always malloc'd (or NULL
  * when nothing was found and fallback is NULL); the caller owns it.
  * Returns 0 when confdb or the environment supplied the value, -1 when
  * the fallback (or nothing) was used.
  */
 static int
 get_config_opt(confdb_handle_t config,
                hdb_handle_t object_handle, const char *key, char **value, const char *fallback)
 {
     size_t len = 0;
     char *env_key = NULL;
     const char *env_value = NULL;
     char buffer[256];
 
     if (*value) {
         free(*value);
         *value = NULL;
     }
 
     if (object_handle > 0) {
         /* NOTE(review): assumes confdb_key_get() NUL-terminates buffer for
          * string keys -- confirm against the confdb API before relying on it */
         if (CS_OK == confdb_key_get(config, object_handle, key, strlen(key), &buffer, &len)) {
             *value = strdup(buffer);
         }
     }
 
     if (*value) {
         crm_info("Found '%s' for option: %s", *value, key);
         return 0;
     }
 
     /* Fall back to the HA_<key> environment variable */
     env_key = crm_concat("HA", key, '_');
     env_value = getenv(env_key);
     free(env_key);
 
     /* Fixed: this previously tested *value (always NULL here), so the
      * environment override was never applied */
     if (env_value) {
         crm_info("Found '%s' in ENV for option: %s", env_value, key);
         *value = strdup(env_value);
         return 0;
     }
 
     if (fallback) {
         crm_info("Defaulting to '%s' for option: %s", fallback, key);
         *value = strdup(fallback);
 
     } else {
         crm_info("No default for option: %s", key);
     }
 
     return -1;
 }
 
 /* Open a confdb search context rooted at the top-level object.
  * Returns OBJECT_PARENT_HANDLE on success, 0 on failure.
  */
 static confdb_handle_t
 config_find_init(confdb_handle_t config)
 {
     confdb_handle_t local_handle = OBJECT_PARENT_HANDLE;
     cs_error_t rc = confdb_object_find_start(config, local_handle);
 
     if (rc != CS_OK) {
         crm_err("Couldn't create search context: %d", rc);
         return 0;
     }
     return local_handle;
 }
 
 /* Find the next confdb object called 'name' under the search context
  * 'top_handle' (as returned by config_find_init).  Returns the object
  * handle, or 0 when there is no such object or no valid context.
  */
 static hdb_handle_t
 config_find_next(confdb_handle_t config, const char *name, confdb_handle_t top_handle)
 {
     hdb_handle_t local_handle = 0;
     cs_error_t rc;
 
     if (top_handle == 0) {
         crm_err("Couldn't search for %s: no valid context", name);
         return 0;
     }
 
     crm_trace("Searching for %s in " HDB_X_FORMAT, name, top_handle);
     rc = confdb_object_find(config, top_handle, name, strlen(name), &local_handle);
     if (rc == CS_OK) {
         crm_info("Processing additional %s options...", name);
     } else {
         crm_info("No additional configuration supplied for: %s", name);
         local_handle = 0;
     }
     return local_handle;
 }
 
 /* Determine which Corosync flavour is running by inspecting the confdb:
  * a "service" object named "pacemaker" means the classic plugin, a
  * "quorum" provider of "quorum_cman" means cman.  Returns the detected
  * type, pcmk_cluster_unknown if confdb is unreachable, or
  * pcmk_cluster_invalid when corosync runs with neither integration.
  */
 enum cluster_type_e
 find_corosync_variant(void)
 {
     confdb_handle_t config;
     enum cluster_type_e found = pcmk_cluster_unknown;
 
     int rc;
     char *value = NULL;
     confdb_handle_t top_handle = 0;
     hdb_handle_t local_handle = 0;
     static confdb_callbacks_t callbacks = { };
 
     rc = confdb_initialize(&config, &callbacks);
     if (rc != CS_OK) {
         crm_debug("Could not initialize Cluster Configuration Database API instance error %d", rc);
         return found;
     }
 
     /* Walk all "service" objects looking for the pacemaker plugin */
     top_handle = config_find_init(config);
     local_handle = config_find_next(config, "service", top_handle);
     while (local_handle) {
         get_config_opt(config, local_handle, "name", &value, NULL);
         if (safe_str_eq("pacemaker", value)) {
             found = pcmk_cluster_classic_ais;
 
             get_config_opt(config, local_handle, "ver", &value, "0");
             crm_trace("Found Pacemaker plugin version: %s", value);
             break;
         }
 
         local_handle = config_find_next(config, "service", top_handle);
     }
 
     /* No plugin: check whether cman is the quorum provider instead */
     if (found == pcmk_cluster_unknown) {
         top_handle = config_find_init(config);
         local_handle = config_find_next(config, "quorum", top_handle);
         get_config_opt(config, local_handle, "provider", &value, NULL);
 
         if (safe_str_eq("quorum_cman", value)) {
             crm_trace("Found CMAN quorum provider");
             found = pcmk_cluster_cman;
         }
     }
     free(value);
 
     confdb_finalize(config);
     if (found == pcmk_cluster_unknown) {
         crm_err
             ("Corosync is running, but Pacemaker could not find the CMAN or Pacemaker plugin loaded");
         found = pcmk_cluster_invalid;
     }
     return found;
 }
 
 /* Decide whether a peer should be treated as an active cluster member:
  * it must be in CRM_NODE_MEMBER state, pass stack-specific process checks
  * (cman: CPG connectivity; plugin: the plugin process flag), and -- if we
  * know which daemon we are -- be running our own counterpart process.
  */
 gboolean
 crm_is_corosync_peer_active(const crm_node_t * node)
 {
     enum crm_proc_flag proc = crm_proc_none;
 
     if (node == NULL) {
         crm_trace("NULL");
         return FALSE;
 
     } else if (safe_str_neq(node->state, CRM_NODE_MEMBER)) {
         crm_trace("%s: state=%s", node->uname, node->state);
         return FALSE;
 
     } else if (is_cman_cluster() && (node->processes & crm_proc_cpg)) {
         /* If we can still talk to our peer process on that node,
          * then its also part of the corosync membership
          */
         crm_trace("%s: processes=%.8x", node->uname, node->processes);
         return TRUE;
 
     } else if (is_classic_ais_cluster()) {
         /* NOTE(review): this comparison relies on the numeric values of the
          * crm_proc_flag enum (processes below crm_proc_none == "unknown") --
          * confirm against the enum definition */
         if (node->processes < crm_proc_none) {
             crm_debug("%s: unknown process list, assuming active for now", node->uname);
             return TRUE;
 
         } else if (is_set(node->processes, crm_proc_none)) {
             crm_debug("%s: all processes are inactive", node->uname);
             return FALSE;
 
         } else if (is_not_set(node->processes, crm_proc_plugin)) {
             crm_trace("%s: processes=%.8x", node->uname, node->processes);
             return FALSE;
         }
     }
 
     /* Finally, require the peer to run the same daemon we are */
     proc = text2proc(crm_system_name);
     if (proc > crm_proc_none && (node->processes & proc) == 0) {
         crm_trace("%s: proc %.8x not in %.8x", node->uname, proc, node->processes);
         return FALSE;
     }
 
     return TRUE;
 }
diff --git a/lib/common/ipc.c b/lib/common/ipc.c
index 89dacb3390..e0d413025c 100644
--- a/lib/common/ipc.c
+++ b/lib/common/ipc.c
@@ -1,1162 +1,1162 @@
 /*
  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
  * This library is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 
 #include <stdio.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <grp.h>
 
 #include <errno.h>
 #include <fcntl.h>
 #include <bzlib.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipcs.h>
 
 /* Pacemaker IPC message header, layered on top of the libqb transport header */
 struct crm_ipc_response_header {
     struct qb_ipc_response_header qb;   /* libqb transport header (id, size) */
     uint32_t size_uncompressed;         /* payload size incl. trailing NUL (see crm_ipc_prepare) */
     uint32_t size_compressed;           /* bzip2-compressed payload size; only set when compressed */
     uint32_t flags;                     /* crm_ipc_* flags, e.g. crm_ipc_compressed */
 };
 
 static int hdr_offset = 0;      /* cached sizeof(struct crm_ipc_response_header) */
 static int ipc_buffer_max = 0;  /* cached maximum IPC message size (see pick_ipc_buffer) */
 static int pick_ipc_buffer(int max);
 
 /* Lazily cache the IPC header size and maximum buffer size.
  * Safe to call repeatedly; only the first call does any work. */
 static inline void
 crm_ipc_init(void)
 {
     if (hdr_offset == 0) {
         hdr_offset = (int) sizeof(struct crm_ipc_response_header);
     }
     if (ipc_buffer_max == 0) {
         ipc_buffer_max = pick_ipc_buffer(0);
     }
 }
 
 /* Build a unique message reference of the form
  * "<custom1>-<custom2>-<epoch-seconds>-<counter>".
  * NULL inputs are substituted with "_empty_".  Returns a heap string the
  * caller must free(), or NULL on allocation failure. */
 static char *
 generateReference(const char *custom1, const char *custom2)
 {
     static uint ref_counter = 0;
     const char *part1 = (custom1 != NULL)? custom1 : "_empty_";
     const char *part2 = (custom2 != NULL)? custom2 : "_empty_";
     /* 4 + 20 + 40 bytes of (deliberately generous) slack for the
      * separators and numeric fields, as in the original sizing */
     size_t buf_len = 4 + 20 + 40 + strlen(part1) + strlen(part2);
     char *reference = calloc(1, buf_len);
 
     if (reference != NULL) {
         sprintf(reference, "%s-%s-%ld-%u",
                 part1, part2, (unsigned long)time(NULL), ref_counter++);
     }
     return reference;
 }
 
 /* Build a CRM request message addressed to subsystem 'sys_to' (optionally on
  * host 'host_to'), from 'sys_from' (qualified by 'uuid_from' when given).
  * A fresh unique reference is generated, and 'msg_data' (if any) is copied
  * into the message body.  Returns a new xmlNode the caller must free.
  */
 xmlNode *
 create_request_adv(const char *task, xmlNode * msg_data,
                    const char *host_to, const char *sys_to,
                    const char *sys_from, const char *uuid_from, const char *origin)
 {
     char *true_from = NULL;
     xmlNode *request = NULL;
     char *reference = generateReference(task, sys_from);
 
     if (uuid_from != NULL) {
         /* Qualify the sender with its uuid */
         true_from = generate_hash_key(sys_from, uuid_from);
     } else if (sys_from != NULL) {
         true_from = strdup(sys_from);
     } else {
         crm_err("No sys from specified");
     }
 
     /* host_from will get set for us if necessary by CRMd when routed */
     request = create_xml_node(NULL, __FUNCTION__);
     crm_xml_add(request, F_CRM_ORIGIN, origin);
     crm_xml_add(request, F_TYPE, T_CRM);
     crm_xml_add(request, F_CRM_VERSION, CRM_FEATURE_SET);
     crm_xml_add(request, F_CRM_MSG_TYPE, XML_ATTR_REQUEST);
     crm_xml_add(request, F_CRM_REFERENCE, reference);
     crm_xml_add(request, F_CRM_TASK, task);
     crm_xml_add(request, F_CRM_SYS_TO, sys_to);
     crm_xml_add(request, F_CRM_SYS_FROM, true_from);
 
     /* HOSTTO will be ignored if it is to the DC anyway. */
     if (host_to != NULL && strlen(host_to) > 0) {
         crm_xml_add(request, F_CRM_HOST_TO, host_to);
     }
 
     if (msg_data != NULL) {
         add_message_xml(request, F_CRM_DATA, msg_data);
     }
     free(reference);
     free(true_from);
 
     return request;
 }
 
 /*
  * Build a reply to 'original_request', reversing its sys-from/sys-to
  * addressing and reusing its reference and task.  A copy of
  * xml_response_data (if any) is added as the message body.
  * Returns NULL if the original has no message type.
  */
 xmlNode *
 create_reply_adv(xmlNode * original_request, xmlNode * xml_response_data, const char *origin)
 {
     xmlNode *reply = NULL;
 
     const char *host_from = crm_element_value(original_request, F_CRM_HOST_FROM);
     const char *sys_from = crm_element_value(original_request, F_CRM_SYS_FROM);
     const char *sys_to = crm_element_value(original_request, F_CRM_SYS_TO);
     const char *type = crm_element_value(original_request, F_CRM_MSG_TYPE);
     const char *operation = crm_element_value(original_request, F_CRM_TASK);
     const char *crm_msg_reference = crm_element_value(original_request, F_CRM_REFERENCE);
 
     if (type == NULL) {
         crm_err("Cannot create new_message, no message type in original message");
         CRM_ASSERT(type != NULL);
         return NULL;            /* only reached if assertions are disabled */
 #if 0
     } else if (strcasecmp(XML_ATTR_REQUEST, type) != 0) {
         crm_err("Cannot create new_message, original message was not a request");
         return NULL;
 #endif
     }
     reply = create_xml_node(NULL, __FUNCTION__);
     if (reply == NULL) {
         crm_err("Cannot create new_message, malloc failed");
         return NULL;
     }
 
     crm_xml_add(reply, F_CRM_ORIGIN, origin);
     crm_xml_add(reply, F_TYPE, T_CRM);
     crm_xml_add(reply, F_CRM_VERSION, CRM_FEATURE_SET);
     crm_xml_add(reply, F_CRM_MSG_TYPE, XML_ATTR_RESPONSE);
     crm_xml_add(reply, F_CRM_REFERENCE, crm_msg_reference);
     crm_xml_add(reply, F_CRM_TASK, operation);
 
     /* since this is a reply, we reverse the from and to */
     crm_xml_add(reply, F_CRM_SYS_TO, sys_from);
     crm_xml_add(reply, F_CRM_SYS_FROM, sys_to);
 
     /* HOSTTO will be ignored if it is to the DC anyway. */
     if (host_from != NULL && strlen(host_from) > 0) {
         crm_xml_add(reply, F_CRM_HOST_TO, host_from);
     }
 
     if (xml_response_data != NULL) {
         add_message_xml(reply, F_CRM_DATA, xml_response_data);
     }
 
     return reply;
 }
 
 /* Libqb based IPC */
 
 /* Server... */
 
 /* Registry of connected clients; keyed by the qb connection pointer for
  * local clients, or by the client id string for remote ones
  * (see crm_client_destroy) */
 GHashTable *client_connections = NULL;
 
 /* Look up the client record for a libqb server-side connection.
  * Returns NULL (with a trace) when no registry exists yet. */
 crm_client_t *
 crm_client_get(qb_ipcs_connection_t * c)
 {
     if (client_connections == NULL) {
         crm_trace("No client found for %p", c);
         return NULL;
     }
     return g_hash_table_lookup(client_connections, c);
 }
 
 /* Find a registered client by its id string (linear scan of the registry).
  * Returns NULL when the registry is empty, id is NULL, or nothing matches. */
 crm_client_t *
 crm_client_get_by_id(const char *id)
 {
     if (client_connections && id) {
         GHashTableIter iter;
         gpointer key = NULL;
         crm_client_t *match = NULL;
 
         g_hash_table_iter_init(&iter, client_connections);
         while (g_hash_table_iter_next(&iter, &key, (gpointer *) & match)) {
             if (strcmp(match->id, id) == 0) {
                 return match;
             }
         }
     }
 
     crm_trace("No client found with id=%s", id);
     return NULL;
 }
 
 /* Best human-readable identifier for a client: its name if set, otherwise
  * its id, otherwise a fixed placeholder.  Never returns NULL. */
 const char *
 crm_client_name(crm_client_t * c)
 {
     if (c == NULL) {
         return "null";
     }
     if (c->name != NULL) {
         return c->name;
     }
     return (c->id != NULL)? c->id : "unknown";
 }
 
 /* Idempotently create the client registry on first use */
 void
 crm_client_init(void)
 {
     if (client_connections != NULL) {
         return;
     }
     crm_trace("Creating client hash table");
     client_connections = g_hash_table_new(g_direct_hash, g_direct_equal);
 }
 
 /* Tear down the client registry at shutdown, logging if any connections
  * are still registered (they should all have disconnected by now) */
 void
 crm_client_cleanup(void)
 {
     int active = 0;
 
     if (client_connections == NULL) {
         return;
     }
 
     active = g_hash_table_size(client_connections);
     if (active) {
         crm_err("Exiting with %d active connections", active);
     }
     g_hash_table_destroy(client_connections);
     client_connections = NULL;
 }
 
 /* Forcibly disconnect every connection on a libqb service.
  * The iterator is advanced before each disconnect so the current node can
  * be safely dropped.  There really shouldn't be anyone connected when this
  * is called. */
 void
 crm_client_disconnect_all(qb_ipcs_service_t *service)
 {
     qb_ipcs_connection_t *cur = qb_ipcs_connection_first_get(service);
 
     while (cur != NULL) {
         qb_ipcs_connection_t *victim = cur;
 
         cur = qb_ipcs_connection_next_get(service, victim);
 
         crm_notice("Disconnecting client %p, pid=%d...", victim, crm_ipcs_client_pid(victim));
         qb_ipcs_disconnect(victim);
         qb_ipcs_connection_unref(victim);
     }
 }
 
 /* Register a new IPC client for connection 'c'.
  *
  * Grants the cluster group read/write access to the connection's
  * shared-memory files and, when one end is privileged and the other is
  * not, allows the unprivileged uid to clean up after disconnect.
  * Returns the new, registry-inserted client record.
  */
 crm_client_t *
 crm_client_new(qb_ipcs_connection_t * c, uid_t uid_client, gid_t gid_client)
 {
     static uid_t uid_server = 0;
     static gid_t gid_cluster = 0;
 
     crm_client_t *client = NULL;
 
     CRM_LOG_ASSERT(c);
     if (c == NULL) {
         return NULL;
     }
 
     if (gid_cluster == 0) {
         /* One-time lookup of the cluster group id */
         uid_server = getuid();
         if(crm_user_lookup(CRM_DAEMON_USER, NULL, &gid_cluster) < 0) {
             static bool have_error = FALSE;
             if(have_error == FALSE) {
                 crm_warn("Could not find group for user %s", CRM_DAEMON_USER);
                 have_error = TRUE;
             }
         }
     }
 
     if(gid_cluster != 0 && gid_client != 0) {
         uid_t best_uid = -1; /* Passing -1 to chown(2) means don't change */
 
         if(uid_client == 0 || uid_server == 0) { /* Someone is privileged, but the other may not be */
             best_uid = QB_MAX(uid_client, uid_server);
             crm_trace("Allowing user %u to clean up after disconnect", best_uid);
         }
 
         crm_trace("Giving access to group %u", gid_cluster);
         qb_ipcs_connection_auth_set(c, best_uid, gid_cluster, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP);
     }
 
     crm_client_init();
 
     client = calloc(1, sizeof(crm_client_t));
     CRM_ASSERT(client != NULL); /* previously dereferenced without a check */
 
     client->ipcs = c;
     client->kind = CRM_CLIENT_IPC;
     client->pid = crm_ipcs_client_pid(c);
 
     client->id = crm_generate_uuid();
 
     crm_info("Connecting %p for uid=%d gid=%d pid=%u id=%s", c, uid_client, gid_client, client->pid, client->id);
 
 #if ENABLE_ACL
     client->user = uid2username(uid_client);
 #endif
 
     g_hash_table_insert(client_connections, c, client);
     return client;
 }
 
 /* Unregister and free a client record: remove it from the registry (keyed
  * by connection pointer for local clients, by id for remote ones), cancel
  * its timers, drain and free any queued events, then free all owned
  * strings and the record itself.
  */
 void
 crm_client_destroy(crm_client_t * c)
 {
     if (c == NULL) {
         return;
     }
 
     if (client_connections) {
         if (c->ipcs) {
             crm_trace("Destroying %p/%p (%d remaining)",
                       c, c->ipcs, crm_hash_table_size(client_connections) - 1);
             g_hash_table_remove(client_connections, c->ipcs);
 
         } else {
             /* Remote clients are keyed by their id string instead */
             crm_trace("Destroying remote connection %p (%d remaining)",
                       c, crm_hash_table_size(client_connections) - 1);
             g_hash_table_remove(client_connections, c->id);
         }
     }
 
     if (c->event_timer) {
         g_source_remove(c->event_timer);
     }
 
     /* Each queued event is a 2-entry iovec: [0]=header, [1]=payload */
     crm_info("Destroying %d events", g_list_length(c->event_queue));
     while (c->event_queue) {
         struct iovec *event = c->event_queue->data;
 
         c->event_queue = g_list_remove(c->event_queue, event);
         free(event[0].iov_base);
         free(event[1].iov_base);
         free(event);
     }
 
     free(c->id);
     free(c->name);
     free(c->user);
     if (c->remote) {
         if (c->remote->auth_timeout) {
             g_source_remove(c->remote->auth_timeout);
         }
         free(c->remote->buffer);
         free(c->remote);
     }
     free(c);
 }
 
 /* Ask libqb for the pid of the process on the other end of connection 'c';
  * returns 0 when the stats lookup provides nothing */
 int
 crm_ipcs_client_pid(qb_ipcs_connection_t * c)
 {
     struct qb_ipcs_connection_stats stats;
 
     stats.client_pid = 0;
     qb_ipcs_connection_stats_get(c, &stats, 0);
 
     return stats.client_pid;
 }
 
 /* Parse an inbound client message into XML.  Optionally returns the qb
  * message id and our flags via 'id'/'flags'.  Compressed payloads are
  * bzip2-decompressed first.  Returns NULL on decompression failure.
  */
 xmlNode *
 crm_ipcs_recv(crm_client_t * c, void *data, size_t size, uint32_t * id, uint32_t * flags)
 {
     xmlNode *xml = NULL;
     char *uncompressed = NULL;
     /* Payload text follows our header in the raw buffer */
     char *text = ((char *)data) + sizeof(struct crm_ipc_response_header);
     struct crm_ipc_response_header *header = data;
 
     if (id) {
         *id = ((struct qb_ipc_response_header *)data)->id;
     }
     if (flags) {
         *flags = header->flags;
     }
 
     if (header->flags & crm_ipc_compressed) {
         int rc = 0;
         unsigned int size_u = 1 + header->size_uncompressed;
         /* NOTE(review): allocation includes hdr_offset slack even though the
          * header is not copied here; calloc result is not NULL-checked */
         uncompressed = calloc(1, hdr_offset + size_u);
 
         crm_trace("Decompressing message data %d bytes into %d bytes",
                   header->size_compressed, size_u);
 
         rc = BZ2_bzBuffToBuffDecompress(uncompressed, &size_u, text, header->size_compressed, 1, 0);
         text = uncompressed;
 
         if (rc != BZ_OK) {
             crm_err("Decompression failed: %s (%d)", bz2_strerror(rc), rc);
             free(uncompressed);
             return NULL;
         }
     }
 
     /* The payload must arrive NUL-terminated (sender counts the NUL) */
     CRM_ASSERT(text[header->size_uncompressed - 1] == 0);
 
     crm_trace("Received %.200s", text);
     xml = string2xml(text);
 
     free(uncompressed);         /* no-op when the message was not compressed */
     return xml;
 }
 
 ssize_t crm_ipcs_flush_events(crm_client_t * c);  /* forward decl for the timer callback below */
 
 /* One-shot glib timer callback: clear the timer id, then retry the flush.
  * Always returns FALSE so glib does not reschedule; crm_ipcs_flush_events
  * re-arms a timer itself if events remain queued. */
 static gboolean
 crm_ipcs_flush_events_cb(gpointer data)
 {
     crm_client_t *client = data;
 
     client->event_timer = 0;
     crm_ipcs_flush_events(client);
 
     return FALSE;
 }
 
 /* Push up to 100 queued events to the client 'c'.
  *
  * If events remain (or sending failed), a backoff timer is armed to retry,
  * with a delay proportional to the queue length.  Clients whose queue
  * exceeds 500 entries are considered too slow and are evicted.
  * Returns the last send result (or pcmk_ok when there is nothing to do).
  */
 ssize_t
 crm_ipcs_flush_events(crm_client_t * c)
 {
     int sent = 0;
     ssize_t rc = 0;
     int queue_len = 0;
 
     if (c == NULL) {
         return pcmk_ok;
 
     } else if (c->event_timer) {
         /* There is already a timer, wait until it goes off */
         crm_trace("Timer active for %p - %d", c->ipcs, c->event_timer);
         return pcmk_ok;
     }
 
     queue_len = g_list_length(c->event_queue);
     while (c->event_queue && sent < 100) {
         struct crm_ipc_response_header *header = NULL;
         struct iovec *event = c->event_queue->data;
 
         rc = qb_ipcs_event_sendv(c->ipcs, event, 2);
         if (rc < 0) {
             break;              /* stop; the retry timer below picks this up */
         }
 
         sent++;
         header = event[0].iov_base;
         if (header->flags & crm_ipc_compressed) {
             crm_trace("Event %d to %p[%d] (%d compressed bytes) sent",
                       header->qb.id, c->ipcs, c->pid, rc);
         } else {
             crm_trace("Event %d to %p[%d] (%d bytes) sent: %.120s",
                       header->qb.id, c->ipcs, c->pid, rc, event[1].iov_base);
         }
 
         /* The queue owns the iovec; free it once delivered */
         c->event_queue = g_list_remove(c->event_queue, event);
         free(event[0].iov_base);
         free(event[1].iov_base);
         free(event);
     }
 
     queue_len -= sent;
     if (sent > 0 || c->event_queue) {
         crm_trace("Sent %d events (%d remaining) for %p[%d]: %s",
                   sent, queue_len, c->ipcs, c->pid, pcmk_strerror(rc < 0 ? rc : 0));
     }
 
     if (c->event_queue) {
         if (queue_len % 100 == 0 && queue_len > 99) {
             crm_warn("Event queue for %p[%d] has grown to %d", c->ipcs, c->pid, queue_len);
 
         } else if (queue_len > 500) {
             crm_err("Evicting slow client %p[%d]: event queue reached %d entries",
                     c->ipcs, c->pid, queue_len);
             qb_ipcs_disconnect(c->ipcs);
             return rc;
         }
 
         /* Retry later; back off as the queue grows */
         c->event_timer = g_timeout_add(1000 + 100 * queue_len, crm_ipcs_flush_events_cb, c);
     }
 
     return rc;
 }
 
 /* Serialize 'message' into a 2-entry iovec ([0]=header, [1]=payload) for
  * sending with qb.  Messages larger than ipc_buffer_max are bzip2
  * compressed; if even compression cannot fit, -EMSGSIZE is returned and
  * nothing is allocated.  On success, *result holds the iovec (caller owns
  * it and both bases) and the total qb message size is returned.
  */
 ssize_t
 crm_ipc_prepare(uint32_t request, xmlNode * message, struct iovec ** result)
 {
     static int biggest = 0;     /* largest oversized message seen, for advice */
 
     struct iovec *iov;
     unsigned int total = 0;
     char *compressed = NULL;
     char *buffer = dump_xml_unformatted(message);
     struct crm_ipc_response_header *header = calloc(1, sizeof(struct crm_ipc_response_header));
 
     CRM_ASSERT(result != NULL);
 
     *result = NULL;
     iov = calloc(2, sizeof(struct iovec));
 
     crm_ipc_init();
 
     iov[0].iov_len = hdr_offset;
     iov[0].iov_base = header;
 
     /* +1 so the trailing NUL travels with the payload */
     header->size_uncompressed = 1 + strlen(buffer);
     total = hdr_offset + header->size_uncompressed;
 
     if (total < ipc_buffer_max) {
         /* Fits uncompressed; iovec takes ownership of 'buffer' */
         iov[1].iov_base = buffer;
         iov[1].iov_len = header->size_uncompressed;
 
     } else {
         unsigned int new_size = 0;
 
         if (total > biggest) {
             biggest = 2 * QB_MAX(total, biggest);
             crm_notice("Message exceeds the configured ipc limit (%d bytes), "
                        "consider configuring PCMK_ipc_buffer to %d or higher "
                        "to avoid compression overheads", ipc_buffer_max, biggest);
         }
 
         if (crm_compress_string
             (buffer, header->size_uncompressed, ipc_buffer_max, &compressed, &new_size)) {
 
             header->flags |= crm_ipc_compressed;
             header->size_compressed = new_size;
 
             iov[1].iov_len = header->size_compressed;
             iov[1].iov_base = compressed;
 
             free(buffer);
 
         } else {
             /* Too big even compressed: clean up everything and bail */
             ssize_t rc = -EMSGSIZE;
 
             crm_log_xml_trace(message, "EMSGSIZE");
 
             crm_err
                 ("Could not compress the message into less than the configured ipc limit (%d bytes)."
                  "Set PCMK_ipc_buffer to a higher value (%d bytes suggested)", ipc_buffer_max,
                  biggest);
 
             free(compressed);
             free(buffer);
             free(header);
             free(iov);
 
             return rc;
         }
     }
 
     header->qb.size = iov[0].iov_len + iov[1].iov_len;
     header->qb.id = (int32_t)request;    /* Replying to a specific request */
 
     *result = iov;
     return header->qb.size;
 }
 
 /* Send a prepared iovec to client 'c'.
  *
  * With crm_ipc_server_event the message is queued (taking ownership of the
  * iovec when crm_ipc_server_free is also set, otherwise copying it) and
  * delivery is attempted via crm_ipcs_flush_events().  Otherwise it is sent
  * directly as a response to a specific request.
  * Returns the send/flush result (negative errno on failure).
  */
 ssize_t
 crm_ipcs_sendv(crm_client_t * c, struct iovec * iov, enum crm_ipc_server_flags flags)
 {
     ssize_t rc;
     static uint32_t id = 1;
     struct crm_ipc_response_header *header = iov[0].iov_base;
 
     if (flags & crm_ipc_server_event) {
         header->qb.id = id++;   /* We don't really use it, but doesn't hurt to set one */
 
         if (flags & crm_ipc_server_free) {
             crm_trace("Sending the original to %p[%d]", c->ipcs, c->pid);
             c->event_queue = g_list_append(c->event_queue, iov);
 
         } else {
             /* Caller keeps the iovec; deep-copy both entries onto the queue
              * NOTE(review): malloc results here are not NULL-checked */
             struct iovec *iov_copy = calloc(2, sizeof(struct iovec));
 
             crm_trace("Sending a copy to %p[%d]", c->ipcs, c->pid);
             iov_copy[0].iov_len = iov[0].iov_len;
             iov_copy[0].iov_base = malloc(iov[0].iov_len);
             memcpy(iov_copy[0].iov_base, iov[0].iov_base, iov[0].iov_len);
 
             iov_copy[1].iov_len = iov[1].iov_len;
             iov_copy[1].iov_base = malloc(iov[1].iov_len);
             memcpy(iov_copy[1].iov_base, iov[1].iov_base, iov[1].iov_len);
 
             c->event_queue = g_list_append(c->event_queue, iov_copy);
         }
 
     } else {
         CRM_LOG_ASSERT(header->qb.id != 0);     /* Replying to a specific request */
 
         rc = qb_ipcs_response_sendv(c->ipcs, iov, 2);
         if (rc < header->qb.size) {
             /* Short write or error: report it (rc may be a negative errno) */
             crm_notice("Response %d to %p[%d] (%d bytes) failed: %s (%d)",
                        header->qb.id, c->ipcs, c->pid, header->qb.size, pcmk_strerror(rc), rc);
 
         } else {
             crm_trace("Response %d sent, %d bytes to %p[%d]", header->qb.id, rc, c->ipcs, c->pid);
         }
 
         if (flags & crm_ipc_server_free) {
             free(iov[0].iov_base);
             free(iov[1].iov_base);
             free(iov);
         }
     }
 
     /* For events, the flush result becomes our return value; for responses
      * the flush is best-effort and the sendv result is kept */
     if (flags & crm_ipc_server_event) {
         rc = crm_ipcs_flush_events(c);
     } else {
         crm_ipcs_flush_events(c);
     }
 
     if (rc == -EPIPE || rc == -ENOTCONN) {
         crm_trace("Client %p disconnected", c->ipcs);
     }
 
     return rc;
 }
 
 /* Serialize 'message' and deliver it to client 'c' for request 'request'.
  * Ownership of the serialized iovec is handed to crm_ipcs_sendv() via
  * crm_ipc_server_free.  Returns -EDESTADDRREQ for a NULL client, otherwise
  * the prepare/send result. */
 ssize_t
 crm_ipcs_send(crm_client_t * c, uint32_t request, xmlNode * message,
               enum crm_ipc_server_flags flags)
 {
     struct iovec *iov = NULL;
     ssize_t rc = 0;
 
     if (c == NULL) {
         return -EDESTADDRREQ;
     }
 
     rc = crm_ipc_prepare(request, message, &iov);
     if (rc <= 0) {
         free(iov);
         crm_notice("Message to %p[%d] failed: %s (%d)",
                    c->ipcs, c->pid, pcmk_strerror(rc), rc);
         return rc;
     }
 
     return crm_ipcs_sendv(c, iov, flags | crm_ipc_server_free);
 }
 
 /* Send a minimal <tag function="..." line="..."/> acknowledgement for
  * 'request', recording where in the server the ack was generated */
 void
 crm_ipcs_send_ack(crm_client_t * c, uint32_t request, const char *tag, const char *function,
                   int line)
 {
     xmlNode *reply = create_xml_node(NULL, tag);
 
     crm_xml_add(reply, "function", function);
     crm_xml_add_int(reply, "line", line);
 
     crm_ipcs_send(c, request, reply, 0);
     free_xml(reply);
 }
 
 /* Client... */
 
 #define MIN_MSG_SIZE    12336   /* sizeof(struct qb_ipc_connection_response) */
 #define MAX_MSG_SIZE    50*1024 /* 50k default */
 
 /* Client-side IPC connection state */
 struct crm_ipc_s {
     struct pollfd pfd;          /* poll descriptor for the connection */
 
     int buf_size;               /* allocated size of 'buffer' */
     int msg_size;               /* size of the last message received */
     int need_reply;             /* TRUE: sending disabled until a pending reply arrives */
     char *buffer;               /* receive buffer (buf_size bytes) */
     char *name;                 /* name of the server we connect to */
 
     qb_ipcc_connection_t *ipc;  /* underlying libqb client connection */
 
 };
 
 /* Resolve the IPC buffer size to use: the PCMK_ipc_buffer environment
  * variable overrides the supplied value, MAX_MSG_SIZE is the fallback for
  * non-positive values, and the result never drops below MIN_MSG_SIZE. */
 static int
 pick_ipc_buffer(int max)
 {
     const char *env = getenv("PCMK_ipc_buffer");
 
     if (env != NULL) {
         max = crm_parse_int(env, "0");
     }
 
     if (max <= 0) {
         max = MAX_MSG_SIZE;
     } else if (max < MIN_MSG_SIZE) {
         max = MIN_MSG_SIZE;
     }
 
     crm_trace("Using max message size of %d", max);
     return max;
 }
 
 /* Allocate a (not yet connected) client IPC handle for server 'name' with
  * a buffer of at least 'max_size' bytes (see pick_ipc_buffer).
  * Returns NULL on allocation failure instead of handing back a handle
  * that would crash on first use.
  */
 crm_ipc_t *
 crm_ipc_new(const char *name, size_t max_size)
 {
     crm_ipc_t *client = NULL;
 
     client = calloc(1, sizeof(crm_ipc_t));
     if (client == NULL) {
         crm_err("Could not allocate IPC connection to %s", name);
         return NULL;
     }
 
     client->name = strdup(name);
     client->buf_size = pick_ipc_buffer(max_size);
     client->buffer = malloc(client->buf_size);
     if (client->name == NULL || client->buffer == NULL) {
         /* Previously these results were returned unchecked */
         crm_err("Could not allocate IPC buffer for %s", name);
         free(client->buffer);
         free(client->name);
         free(client);
         return NULL;
     }
 
     client->pfd.fd = -1;        /* not connected yet */
     client->pfd.events = POLLIN;
     client->pfd.revents = 0;
 
     return client;
 }
 
 /* Establish the libqb connection for 'client' and hook up its poll fd.
  * Returns TRUE on success, FALSE on any failure.
  */
 bool
 crm_ipc_connect(crm_ipc_t * client)
 {
     client->need_reply = FALSE;
     client->ipc = qb_ipcc_connect(client->name, client->buf_size);
 
     if (client->ipc == NULL) {
         crm_perror(LOG_INFO, "Could not establish %s connection", client->name);
         return FALSE;
     }
 
     client->pfd.fd = crm_ipc_get_fd(client);
     if (client->pfd.fd < 0) {
         /* NOTE(review): on this path client->ipc remains connected even
          * though FALSE is returned — confirm callers always close/destroy */
         crm_perror(LOG_INFO, "Could not obtain file descriptor for %s connection", client->name);
         return FALSE;
     }
 
     qb_ipcc_context_set(client->ipc, client);
 
     return TRUE;
 }
 
 /* Disconnect (but do not free) a client IPC handle; safe on NULL and on
  * handles that were never connected.
  */
 void
 crm_ipc_close(crm_ipc_t * client)
 {
     if (client) {
         /* Fixed: the format previously had four conversions
          * ("%s ... %p (%p.%p)") but only three arguments — undefined
          * behavior in the varargs call */
         crm_trace("Disconnecting %s IPC connection %p (%p)", client->name, client, client->ipc);
 
         if (client->ipc) {
             qb_ipcc_connection_t *ipc = client->ipc;
 
             /* Clear the pointer first so a re-entrant callback sees it gone */
             client->ipc = NULL;
             qb_ipcc_disconnect(ipc);
         }
     }
 }
 
 /* Free a client IPC handle.  Deliberately does NOT disconnect an active
  * connection (see the comment below); callers should crm_ipc_close() first.
  */
 void
 crm_ipc_destroy(crm_ipc_t * client)
 {
     if (client) {
         if (client->ipc && qb_ipcc_is_connected(client->ipc)) {
             crm_notice("Destroying an active IPC connection to %s", client->name);
             /* The next line is basically unsafe
              *
              * If this connection was attached to mainloop and mainloop is active,
              *   the 'disconnected' callback will end up back here and we'll end
              *   up free'ing the memory twice - something that can still happen
              *   even without this if we destroy a connection and it closes before
              *   we call exit
              */
             /* crm_ipc_close(client); */
         }
         crm_trace("Destroying IPC connection to %s: %p", client->name, client);
         free(client->buffer);
         free(client->name);
         free(client);
     }
 }
 
 /* Fetch the pollable file descriptor behind a client connection.
  * Returns the fd on success, -EINVAL (after logging) otherwise. */
 int
 crm_ipc_get_fd(crm_ipc_t * client)
 {
     int fd = 0;
 
     CRM_ASSERT(client != NULL);
     if (client->ipc != NULL && qb_ipcc_fd_get(client->ipc, &fd) == 0) {
         return fd;
     }
 
     crm_perror(LOG_ERR, "Could not obtain file IPC descriptor for %s", client->name);
     return -EINVAL;
 }
 
 /* Cheap liveness check for a client connection.  On a dead connection the
  * poll fd is poisoned (set negative) so subsequent calls short-circuit. */
 bool
 crm_ipc_connected(crm_ipc_t * client)
 {
     bool alive = FALSE;
 
     if (client == NULL) {
         crm_trace("No client");
         return FALSE;
     }
     if (client->ipc == NULL) {
         crm_trace("No connection");
         return FALSE;
     }
     if (client->pfd.fd < 0) {
         crm_trace("Bad descriptor");
         return FALSE;
     }
 
     alive = qb_ipcc_is_connected(client->ipc);
     if (alive == FALSE) {
         client->pfd.fd = -EINVAL;   /* poison for the check above */
     }
     return alive;
 }
 
 /* Non-blocking poll for pending data on a client connection.
  * Returns -ENOTCONN when disconnected, otherwise poll(2)'s result
  * (>0 when data is ready, 0 when not, <0 on error). */
 int
 crm_ipc_ready(crm_ipc_t * client)
 {
     CRM_ASSERT(client != NULL);
 
     if (crm_ipc_connected(client) == FALSE) {
         return -ENOTCONN;
     }
 
     client->pfd.revents = 0;
     return poll(&(client->pfd), 1, 0);
 }
 
 /* If the message currently in client->buffer is compressed, replace the
  * buffer with a larger one holding the original header followed by the
  * bzip2-decompressed payload.  Returns pcmk_ok, or -EILSEQ on
  * decompression failure (buffer left untouched).
  */
 static int
 crm_ipc_decompress(crm_ipc_t * client)
 {
     struct crm_ipc_response_header *header = (struct crm_ipc_response_header *)client->buffer;
 
     if (header->flags & crm_ipc_compressed) {
         int rc = 0;
         unsigned int size_u = 1 + header->size_uncompressed;
         char *uncompressed = calloc(1, hdr_offset + size_u);
 
         crm_trace("Decompressing message data %d bytes into %d bytes",
                  header->size_compressed, size_u);
 
         /* Decompress past where the copied header will go */
         rc = BZ2_bzBuffToBuffDecompress(uncompressed + hdr_offset, &size_u,
                                         client->buffer + hdr_offset, header->size_compressed, 1, 0);
 
         if (rc != BZ_OK) {
             crm_err("Decompression failed: %s (%d)", bz2_strerror(rc), rc);
             free(uncompressed);
             return -EILSEQ;
         }
 
         /* Compression is only used for messages that exceeded the buffer
          * limit, so the decompressed size should exceed it too.
          * NOTE(review): BZ2 updates size_u to the actual output size, yet
          * the second assert expects it unchanged — relies on the sender
          * counting the trailing NUL; confirm against crm_ipc_prepare. */
         CRM_ASSERT((header->size_uncompressed + hdr_offset) >= ipc_buffer_max);
         CRM_ASSERT(size_u == header->size_uncompressed);
 
         memcpy(uncompressed, client->buffer, hdr_offset);       /* Preserve the header */
         header = (struct crm_ipc_response_header *)uncompressed;
 
         free(client->buffer);
         client->buf_size = hdr_offset + size_u;
         client->buffer = uncompressed;
     }
 
     /* Payload must be NUL-terminated either way */
     CRM_ASSERT(client->buffer[hdr_offset + header->size_uncompressed - 1] == 0);
     return pcmk_ok;
 }
 
 /* Non-blocking read of one event from the server into client->buffer
  * (decompressing if needed).  Returns the payload size on success, a
  * negative error from decompression, or -ENOMSG when nothing was read.
  */
 long
 crm_ipc_read(crm_ipc_t * client)
 {
     struct crm_ipc_response_header *header = NULL;
 
     CRM_ASSERT(client != NULL);
     CRM_ASSERT(client->ipc != NULL);
     CRM_ASSERT(client->buffer != NULL);
 
     crm_ipc_init();
 
     client->buffer[0] = 0;
     client->msg_size = qb_ipcc_event_recv(client->ipc, client->buffer, client->buf_size - 1, 0);
     if (client->msg_size >= 0) {
         int rc = crm_ipc_decompress(client);
 
         if (rc != pcmk_ok) {
             return rc;
         }
 
         header = (struct crm_ipc_response_header *)client->buffer;
-        crm_trace("Recieved %s event %d, size=%d, rc=%d, text: %.100s",
+        crm_trace("Received %s event %d, size=%d, rc=%d, text: %.100s",
                   client->name, header->qb.id, header->qb.size, client->msg_size,
                   client->buffer + hdr_offset);
 
     } else {
-        crm_trace("No message from %s recieved: %s", client->name, pcmk_strerror(client->msg_size));
+        crm_trace("No message from %s received: %s", client->name, pcmk_strerror(client->msg_size));
     }
 
     if (crm_ipc_connected(client) == FALSE || client->msg_size == -ENOTCONN) {
         crm_err("Connection to %s failed", client->name);
     }
 
     if (header) {
         /* Data excluding the header */
         return header->size_uncompressed;
     }
     return -ENOMSG;
 }
 
 /* Pointer to the payload portion (past our header) of the last message
  * read into the client's buffer */
 const char *
 crm_ipc_buffer(crm_ipc_t * client)
 {
     CRM_ASSERT(client != NULL);
 
     return client->buffer + sizeof(struct crm_ipc_response_header);
 }
 
 /* Name of the server this client handle is associated with */
 const char *
 crm_ipc_name(crm_ipc_t * client)
 {
     CRM_ASSERT(client != NULL);
 
     return client->name;
 }
 
 /* Blocking send+receive: retry as long as the server reports EAGAIN and
  * the connection stays up */
 static int
 internal_ipc_send_recv(crm_ipc_t * client, const void *iov)
 {
     int rc = 0;
 
     for (;;) {
         rc = qb_ipcc_sendv_recv(client->ipc, iov, 2, client->buffer, client->buf_size, -1);
         if (rc != -EAGAIN || !crm_ipc_connected(client)) {
             break;
         }
     }
 
     return rc;
 }
 
 /* Send a request, retrying on EAGAIN until roughly ms_timeout has elapsed
  * (second granularity, rounded up) or the connection drops */
 static int
 internal_ipc_send_request(crm_ipc_t * client, const void *iov, int ms_timeout)
 {
     int rc = 0;
     time_t timeout = time(NULL) + 1 + (ms_timeout / 1000);
 
     for (;;) {
         rc = qb_ipcc_sendv(client->ipc, iov, 2);
         if (rc != -EAGAIN || time(NULL) >= timeout || !crm_ipc_connected(client)) {
             break;
         }
     }
 
     return rc;
 }
 
 /* Wait (up to ~ms_timeout, second granularity) for the reply to
  * 'request_id', discarding stale replies with smaller ids and asserting if
  * a newer one appears.  On success the reply is left in client->buffer and
  * the qb_ipcc_recv result is returned; decompression errors are returned
  * directly.
  */
 static int
 internal_ipc_get_reply(crm_ipc_t * client, int request_id, int ms_timeout)
 {
     time_t timeout = time(NULL) + 1 + (ms_timeout / 1000);
     int rc = 0;
 
     crm_ipc_init();
 
     /* get the reply */
     crm_trace("client %s waiting on reply to msg id %d", client->name, request_id);
     do {
 
         rc = qb_ipcc_recv(client->ipc, client->buffer, client->buf_size, 1000);
         if (rc > 0) {
             struct crm_ipc_response_header *hdr = NULL;
 
             /* Renamed from 'rc' — it previously shadowed the outer rc,
              * making the control flow easy to misread */
             int dec_rc = crm_ipc_decompress(client);
 
             if (dec_rc != pcmk_ok) {
                 return dec_rc;
             }
 
             hdr = (struct crm_ipc_response_header *)client->buffer;
             if (hdr->qb.id == request_id) {
                 /* Got it */
                 break;
             } else if (hdr->qb.id < request_id) {
                 xmlNode *bad = string2xml(crm_ipc_buffer(client));
 
                 crm_err("Discarding old reply %d (need %d)", hdr->qb.id, request_id);
                 crm_log_xml_notice(bad, "OldIpcReply");
 
             } else {
                 xmlNode *bad = string2xml(crm_ipc_buffer(client));
 
                 crm_err("Discarding newer reply %d (need %d)", hdr->qb.id, request_id);
                 crm_log_xml_notice(bad, "ImpossibleReply");
                 CRM_ASSERT(hdr->qb.id <= request_id);
             }
         } else if (crm_ipc_connected(client) == FALSE) {
             crm_err("Server disconnected client %s while waiting for msg id %d", client->name,
                     request_id);
             break;
         }
 
     } while (time(NULL) < timeout);
 
     return rc;
 }
 
 /* Send 'message' to the connected server and (unless
  * crm_ipc_client_response is cleared) wait up to ms_timeout for the
  * matching reply, returned via *reply when requested.
  *
  * ms_timeout == 0 defaults to 5000ms; ms_timeout < 0 uses the blocking
  * send+receive path.  If a reply times out, further sends are disabled
  * (need_reply) until the stray reply is drained on the next call.
  * Returns bytes received (>0), or a negative errno.
  */
 int
 crm_ipc_send(crm_ipc_t * client, xmlNode * message, enum crm_ipc_flags flags, int32_t ms_timeout,
              xmlNode ** reply)
 {
     long rc = 0;
     struct iovec *iov;
     static uint32_t id = 0;
     struct crm_ipc_response_header *header;
 
     crm_ipc_init();
 
     if (client == NULL) {
         crm_notice("Invalid connection");
         return -ENOTCONN;
 
     } else if (crm_ipc_connected(client) == FALSE) {
         /* Don't even bother */
         crm_notice("Connection to %s closed", client->name);
         return -ENOTCONN;
     }
 
     if (client->need_reply) {
         /* A previous reply went missing; try to drain it before sending */
         crm_trace("Trying again to obtain pending reply from %s", client->name);
         rc = qb_ipcc_recv(client->ipc, client->buffer, client->buf_size, 300);
         if (rc < 0) {
-            crm_warn("Sending to %s (%p) is disabled until pending reply is recieved", client->name,
+            crm_warn("Sending to %s (%p) is disabled until pending reply is received", client->name,
                      client->ipc);
             return -EALREADY;
 
         } else {
             crm_notice("Lost reply from %s (%p) finally arrived, sending re-enabled", client->name,
                        client->ipc);
             client->need_reply = FALSE;
         }
     }
 
     rc = crm_ipc_prepare(++id, message, &iov);
     if(rc < 0) {
         return rc;
     }
 
     header = iov[0].iov_base;
     header->flags |= flags;
 
     if (ms_timeout == 0) {
         ms_timeout = 5000;      /* default timeout */
     }
 
     crm_trace("Sending from client: %s request id: %d bytes: %u timeout:%d msg...",
               client->name, header->qb.id, header->qb.size, ms_timeout);
 
     if (ms_timeout > 0) {
 
         rc = internal_ipc_send_request(client, iov, ms_timeout);
 
         if (rc <= 0) {
             crm_trace("Failed to send from client %s request %d with %u bytes...",
                       client->name, header->qb.id, header->qb.size);
             goto send_cleanup;
 
         } else if (is_not_set(flags, crm_ipc_client_response)) {
             /* Fire-and-forget: caller doesn't want the reply */
             crm_trace("Message sent, not waiting for reply to %d from %s to %u bytes...",
                       header->qb.id, client->name, header->qb.size);
 
             goto send_cleanup;
         }
 
         rc = internal_ipc_get_reply(client, header->qb.id, ms_timeout);
         if (rc < 0) {
             /* No reply, for now, disable sending
              *
              * The alternative is to close the connection since we don't know
              * how to detect and discard out-of-sequence replies
              *
              * TODO - implement the above
              */
             client->need_reply = TRUE;
         }
 
     } else {
         /* Negative timeout: fully blocking send+receive */
         rc = internal_ipc_send_recv(client, iov);
     }
 
     if (rc > 0) {
         struct crm_ipc_response_header *hdr = (struct crm_ipc_response_header *)client->buffer;
 
-        crm_trace("Recieved response %d, size=%d, rc=%ld, text: %.200s", hdr->qb.id, hdr->qb.size,
+        crm_trace("Received response %d, size=%d, rc=%ld, text: %.200s", hdr->qb.id, hdr->qb.size,
                   rc, crm_ipc_buffer(client));
 
         if (reply) {
             *reply = string2xml(crm_ipc_buffer(client));
         }
 
     } else {
-        crm_trace("Response not recieved: rc=%ld, errno=%d", rc, errno);
+        crm_trace("Response not received: rc=%ld, errno=%d", rc, errno);
     }
 
   send_cleanup:
     if (crm_ipc_connected(client) == FALSE) {
         crm_notice("Connection to %s closed: %s (%ld)", client->name, pcmk_strerror(rc), rc);
 
     } else if (rc == -ETIMEDOUT) {
         crm_warn("Request %d to %s (%p) failed: %s (%ld) after %dms",
                  header->qb.id, client->name, client->ipc, pcmk_strerror(rc), rc, ms_timeout);
         crm_write_blackbox(0, NULL);
 
     } else if (rc <= 0) {
         crm_warn("Request %d to %s (%p) failed: %s (%ld)",
                  header->qb.id, client->name, client->ipc, pcmk_strerror(rc), rc);
     }
 
     /* We own the iovec produced by crm_ipc_prepare */
     free(header);
     free(iov[1].iov_base);
     free(iov);
     return rc;
 }
 
 /* Utils */
 
 /* Build a CRM_OP_HELLO request announcing a client and its API version.
  * All four fields are mandatory and must be non-empty; returns NULL (with
  * an error logged) otherwise. */
 xmlNode *
 create_hello_message(const char *uuid,
                      const char *client_name, const char *major_version, const char *minor_version)
 {
     xmlNode *options = NULL;
     xmlNode *hello = NULL;
 
     if (uuid == NULL || strlen(uuid) == 0
         || client_name == NULL || strlen(client_name) == 0
         || major_version == NULL || strlen(major_version) == 0
         || minor_version == NULL || strlen(minor_version) == 0) {
         crm_err("Missing fields, Hello message will not be valid.");
         return NULL;
     }
 
     options = create_xml_node(NULL, XML_TAG_OPTIONS);
     crm_xml_add(options, "major_version", major_version);
     crm_xml_add(options, "minor_version", minor_version);
     crm_xml_add(options, "client_name", client_name);
     crm_xml_add(options, "client_uuid", uuid);
 
     crm_trace("creating hello message");
     hello = create_request(CRM_OP_HELLO, options, NULL, NULL, client_name, uuid);
     free_xml(options);
 
     return hello;
 }
diff --git a/lib/fencing/st_client.c b/lib/fencing/st_client.c
index 7b6e4daf7a..385fa1c98f 100644
--- a/lib/fencing/st_client.c
+++ b/lib/fencing/st_client.c
@@ -1,2468 +1,2468 @@
 /*
  * Copyright (c) 2004 Andrew Beekhof <andrew@beekhof.net>
  *
  * This library is free software; you can redistribute it and/or
  * modify it under the terms of the GNU Lesser General Public
  * License as published by the Free Software Foundation; either
  * version 2.1 of the License, or (at your option) any later version.
  *
  * This library is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * Lesser General Public License for more details.
  *
  * You should have received a copy of the GNU Lesser General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  *
  */
 #include <crm_internal.h>
 #include <unistd.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <stdarg.h>
 #include <string.h>
 #include <ctype.h>
 
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 
 #include <glib.h>
 #include <dirent.h>
 #include <libgen.h>             /* Add it for compiling on OSX */
 
 #include <crm/crm.h>
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 
 #ifdef HAVE_STONITH_STONITH_H
 #  include <stonith/stonith.h>
 #  define LHA_STONITH_LIBRARY "libstonith.so.1"
 static void *lha_agents_lib = NULL;
 #endif
 
 #include <crm/common/mainloop.h>
 
 CRM_TRACE_INIT_DATA(stonith);
 
 struct stonith_action_s {
     /*! user defined data */
     char *agent;
     char *action;
     char *victim;
     char *args;
     int timeout;
     int async;
     void *userdata;
     void (*done_cb) (GPid pid, gint status, const char *output, gpointer user_data);
 
     /*! internal async track data */
     int fd_stdout;
     int last_timeout_signo;
 
     /*! internal timing information */
     time_t initial_start_time;
     int tries;
     int remaining_timeout;
     guint timer_sigterm;
     guint timer_sigkill;
     int max_retries;
 
     /* device output data */
     GPid pid;
     int rc;
     char *output;
 };
 
 typedef struct stonith_private_s {
     char *token;
     crm_ipc_t *ipc;
     mainloop_io_t *source;
     GHashTable *stonith_op_callback_table;
     GList *notify_list;
 
     void (*op_callback) (stonith_t * st, stonith_callback_data_t * data);
 
 } stonith_private_t;
 
 typedef struct stonith_notify_client_s {
     const char *event;
     const char *obj_id;         /* implement one day */
     const char *obj_type;       /* implement one day */
     void (*notify) (stonith_t * st, stonith_event_t * e);
 
 } stonith_notify_client_t;
 
 typedef struct stonith_callback_client_s {
     void (*callback) (stonith_t * st, stonith_callback_data_t * data);
     const char *id;
     void *user_data;
     gboolean only_success;
     gboolean allow_timeout_updates;
     struct timer_rec_s *timer;
 
 } stonith_callback_client_t;
 
 struct notify_blob_s {
     stonith_t *stonith;
     xmlNode *xml;
 };
 
 struct timer_rec_s {
     int call_id;
     int timeout;
     guint ref;
     stonith_t *stonith;
 };
 
 typedef int (*stonith_op_t) (const char *, int, const char *, xmlNode *,
                              xmlNode *, xmlNode *, xmlNode **, xmlNode **);
 
 static const char META_TEMPLATE[] =
     "<?xml version=\"1.0\"?>\n"
     "<!DOCTYPE resource-agent SYSTEM \"ra-api-1.dtd\">\n"
     "<resource-agent name=\"%s\">\n"
     "  <version>1.0</version>\n"
     "  <longdesc lang=\"en\">\n"
     "%s\n"
     "  </longdesc>\n"
     "  <shortdesc lang=\"en\">%s</shortdesc>\n"
     "%s\n"
     "  <actions>\n"
     "    <action name=\"start\"   timeout=\"20\" />\n"
     "    <action name=\"stop\"    timeout=\"15\" />\n"
     "    <action name=\"status\"  timeout=\"20\" />\n"
     "    <action name=\"monitor\" timeout=\"20\" interval=\"3600\"/>\n"
     "    <action name=\"meta-data\"  timeout=\"15\" />\n"
     "  </actions>\n"
     "  <special tag=\"heartbeat\">\n"
     "    <version>2.0</version>\n" "  </special>\n" "</resource-agent>\n";
 
 bool stonith_dispatch(stonith_t * st);
 int stonith_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata);
 void stonith_perform_callback(stonith_t * stonith, xmlNode * msg, int call_id, int rc);
 xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data,
                            int call_options);
 int stonith_send_command(stonith_t * stonith, const char *op, xmlNode * data,
                          xmlNode ** output_data, int call_options, int timeout);
 
 static void stonith_connection_destroy(gpointer user_data);
 static void stonith_send_notification(gpointer data, gpointer user_data);
 static int internal_stonith_action_execute(stonith_action_t * action);
 
 static void
 stonith_connection_destroy(gpointer user_data)
 {
     stonith_t *stonith = user_data;
     stonith_private_t *native = NULL;
     struct notify_blob_s blob;
 
     crm_trace("Sending destroyed notification");
     blob.stonith = stonith;
     blob.xml = create_xml_node(NULL, "notify");
 
     native = stonith->private;
     native->ipc = NULL;
     native->source = NULL;
 
     stonith->state = stonith_disconnected;
     crm_xml_add(blob.xml, F_TYPE, T_STONITH_NOTIFY);
     crm_xml_add(blob.xml, F_SUBTYPE, T_STONITH_NOTIFY_DISCONNECT);
 
     g_list_foreach(native->notify_list, stonith_send_notification, &blob);
     free_xml(blob.xml);
 }
 
 xmlNode *
 create_device_registration_xml(const char *id, const char *namespace, const char *agent,
                                stonith_key_value_t * params)
 {
     xmlNode *data = create_xml_node(NULL, F_STONITH_DEVICE);
     xmlNode *args = create_xml_node(data, XML_TAG_ATTRS);
 
 #if HAVE_STONITH_STONITH_H
     namespace = get_stonith_provider(agent, namespace);
     if (safe_str_eq(namespace, "heartbeat")) {
         hash2field((gpointer) "plugin", (gpointer) agent, args);
         agent = "fence_legacy";
     }
 #endif
 
     crm_xml_add(data, XML_ATTR_ID, id);
     crm_xml_add(data, "origin", __FUNCTION__);
     crm_xml_add(data, "agent", agent);
     crm_xml_add(data, "namespace", namespace);
 
     for (; params; params = params->next) {
         hash2field((gpointer) params->key, (gpointer) params->value, args);
     }
 
     return data;
 }
 
 static int
 stonith_api_register_device(stonith_t * st, int call_options,
                             const char *id, const char *namespace, const char *agent,
                             stonith_key_value_t * params)
 {
     int rc = 0;
     xmlNode *data = NULL;
 
     data = create_device_registration_xml(id, namespace, agent, params);
 
     rc = stonith_send_command(st, STONITH_OP_DEVICE_ADD, data, NULL, call_options, 0);
     free_xml(data);
 
     return rc;
 }
 
 static int
 stonith_api_remove_device(stonith_t * st, int call_options, const char *name)
 {
     int rc = 0;
     xmlNode *data = NULL;
 
     data = create_xml_node(NULL, F_STONITH_DEVICE);
     crm_xml_add(data, "origin", __FUNCTION__);
     crm_xml_add(data, XML_ATTR_ID, name);
     rc = stonith_send_command(st, STONITH_OP_DEVICE_DEL, data, NULL, call_options, 0);
     free_xml(data);
 
     return rc;
 }
 
 static int
 stonith_api_remove_level(stonith_t * st, int options, const char *node, int level)
 {
     int rc = 0;
     xmlNode *data = NULL;
 
     data = create_xml_node(NULL, F_STONITH_LEVEL);
     crm_xml_add(data, "origin", __FUNCTION__);
     crm_xml_add(data, F_STONITH_TARGET, node);
     crm_xml_add_int(data, XML_ATTR_ID, level);
     rc = stonith_send_command(st, STONITH_OP_LEVEL_DEL, data, NULL, options, 0);
     free_xml(data);
 
     return rc;
 }
 
 xmlNode *
 create_level_registration_xml(const char *node, int level, stonith_key_value_t * device_list)
 {
     xmlNode *data = create_xml_node(NULL, F_STONITH_LEVEL);
 
     crm_xml_add_int(data, XML_ATTR_ID, level);
     crm_xml_add(data, F_STONITH_TARGET, node);
     crm_xml_add(data, "origin", __FUNCTION__);
 
     for (; device_list; device_list = device_list->next) {
         xmlNode *dev = create_xml_node(data, F_STONITH_DEVICE);
 
         crm_xml_add(dev, XML_ATTR_ID, device_list->value);
     }
 
     return data;
 }
 
 static int
 stonith_api_register_level(stonith_t * st, int options, const char *node, int level,
                            stonith_key_value_t * device_list)
 {
     int rc = 0;
     xmlNode *data = create_level_registration_xml(node, level, device_list);
 
     rc = stonith_send_command(st, STONITH_OP_LEVEL_ADD, data, NULL, options, 0);
     free_xml(data);
 
     return rc;
 }
 
 static void
 append_arg(gpointer key, gpointer value, gpointer user_data)
 {
     int len = 3;                /* =, \n, \0 */
     int last = 0;
     char **args = user_data;
 
     CRM_CHECK(key != NULL, return);
     CRM_CHECK(value != NULL, return);
 
     if (strstr(key, "pcmk_")) {
         return;
     } else if (strstr(key, CRM_META)) {
         return;
     } else if (safe_str_eq(key, "crm_feature_set")) {
         return;
     }
 
     len += strlen(key);
     len += strlen(value);
     if (*args != NULL) {
         last = strlen(*args);
     }
 
     *args = realloc(*args, last + len);
     crm_trace("Appending: %s=%s", (char *)key, (char *)value);
     sprintf((*args) + last, "%s=%s\n", (char *)key, (char *)value);
 }
 
 static void
 append_const_arg(const char *key, const char *value, char **arg_list)
 {
     char *glib_sucks_key = strdup(key);
     char *glib_sucks_value = strdup(value);
 
     append_arg(glib_sucks_key, glib_sucks_value, arg_list);
 
     free(glib_sucks_value);
     free(glib_sucks_key);
 }
 
 static void
 append_host_specific_args(const char *victim, const char *map, GHashTable * params, char **arg_list)
 {
     char *name = NULL;
     int last = 0, lpc = 0, max = 0;
 
     if (map == NULL) {
         /* The best default there is for now... */
         crm_debug("Using default arg map: port=uname");
         append_const_arg("port", victim, arg_list);
         return;
     }
 
     max = strlen(map);
     crm_debug("Processing arg map: %s", map);
     for (; lpc < max + 1; lpc++) {
         if (isalpha(map[lpc])) {
             /* keep going */
 
         } else if (map[lpc] == '=' || map[lpc] == ':') {
             free(name);
             name = calloc(1, 1 + lpc - last);
             memcpy(name, map + last, lpc - last);
             crm_debug("Got name: %s", name);
             last = lpc + 1;
 
         } else if (map[lpc] == 0 || map[lpc] == ',' || isspace(map[lpc])) {
             char *param = NULL;
             const char *value = NULL;
 
             param = calloc(1, 1 + lpc - last);
             memcpy(param, map + last, lpc - last);
             last = lpc + 1;
 
             crm_debug("Got key: %s", param);
             if (name == NULL) {
                 crm_err("Misparsed '%s', found '%s' without a name", map, param);
                 free(param);
                 continue;
             }
 
             if (safe_str_eq(param, "uname")) {
                 value = victim;
             } else {
                 char *key = crm_meta_name(param);
 
                 value = g_hash_table_lookup(params, key);
                 free(key);
             }
 
             if (value) {
                 crm_debug("Setting '%s'='%s' (%s) for %s", name, value, param, victim);
                 append_const_arg(name, value, arg_list);
 
             } else {
                 crm_err("No node attribute '%s' for '%s'", name, victim);
             }
 
             free(name);
             name = NULL;
             free(param);
             if (map[lpc] == 0) {
                 break;
             }
 
         } else if (isspace(map[lpc])) {
             last = lpc;
         }
     }
     free(name);
 }
 
 static char *
 make_args(const char *action, const char *victim, uint32_t victim_nodeid, GHashTable * device_args,
           GHashTable * port_map)
 {
     char buffer[512];
     char *arg_list = NULL;
     const char *value = NULL;
     const char *_action = action;
 
     CRM_CHECK(action != NULL, return NULL);
 
     buffer[511] = 0;
     snprintf(buffer, 511, "pcmk_%s_action", action);
     if (device_args) {
         value = g_hash_table_lookup(device_args, buffer);
     }
 
     if (value == NULL && device_args) {
         /* Legacy support for early 1.1 releases - Remove for 1.4 */
         snprintf(buffer, 511, "pcmk_%s_cmd", action);
         value = g_hash_table_lookup(device_args, buffer);
     }
 
     if (value == NULL && device_args && safe_str_eq(action, "off")) {
         /* Legacy support for late 1.1 releases - Remove for 1.4 */
         value = g_hash_table_lookup(device_args, "pcmk_poweroff_action");
     }
 
     if (value) {
         crm_info("Substituting action '%s' for requested operation '%s'", value, action);
         action = value;
     }
 
     append_const_arg(STONITH_ATTR_ACTION_OP, action, &arg_list);
     if (victim && device_args) {
         const char *alias = victim;
         const char *param = g_hash_table_lookup(device_args, STONITH_ATTR_HOSTARG);
 
         if (port_map && g_hash_table_lookup(port_map, victim)) {
             alias = g_hash_table_lookup(port_map, victim);
         }
 
         /* Always supply the node's name too:
          *    https://fedorahosted.org/cluster/wiki/FenceAgentAPI
          */
         append_const_arg("nodename", victim, &arg_list);
         if (victim_nodeid) {
             char nodeid_str[33] = { 0, };
             if (snprintf(nodeid_str, 33, "%u", (unsigned int)victim_nodeid)) {
                 crm_info("For stonith action (%s) for victim %s, adding nodeid (%d) to parameters",
                          action, victim, nodeid_str);
                 append_const_arg("nodeid", nodeid_str, &arg_list);
             }
         }
 
         /* Check if we need to supply the victim in any other form */
         if (param == NULL) {
             const char *map = g_hash_table_lookup(device_args, STONITH_ATTR_ARGMAP);
 
             if (map == NULL) {
                 param = "port";
                 value = g_hash_table_lookup(device_args, param);
 
             } else {
                 /* Legacy handling */
                 append_host_specific_args(alias, map, device_args, &arg_list);
                 value = map;    /* Nothing more to do */
             }
 
         } else if (safe_str_eq(param, "none")) {
             value = param;      /* Nothing more to do */
 
         } else {
             value = g_hash_table_lookup(device_args, param);
         }
 
         /* Don't overwrite explictly set values for $param */
         if (value == NULL || safe_str_eq(value, "dynamic")) {
             crm_debug("Performing %s action for node '%s' as '%s=%s'", action, victim, param,
                       alias);
             append_const_arg(param, alias, &arg_list);
         }
     }
 
     if (device_args) {
         g_hash_table_foreach(device_args, append_arg, &arg_list);
     }
 
     if(device_args && g_hash_table_lookup(device_args, STONITH_ATTR_ACTION_OP)) {
         if(safe_str_eq(_action,"list")
            || safe_str_eq(_action,"status")
            || safe_str_eq(_action,"monitor")
            || safe_str_eq(_action,"metadata")) {
             /* Force use of the calculated command for support ops
              * We don't want list or monitor ops initiating fencing, regardless of what the admin configured
              */
             append_const_arg(STONITH_ATTR_ACTION_OP, action, &arg_list);
         }
     }
 
     return arg_list;
 }
 
 static gboolean
 st_child_term(gpointer data)
 {
     int rc = 0;
     stonith_action_t *track = data;
 
     crm_info("Child %d timed out, sending SIGTERM", track->pid);
     track->timer_sigterm = 0;
     track->last_timeout_signo = SIGTERM;
     rc = kill(-track->pid, SIGTERM);
     if (rc < 0) {
         crm_perror(LOG_ERR, "Couldn't send SIGTERM to %d", track->pid);
     }
     return FALSE;
 }
 
 static gboolean
 st_child_kill(gpointer data)
 {
     int rc = 0;
     stonith_action_t *track = data;
 
     crm_info("Child %d timed out, sending SIGKILL", track->pid);
     track->timer_sigkill = 0;
     track->last_timeout_signo = SIGKILL;
     rc = kill(-track->pid, SIGKILL);
     if (rc < 0) {
         crm_perror(LOG_ERR, "Couldn't send SIGKILL to %d", track->pid);
     }
     return FALSE;
 }
 
 static void
 stonith_action_clear_tracking_data(stonith_action_t * action)
 {
     if (action->timer_sigterm > 0) {
         g_source_remove(action->timer_sigterm);
         action->timer_sigterm = 0;
     }
     if (action->timer_sigkill > 0) {
         g_source_remove(action->timer_sigkill);
         action->timer_sigkill = 0;
     }
     if (action->fd_stdout) {
         close(action->fd_stdout);
         action->fd_stdout = 0;
     }
     free(action->output);
     action->output = NULL;
     action->rc = 0;
     action->pid = 0;
     action->last_timeout_signo = 0;
 }
 
 static void
 stonith_action_destroy(stonith_action_t * action)
 {
     stonith_action_clear_tracking_data(action);
     free(action->agent);
     free(action->args);
     free(action->action);
     free(action->victim);
     free(action);
 }
 
 #define FAILURE_MAX_RETRIES 2
 stonith_action_t *
 stonith_action_create(const char *agent,
                       const char *_action,
                       const char *victim,
                       uint32_t victim_nodeid,
                       int timeout, GHashTable * device_args, GHashTable * port_map)
 {
     stonith_action_t *action;
 
     action = calloc(1, sizeof(stonith_action_t));
     crm_info("Initiating action %s for agent %s (target=%s)", _action, agent, victim);
     action->args = make_args(_action, victim, victim_nodeid, device_args, port_map);
     action->agent = strdup(agent);
     action->action = strdup(_action);
     if (victim) {
         action->victim = strdup(victim);
     }
     action->timeout = action->remaining_timeout = timeout;
     action->max_retries = FAILURE_MAX_RETRIES;
 
     if (device_args) {
         char buffer[512];
         const char *value = NULL;
 
         snprintf(buffer, 511, "pcmk_%s_retries", _action);
         value = g_hash_table_lookup(device_args, buffer);
 
         if (value) {
             action->max_retries = atoi(value);
         }
     }
 
     return action;
 }
 
 #define READ_MAX 500
 static char *
 read_output(int fd)
 {
     char buffer[READ_MAX];
     char *output = NULL;
     int len = 0;
     int more = 0;
 
     if (!fd) {
         return NULL;
     }
 
     do {
         errno = 0;
         memset(&buffer, 0, READ_MAX);
         more = read(fd, buffer, READ_MAX - 1);
 
         if (more > 0) {
             buffer[more] = 0; /* Make sure its nul-terminated for logging
                               * 'more' is always less than our buffer size
                               */
             crm_trace("Got %d more bytes: %.200s...", more, buffer);
             output = realloc(output, len + more + 1);
             snprintf(output + len, more + 1, "%s", buffer);
             len += more;
         }
 
     } while (more == (READ_MAX - 1) || (more < 0 && errno == EINTR));
 
     return output;
 }
 
 static gboolean
 update_remaining_timeout(stonith_action_t * action)
 {
     int diff = time(NULL) - action->initial_start_time;
 
     if (action->tries >= action->max_retries) {
         crm_info("Attempted to execute agent %s (%s) the maximum number of times (%d) allowed",
                  action->agent, action->action, action->max_retries);
         action->remaining_timeout = 0;
     } else if ((action->rc != -ETIME) && diff < (action->timeout * 0.7)) {
         /* only set remaining timeout period if there is 30%
          * or greater of the original timeout period left */
         action->remaining_timeout = action->timeout - diff;
     } else {
         action->remaining_timeout = 0;
     }
     return action->remaining_timeout ? TRUE : FALSE;
 }
 
 static void
 stonith_action_async_done(mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode)
 {
     stonith_action_t *action = mainloop_child_userdata(p);
 
     if (action->timer_sigterm > 0) {
         g_source_remove(action->timer_sigterm);
     }
     if (action->timer_sigkill > 0) {
         g_source_remove(action->timer_sigkill);
     }
 
     if (action->last_timeout_signo) {
         action->rc = -ETIME;
         crm_notice("Child process %d performing action '%s' timed out with signal %d",
                    pid, action->action, action->last_timeout_signo);
 
     } else if (signo) {
         action->rc = -ECONNABORTED;
         crm_notice("Child process %d performing action '%s' timed out with signal %d",
                    pid, action->action, signo);
 
     } else {
         action->rc = exitcode;
         crm_debug("Child process %d performing action '%s' exited with rc %d",
                   pid, action->action, exitcode);
     }
 
     action->output = read_output(action->fd_stdout);
 
     if (action->rc != pcmk_ok && update_remaining_timeout(action)) {
         int rc = internal_stonith_action_execute(action);
         if (rc == pcmk_ok) {
             return;
         }
     }
 
     if (action->done_cb) {
         action->done_cb(pid, action->rc, action->output, action->userdata);
     }
 
     stonith_action_destroy(action);
 }
 
 static int
 internal_stonith_action_execute(stonith_action_t * action)
 {
     int pid, status, len, rc = -EPROTO;
     int ret;
     int total = 0;
     int p_read_fd, p_write_fd;  /* parent read/write file descriptors */
     int c_read_fd, c_write_fd;  /* child read/write file descriptors */
     int fd1[2];
     int fd2[2];
     int is_retry = 0;
 
     /* clear any previous tracking data */
     stonith_action_clear_tracking_data(action);
 
     if (!action->tries) {
         action->initial_start_time = time(NULL);
     }
     action->tries++;
 
     if (action->tries > 1) {
         crm_info("Attempt %d to execute %s (%s). remaining timeout is %d",
                  action->tries, action->agent, action->action, action->remaining_timeout);
         is_retry = 1;
     }
 
     c_read_fd = c_write_fd = p_read_fd = p_write_fd = -1;
 
     if (action->args == NULL || action->agent == NULL)
         goto fail;
     len = strlen(action->args);
 
     if (pipe(fd1))
         goto fail;
     p_read_fd = fd1[0];
     c_write_fd = fd1[1];
 
     if (pipe(fd2))
         goto fail;
     c_read_fd = fd2[0];
     p_write_fd = fd2[1];
 
     crm_debug("forking");
     pid = fork();
     if (pid < 0) {
         rc = -ECHILD;
         goto fail;
     }
 
     if (!pid) {
         /* child */
         setpgid(0, 0);
 
         close(1);
         /* coverity[leaked_handle] False positive */
         if (dup(c_write_fd) < 0)
             goto fail;
         close(2);
         /* coverity[leaked_handle] False positive */
         if (dup(c_write_fd) < 0)
             goto fail;
         close(0);
         /* coverity[leaked_handle] False positive */
         if (dup(c_read_fd) < 0)
             goto fail;
 
         /* keep c_write_fd open so parent can report all errors. */
         close(c_read_fd);
         close(p_read_fd);
         close(p_write_fd);
 
         /* keep retries from executing out of control */
         if (is_retry) {
             sleep(1);
         }
         execlp(action->agent, action->agent, NULL);
         exit(EXIT_FAILURE);
     }
 
     /* parent */
     action->pid = pid;
     ret = fcntl(p_read_fd, F_SETFL, fcntl(p_read_fd, F_GETFL, 0) | O_NONBLOCK);
     if (ret < 0) {
         crm_perror(LOG_NOTICE, "Could not change the output of %s to be non-blocking",
                    action->agent);
     }
 
     do {
         crm_debug("sending args");
         ret = write(p_write_fd, action->args + total, len - total);
         if (ret > 0) {
             total += ret;
         }
 
     } while (errno == EINTR && total < len);
 
     if (total != len) {
         crm_perror(LOG_ERR, "Sent %d not %d bytes", total, len);
         if (ret >= 0) {
             rc = -ECOMM;
         }
         goto fail;
     }
 
     close(p_write_fd); p_write_fd = -1;
 
     /* async */
     if (action->async) {
         action->fd_stdout = p_read_fd;
         mainloop_child_add(pid, 0/* Move the timeout here? */, action->action, action, stonith_action_async_done);
         crm_trace("Op: %s on %s, pid: %d, timeout: %ds", action->action, action->agent, pid,
                   action->remaining_timeout);
         action->last_timeout_signo = 0;
         if (action->remaining_timeout) {
             action->timer_sigterm =
                 g_timeout_add(1000 * action->remaining_timeout, st_child_term, action);
             action->timer_sigkill =
                 g_timeout_add(1000 * (action->remaining_timeout + 5), st_child_kill, action);
         } else {
             crm_err("No timeout set for stonith operation %s with device %s",
                     action->action, action->agent);
         }
 
         close(c_write_fd);
         close(c_read_fd);
         return 0;
 
     } else {
         /* sync */
         int timeout = action->remaining_timeout + 1;
         pid_t p = 0;
 
         while (action->remaining_timeout < 0 || timeout > 0) {
             p = waitpid(pid, &status, WNOHANG);
             if (p > 0) {
                 break;
             }
             sleep(1);
             timeout--;
         }
 
         if (timeout == 0) {
             int killrc = kill(-pid, SIGKILL);
 
             if (killrc && errno != ESRCH) {
                 crm_err("kill(%d, KILL) failed: %s (%d)", pid, pcmk_strerror(errno), errno);
             }
             /*
              * From sigprocmask(2):
              * It is not possible to block SIGKILL or SIGSTOP.  Attempts to do so are silently ignored.
              *
              * This makes it safe to skip WNOHANG here
              */
             p = waitpid(pid, &status, 0);
         }
 
         if (p <= 0) {
             crm_perror(LOG_ERR, "waitpid(%d)", pid);
 
         } else if (p != pid) {
             crm_err("Waited for %d, got %d", pid, p);
         }
 
         action->output = read_output(p_read_fd);
 
         action->rc = -ECONNABORTED;
         rc = action->rc;
         if (timeout == 0) {
             action->rc = -ETIME;
         } else if (WIFEXITED(status)) {
             crm_debug("result = %d", WEXITSTATUS(status));
             action->rc = -WEXITSTATUS(status);
             rc = 0;
 
         } else if (WIFSIGNALED(status)) {
             crm_err("call %s for %s exited due to signal %d", action->action, action->agent,
                     WTERMSIG(status));
 
         } else {
             crm_err("call %s for %s exited abnormally. stopped=%d, continued=%d",
                     action->action, action->agent, WIFSTOPPED(status), WIFCONTINUED(status));
         }
     }
 
   fail:
 
     if (p_read_fd >= 0) {
         close(p_read_fd);
     }
     if (p_write_fd >= 0) {
         close(p_write_fd);
     }
 
     if (c_read_fd >= 0) {
         close(c_read_fd);
     }
     if (c_write_fd >= 0) {
         close(c_write_fd);
     }
 
     return rc;
 }
 
 GPid
 stonith_action_execute_async(stonith_action_t * action,
                              void *userdata,
                              void (*done) (GPid pid, int rc, const char *output,
                                            gpointer user_data))
 {
     int rc = 0;
 
     if (!action) {
         return -1;
     }
 
     action->userdata = userdata;
     action->done_cb = done;
     action->async = 1;
 
     rc = internal_stonith_action_execute(action);
 
     return rc < 0 ? rc : action->pid;
 }
 
 int
 stonith_action_execute(stonith_action_t * action, int *agent_result, char **output)
 {
     int rc = 0;
 
     if (!action) {
         return -1;
     }
 
     do {
         rc = internal_stonith_action_execute(action);
         if (rc == pcmk_ok) {
             /* success! */
             break;
         }
         /* keep retrying while we have time left */
     } while (update_remaining_timeout(action));
 
     if (rc) {
         /* error */
         return rc;
     }
 
     if (agent_result) {
         *agent_result = action->rc;
     }
     if (output) {
         *output = action->output;
         action->output = NULL;  /* handed it off, do not free */
     }
 
     stonith_action_destroy(action);
     return rc;
 }
 
 static int
 stonith_api_device_list(stonith_t * stonith, int call_options, const char *namespace,
                         stonith_key_value_t ** devices, int timeout)
 {
     int count = 0;
 
     if (devices == NULL) {
         crm_err("Parameter error: stonith_api_device_list");
         return -EFAULT;
     }
 
     /* Include Heartbeat agents */
     if (namespace == NULL || safe_str_eq("heartbeat", namespace)) {
 #if HAVE_STONITH_STONITH_H
         static gboolean need_init = TRUE;
 
         char **entry = NULL;
         char **type_list = NULL;
         static char **(*type_list_fn) (void) = NULL;
         static void (*type_free_fn) (char **) = NULL;
 
         if (need_init) {
             need_init = FALSE;
             type_list_fn =
                 find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_types", FALSE);
             type_free_fn =
                 find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_free_hostlist",
                                       FALSE);
         }
 
         if (type_list_fn) {
             type_list = (*type_list_fn) ();
         }
 
         for (entry = type_list; entry != NULL && *entry; ++entry) {
             crm_trace("Added: %s", *entry);
             *devices = stonith_key_value_add(*devices, NULL, *entry);
             count++;
         }
         if (type_list && type_free_fn) {
             (*type_free_fn) (type_list);
         }
 #else
         if (namespace != NULL) {
             return -EINVAL;     /* Heartbeat agents not supported */
         }
 #endif
     }
 
     /* Include Red Hat agents, basically: ls -1 @sbin_dir@/fence_* */
     if (namespace == NULL || safe_str_eq("redhat", namespace)) {
         struct dirent **namelist;
         int file_num = scandir(RH_STONITH_DIR, &namelist, 0, alphasort);
 
         if (file_num > 0) {
             struct stat prop;
             char buffer[FILENAME_MAX + 1];
 
             while (file_num--) {
                 if ('.' == namelist[file_num]->d_name[0]) {
                     free(namelist[file_num]);
                     continue;
 
                 } else if (0 != strncmp(RH_STONITH_PREFIX,
                                         namelist[file_num]->d_name, strlen(RH_STONITH_PREFIX))) {
                     free(namelist[file_num]);
                     continue;
                 }
 
                 snprintf(buffer, FILENAME_MAX, "%s/%s", RH_STONITH_DIR, namelist[file_num]->d_name);
                 if (stat(buffer, &prop) == 0 && S_ISREG(prop.st_mode)) {
                     *devices = stonith_key_value_add(*devices, NULL, namelist[file_num]->d_name);
                     count++;
                 }
 
                 free(namelist[file_num]);
             }
             free(namelist);
         }
     }
 
     return count;
 }
 
 #if HAVE_STONITH_STONITH_H
 static inline char *
 strdup_null(const char *val)
 {
     if (val) {
         return strdup(val);
     }
     return NULL;
 }
 
 static void
 stonith_plugin(int priority, const char *fmt, ...)
 G_GNUC_PRINTF(2, 3);
 
 static void
 stonith_plugin(int priority, const char *fmt, ...)
 {
     va_list args;
     char *str;
     int err = errno;
 
     va_start(args, fmt);
     str = g_strdup_vprintf(fmt, args);
     va_end(args);
     do_crm_log_alias(priority, __FILE__, __func__, __LINE__, "%s", str);
     g_free(str);
     errno = err;
 }
 #endif
 
/* Fetch and massage XML metadata for a fence agent.
 *
 * For RHCS-style agents the agent's own "metadata" action is executed and the
 * result is edited: start/stop actions are injected if missing, and the
 * "port" parameter is marked optional.  For LHA ("heartbeat") agents, the
 * document is assembled from the plugin's info strings via META_TEMPLATE.
 *
 * \param[in]  stonith       Connection object (unused here)
 * \param[in]  call_options  Unused here
 * \param[in]  agent         Fence agent name
 * \param[in]  namespace     Preferred namespace, or NULL to auto-detect
 * \param[out] output        If non-NULL, assigned a newly allocated XML string
 *                           that the caller must free()
 * \param[in]  timeout       Unused here (a 5s timeout is hard-coded below)
 * \return 0 (or the agent's rc) on success, -EINVAL on failure
 */
static int
stonith_api_device_metadata(stonith_t * stonith, int call_options, const char *agent,
                            const char *namespace, char **output, int timeout)
{
    int rc = 0;
    char *buffer = NULL;
    /* NOTE(review): provider is NULL for unknown agents, which falls through
     * to the heartbeat branch below -- confirm that fallback is intended */
    const char *provider = get_stonith_provider(agent, namespace);

    crm_trace("looking up %s/%s metadata", agent, provider);

    /* By having this in a library, we can access it from stonith_admin
     *  when neither lrmd or stonith-ng are running
     * Important for the crm shell's validations...
     */

    if (safe_str_eq(provider, "redhat")) {
        /* Run the agent's own "metadata" action with a hard-coded 5s timeout */
        stonith_action_t *action = stonith_action_create(agent, "metadata", NULL, 0, 5, NULL, NULL);
        int exec_rc = stonith_action_execute(action, &rc, &buffer);

        if (exec_rc < 0 || rc != 0 || buffer == NULL) {
            crm_debug("Query failed: %d %d: %s", exec_rc, rc, crm_str(buffer));
            free(buffer);       /* Just in case */
            return -EINVAL;

        } else {

            xmlNode *xml = string2xml(buffer);
            xmlNode *actions = NULL;
            xmlXPathObject *xpathObj = NULL;

            xpathObj = xpath_search(xml, "//actions");
            if (numXpathResults(xpathObj) > 0) {
                actions = getXpathResult(xpathObj, 0);
            }

            freeXpathObject(xpathObj);

            /* Now fudge the metadata so that the start/stop actions appear */
            xpathObj = xpath_search(xml, "//action[@name='stop']");
            if (numXpathResults(xpathObj) <= 0) {
                xmlNode *tmp = NULL;

                /* NOTE(review): if no <actions> element was found above,
                 * actions is NULL and these nodes are created detached from
                 * the document -- they would be lost/leaked; verify agents
                 * always provide an <actions> section */
                tmp = create_xml_node(actions, "action");
                crm_xml_add(tmp, "name", "stop");
                crm_xml_add(tmp, "timeout", "20s");

                tmp = create_xml_node(actions, "action");
                crm_xml_add(tmp, "name", "start");
                crm_xml_add(tmp, "timeout", "20s");
            }

            freeXpathObject(xpathObj);

            /* Now fudge the metadata so that the port isn't required in the configuration */
            xpathObj = xpath_search(xml, "//parameter[@name='port']");
            if (numXpathResults(xpathObj) > 0) {
                /* We'll fill this in */
                xmlNode *tmp = getXpathResult(xpathObj, 0);

                crm_xml_add(tmp, "required", "0");
            }

            freeXpathObject(xpathObj);
            free(buffer);
            buffer = dump_xml_formatted(xml);
            free_xml(xml);
            if (!buffer) {
                return -EINVAL;
            }
        }

    } else {
#if !HAVE_STONITH_STONITH_H
        return -EINVAL;         /* Heartbeat agents not supported */
#else
        int bufferlen = 0;
        static const char *no_parameter_info = "<!-- no value -->";

        Stonith *stonith_obj = NULL;

        /* LHA plugin entry points, resolved from the shared library once
         * and cached across calls */
        static gboolean need_init = TRUE;
        static Stonith *(*st_new_fn) (const char *) = NULL;
        static const char *(*st_info_fn) (Stonith *, int) = NULL;
        static void (*st_del_fn) (Stonith *) = NULL;
        static void (*st_log_fn) (Stonith *, PILLogFun) = NULL;

        if (need_init) {
            need_init = FALSE;
            st_new_fn =
                find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_new", FALSE);
            st_del_fn =
                find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_delete",
                                      FALSE);
            st_log_fn =
                find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_set_log",
                                      FALSE);
            st_info_fn =
                find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_get_info",
                                      FALSE);
        }

        if (lha_agents_lib && st_new_fn && st_del_fn && st_info_fn && st_log_fn) {
            char *xml_meta_longdesc = NULL;
            char *xml_meta_shortdesc = NULL;

            char *meta_param = NULL;
            char *meta_longdesc = NULL;
            char *meta_shortdesc = NULL;

            stonith_obj = (*st_new_fn) (agent);
            if (stonith_obj) {
                /* Route the plugin's log output through our logger */
                (*st_log_fn) (stonith_obj, (PILLogFun) & stonith_plugin);
                meta_longdesc = strdup_null((*st_info_fn) (stonith_obj, ST_DEVICEDESCR));
                if (meta_longdesc == NULL) {
                    crm_warn("no long description in %s's metadata.", agent);
                    meta_longdesc = strdup(no_parameter_info);
                }

                meta_shortdesc = strdup_null((*st_info_fn) (stonith_obj, ST_DEVICEID));
                if (meta_shortdesc == NULL) {
                    crm_warn("no short description in %s's metadata.", agent);
                    meta_shortdesc = strdup(no_parameter_info);
                }

                meta_param = strdup_null((*st_info_fn) (stonith_obj, ST_CONF_XML));
                if (meta_param == NULL) {
                    crm_warn("no list of parameters in %s's metadata.", agent);
                    meta_param = strdup(no_parameter_info);
                }
                (*st_del_fn) (stonith_obj);
            } else {
                return -EINVAL; /* Heartbeat agents not supported */
            }

            /* Escape the description text before substituting into the XML template */
            xml_meta_longdesc =
                (char *)xmlEncodeEntitiesReentrant(NULL, (const unsigned char *)meta_longdesc);
            xml_meta_shortdesc =
                (char *)xmlEncodeEntitiesReentrant(NULL, (const unsigned char *)meta_shortdesc);

            bufferlen = strlen(META_TEMPLATE) + strlen(agent)
                + strlen(xml_meta_longdesc) + strlen(xml_meta_shortdesc)
                + strlen(meta_param) + 1;

            buffer = calloc(1, bufferlen);
            snprintf(buffer, bufferlen - 1, META_TEMPLATE,
                     agent, xml_meta_longdesc, xml_meta_shortdesc, meta_param);

            xmlFree(xml_meta_longdesc);
            xmlFree(xml_meta_shortdesc);

            free(meta_shortdesc);
            free(meta_longdesc);
            free(meta_param);
        }
#endif
    }

    /* Transfer ownership of buffer to the caller, or discard it */
    if (output) {
        *output = buffer;

    } else {
        free(buffer);
    }

    return rc;
}
 
 static int
 stonith_api_query(stonith_t * stonith, int call_options, const char *target,
                   stonith_key_value_t ** devices, int timeout)
 {
     int rc = 0, lpc = 0, max = 0;
 
     xmlNode *data = NULL;
     xmlNode *output = NULL;
     xmlXPathObjectPtr xpathObj = NULL;
 
     CRM_CHECK(devices != NULL, return -EINVAL);
 
     data = create_xml_node(NULL, F_STONITH_DEVICE);
     crm_xml_add(data, "origin", __FUNCTION__);
     crm_xml_add(data, F_STONITH_TARGET, target);
     crm_xml_add(data, F_STONITH_ACTION, "off");
     rc = stonith_send_command(stonith, STONITH_OP_QUERY, data, &output, call_options, timeout);
 
     if (rc < 0) {
         return rc;
     }
 
     xpathObj = xpath_search(output, "//@agent");
     if (xpathObj) {
         max = numXpathResults(xpathObj);
 
         for (lpc = 0; lpc < max; lpc++) {
             xmlNode *match = getXpathResult(xpathObj, lpc);
 
             CRM_CHECK(match != NULL, continue);
 
             crm_info("%s[%d] = %s", "//@agent", lpc, xmlGetNodePath(match));
             *devices = stonith_key_value_add(*devices, NULL, crm_element_value(match, XML_ATTR_ID));
         }
 
         freeXpathObject(xpathObj);
     }
 
     free_xml(output);
     free_xml(data);
     return max;
 }
 
 static int
 stonith_api_call(stonith_t * stonith,
                  int call_options,
                  const char *id,
                  const char *action, const char *victim, int timeout, xmlNode ** output)
 {
     int rc = 0;
     xmlNode *data = NULL;
 
     data = create_xml_node(NULL, F_STONITH_DEVICE);
     crm_xml_add(data, "origin", __FUNCTION__);
     crm_xml_add(data, F_STONITH_DEVICE, id);
     crm_xml_add(data, F_STONITH_ACTION, action);
     crm_xml_add(data, F_STONITH_TARGET, victim);
 
     rc = stonith_send_command(stonith, STONITH_OP_EXEC, data, output, call_options, timeout);
     free_xml(data);
 
     return rc;
 }
 
 static int
 stonith_api_list(stonith_t * stonith, int call_options, const char *id, char **list_info,
                  int timeout)
 {
     int rc;
     xmlNode *output = NULL;
 
     rc = stonith_api_call(stonith, call_options, id, "list", NULL, timeout, &output);
 
     if (output && list_info) {
         const char *list_str;
 
         list_str = crm_element_value(output, "st_output");
 
         if (list_str) {
             *list_info = strdup(list_str);
         }
     }
 
     if (output) {
         free_xml(output);
     }
 
     return rc;
 }
 
 static int
 stonith_api_monitor(stonith_t * stonith, int call_options, const char *id, int timeout)
 {
     return stonith_api_call(stonith, call_options, id, "monitor", NULL, timeout, NULL);
 }
 
 static int
 stonith_api_status(stonith_t * stonith, int call_options, const char *id, const char *port,
                    int timeout)
 {
     return stonith_api_call(stonith, call_options, id, "status", port, timeout, NULL);
 }
 
 static int
 stonith_api_fence(stonith_t * stonith, int call_options, const char *node, const char *action,
                   int timeout, int tolerance)
 {
     int rc = 0;
     xmlNode *data = NULL;
 
     data = create_xml_node(NULL, __FUNCTION__);
     crm_xml_add(data, F_STONITH_TARGET, node);
     crm_xml_add(data, F_STONITH_ACTION, action);
     crm_xml_add_int(data, F_STONITH_TIMEOUT, timeout);
     crm_xml_add_int(data, F_STONITH_TOLERANCE, tolerance);
 
     rc = stonith_send_command(stonith, STONITH_OP_FENCE, data, NULL, call_options, timeout);
     free_xml(data);
 
     return rc;
 }
 
 static int
 stonith_api_confirm(stonith_t * stonith, int call_options, const char *target)
 {
     return stonith_api_fence(stonith, call_options | st_opt_manual_ack, target, "off", 0, 0);
 }
 
 static int
 stonith_api_history(stonith_t * stonith, int call_options, const char *node,
                     stonith_history_t ** history, int timeout)
 {
     int rc = 0;
     xmlNode *data = NULL;
     xmlNode *output = NULL;
     stonith_history_t *last = NULL;
 
     *history = NULL;
 
     if (node) {
         data = create_xml_node(NULL, __FUNCTION__);
         crm_xml_add(data, F_STONITH_TARGET, node);
     }
 
     rc = stonith_send_command(stonith, STONITH_OP_FENCE_HISTORY, data, &output,
                               call_options | st_opt_sync_call, timeout);
     free_xml(data);
 
     if (rc == 0) {
         xmlNode *op = NULL;
         xmlNode *reply = get_xpath_object("//" F_STONITH_HISTORY_LIST, output, LOG_ERR);
 
         for (op = __xml_first_child(reply); op != NULL; op = __xml_next(op)) {
             stonith_history_t *kvp;
 
             kvp = calloc(1, sizeof(stonith_history_t));
             kvp->target = crm_element_value_copy(op, F_STONITH_TARGET);
             kvp->action = crm_element_value_copy(op, F_STONITH_ACTION);
             kvp->origin = crm_element_value_copy(op, F_STONITH_ORIGIN);
             kvp->delegate = crm_element_value_copy(op, F_STONITH_DELEGATE);
             kvp->client = crm_element_value_copy(op, F_STONITH_CLIENTNAME);
             crm_element_value_int(op, F_STONITH_DATE, &kvp->completed);
             crm_element_value_int(op, F_STONITH_STATE, &kvp->state);
 
             if (last) {
                 last->next = kvp;
             } else {
                 *history = kvp;
             }
             last = kvp;
         }
     }
     return rc;
 }
 
 gboolean
 is_redhat_agent(const char *agent)
 {
     int rc = 0;
     struct stat prop;
     char buffer[FILENAME_MAX + 1];
 
     snprintf(buffer, FILENAME_MAX, "%s/%s", RH_STONITH_DIR, agent);
     rc = stat(buffer, &prop);
     if (rc >= 0 && S_ISREG(prop.st_mode)) {
         return TRUE;
     }
     return FALSE;
 }
 
/* Determine which namespace provides the given fence agent.
 *
 * \param[in] agent     Fence agent name
 * \param[in] provider  NOTE(review): this parameter is never read here --
 *                      callers pass a requested namespace, but detection is
 *                      purely by probing; confirm whether it should be honored
 * \return "redhat" if the agent is an RHCS agent, "heartbeat" if the LHA
 *         plugin library can instantiate it, or NULL if not found
 */
const char *
get_stonith_provider(const char *agent, const char *provider)
{
    /* This function sucks */
    if (is_redhat_agent(agent)) {
        return "redhat";

#if HAVE_STONITH_STONITH_H
    } else {
        Stonith *stonith_obj = NULL;

        /* LHA plugin entry points, resolved once and cached across calls */
        static gboolean need_init = TRUE;
        static Stonith *(*st_new_fn) (const char *) = NULL;
        static void (*st_del_fn) (Stonith *) = NULL;

        if (need_init) {
            need_init = FALSE;
            st_new_fn =
                find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_new", FALSE);
            st_del_fn =
                find_library_function(&lha_agents_lib, LHA_STONITH_LIBRARY, "stonith_delete",
                                      FALSE);
        }

        if (lha_agents_lib && st_new_fn && st_del_fn) {
            /* If the plugin library can instantiate the agent, it exists */
            stonith_obj = (*st_new_fn) (agent);
            if (stonith_obj) {
                (*st_del_fn) (stonith_obj);
                return "heartbeat";
            }
        }
#endif
    }

    crm_err("No such device: %s", agent);
    return NULL;
}
 
 static gint
 stonithlib_GCompareFunc(gconstpointer a, gconstpointer b)
 {
     int rc = 0;
     const stonith_notify_client_t *a_client = a;
     const stonith_notify_client_t *b_client = b;
 
     CRM_CHECK(a_client->event != NULL && b_client->event != NULL, return 0);
     rc = strcmp(a_client->event, b_client->event);
     if (rc == 0) {
         if (a_client->notify == NULL || b_client->notify == NULL) {
             return 0;
 
         } else if (a_client->notify == b_client->notify) {
             return 0;
 
         } else if (((long)a_client->notify) < ((long)b_client->notify)) {
             crm_err("callbacks for %s are not equal: %p vs. %p",
                     a_client->event, a_client->notify, b_client->notify);
             return -1;
         }
         crm_err("callbacks for %s are not equal: %p vs. %p",
                 a_client->event, a_client->notify, b_client->notify);
         return 1;
     }
     return rc;
 }
 
 xmlNode *
 stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data, int call_options)
 {
     xmlNode *op_msg = create_xml_node(NULL, "stonith_command");
 
     CRM_CHECK(op_msg != NULL, return NULL);
     CRM_CHECK(token != NULL, return NULL);
 
     crm_xml_add(op_msg, F_XML_TAGNAME, "stonith_command");
 
     crm_xml_add(op_msg, F_TYPE, T_STONITH_NG);
     crm_xml_add(op_msg, F_STONITH_CALLBACK_TOKEN, token);
     crm_xml_add(op_msg, F_STONITH_OPERATION, op);
     crm_xml_add_int(op_msg, F_STONITH_CALLID, call_id);
     crm_trace("Sending call options: %.8lx, %d", (long)call_options, call_options);
     crm_xml_add_int(op_msg, F_STONITH_CALLOPTS, call_options);
 
     if (data != NULL) {
         add_message_xml(op_msg, F_STONITH_CALLDATA, data);
     }
 
     return op_msg;
 }
 
 static void
 stonith_destroy_op_callback(gpointer data)
 {
     stonith_callback_client_t *blob = data;
 
     if (blob->timer && blob->timer->ref > 0) {
         g_source_remove(blob->timer->ref);
     }
     free(blob->timer);
     free(blob);
 }
 
 static int
 stonith_api_signoff(stonith_t * stonith)
 {
     stonith_private_t *native = stonith->private;
 
     crm_debug("Signing out of the STONITH Service");
 
     if (native->source != NULL) {
         /* Attached to mainloop */
         mainloop_del_ipc_client(native->source);
         native->source = NULL;
         native->ipc = NULL;
 
     } else if (native->ipc) {
         /* Not attached to mainloop */
         crm_ipc_t *ipc = native->ipc;
 
         native->ipc = NULL;
         crm_ipc_close(ipc);
         crm_ipc_destroy(ipc);
     }
 
     free(native->token); native->token = NULL;
     stonith->state = stonith_disconnected;
     return pcmk_ok;
 }
 
/* Connect to the stonith-ng daemon and register this client.
 *
 * \param[in]  stonith     Connection object
 * \param[in]  name        Client name to register with the daemon
 * \param[out] stonith_fd  If non-NULL, connect without mainloop integration
 *                         and return the IPC file descriptor here
 * \return pcmk_ok on success, negative errno-style code otherwise
 *         (the connection is torn down again on failure)
 */
static int
stonith_api_signon(stonith_t * stonith, const char *name, int *stonith_fd)
{
    int rc = pcmk_ok;
    stonith_private_t *native = stonith->private;

    static struct ipc_client_callbacks st_callbacks = {
        .dispatch = stonith_dispatch_internal,
        .destroy = stonith_connection_destroy
    };

    crm_trace("Connecting command channel");

    stonith->state = stonith_connected_command;
    if (stonith_fd) {
        /* No mainloop */
        native->ipc = crm_ipc_new("stonith-ng", 0);

        if (native->ipc && crm_ipc_connect(native->ipc)) {
            *stonith_fd = crm_ipc_get_fd(native->ipc);

        } else if (native->ipc) {
            rc = -ENOTCONN;
        }

    } else {
        /* With mainloop */
        native->source =
            mainloop_add_ipc_client("stonith-ng", G_PRIORITY_MEDIUM, 0, stonith, &st_callbacks);
        native->ipc = mainloop_get_ipc_client(native->source);
    }

    if (native->ipc == NULL) {
        crm_debug("Could not connect to the Stonith API");
        rc = -ENOTCONN;
    }

    if (rc == pcmk_ok) {
        /* Register with the daemon and record the token it assigns us;
         * the token authenticates subsequent commands */
        xmlNode *reply = NULL;
        xmlNode *hello = create_xml_node(NULL, "stonith_command");

        crm_xml_add(hello, F_TYPE, T_STONITH_NG);
        crm_xml_add(hello, F_STONITH_OPERATION, CRM_OP_REGISTER);
        crm_xml_add(hello, F_STONITH_CLIENTNAME, name);
        rc = crm_ipc_send(native->ipc, hello, crm_ipc_client_response, -1, &reply);

        if (rc < 0) {
            crm_perror(LOG_DEBUG, "Couldn't complete registration with the fencing API: %d", rc);
            rc = -ECOMM;

        } else if (reply == NULL) {
            crm_err("Did not receive registration reply");
            rc = -EPROTO;

        } else {
            const char *msg_type = crm_element_value(reply, F_STONITH_OPERATION);
            const char *tmp_ticket = crm_element_value(reply, F_STONITH_CLIENTID);

            if (safe_str_neq(msg_type, CRM_OP_REGISTER)) {
                crm_err("Invalid registration message: %s", msg_type);
                crm_log_xml_err(reply, "Bad reply");
                rc = -EPROTO;

            } else if (tmp_ticket == NULL) {
                crm_err("No registration token provided");
                crm_log_xml_err(reply, "Bad reply");
                rc = -EPROTO;

            } else {
                crm_trace("Obtained registration token: %s", tmp_ticket);
                native->token = strdup(tmp_ticket);
                rc = pcmk_ok;
            }
        }

        free_xml(reply);
        free_xml(hello);
    }

    if (rc == pcmk_ok) {
#if HAVE_MSGFROMIPC_TIMEOUT
        stonith->call_timeout = MAX_IPC_DELAY;
#endif
        crm_debug("Connection to STONITH successful");
        return pcmk_ok;
    }

    /* Any failure leaves the object disconnected */
    crm_debug("Connection to STONITH failed: %s", pcmk_strerror(rc));
    stonith->cmds->disconnect(stonith);
    return rc;
}
 
 static int
 stonith_set_notification(stonith_t * stonith, const char *callback, int enabled)
 {
     xmlNode *notify_msg = create_xml_node(NULL, __FUNCTION__);
     stonith_private_t *native = stonith->private;
 
     if (stonith->state != stonith_disconnected) {
         int rc;
 
         crm_xml_add(notify_msg, F_STONITH_OPERATION, T_STONITH_NOTIFY);
         if (enabled) {
             crm_xml_add(notify_msg, F_STONITH_NOTIFY_ACTIVATE, callback);
         } else {
             crm_xml_add(notify_msg, F_STONITH_NOTIFY_DEACTIVATE, callback);
         }
         rc = crm_ipc_send(native->ipc, notify_msg, crm_ipc_client_response, -1, NULL);
         if (rc < 0) {
             crm_perror(LOG_DEBUG, "Couldn't register for fencing notifications: %d", rc);
             rc = -ECOMM;
         }
     }
 
     free_xml(notify_msg);
     return pcmk_ok;
 }
 
 static int
 stonith_api_add_notification(stonith_t * stonith, const char *event,
                              void (*callback) (stonith_t * stonith, stonith_event_t * e))
 {
     GList *list_item = NULL;
     stonith_notify_client_t *new_client = NULL;
     stonith_private_t *private = NULL;
 
     private = stonith->private;
     crm_trace("Adding callback for %s events (%d)", event, g_list_length(private->notify_list));
 
     new_client = calloc(1, sizeof(stonith_notify_client_t));
     new_client->event = event;
     new_client->notify = callback;
 
     list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc);
 
     if (list_item != NULL) {
         crm_warn("Callback already present");
         free(new_client);
         return -ENOTUNIQ;
 
     } else {
         private->notify_list = g_list_append(private->notify_list, new_client);
 
         stonith_set_notification(stonith, event, 1);
 
         crm_trace("Callback added (%d)", g_list_length(private->notify_list));
     }
     return pcmk_ok;
 }
 
 static int
 stonith_api_del_notification(stonith_t * stonith, const char *event)
 {
     GList *list_item = NULL;
     stonith_notify_client_t *new_client = NULL;
     stonith_private_t *private = NULL;
 
     crm_debug("Removing callback for %s events", event);
 
     private = stonith->private;
     new_client = calloc(1, sizeof(stonith_notify_client_t));
     new_client->event = event;
     new_client->notify = NULL;
 
     list_item = g_list_find_custom(private->notify_list, new_client, stonithlib_GCompareFunc);
 
     stonith_set_notification(stonith, event, 0);
 
     if (list_item != NULL) {
         stonith_notify_client_t *list_client = list_item->data;
 
         private->notify_list = g_list_remove(private->notify_list, list_client);
         free(list_client);
 
         crm_trace("Removed callback");
 
     } else {
         crm_trace("Callback not present");
     }
     free(new_client);
     return pcmk_ok;
 }
 
 static gboolean
 stonith_async_timeout_handler(gpointer data)
 {
     struct timer_rec_s *timer = data;
 
     crm_err("Async call %d timed out after %dms", timer->call_id, timer->timeout);
     stonith_perform_callback(timer->stonith, NULL, timer->call_id, -ETIME);
 
     /* Always return TRUE, never remove the handler
      * We do that in stonith_del_callback()
      */
     return TRUE;
 }
 
 static void
 set_callback_timeout(stonith_callback_client_t * callback, stonith_t * stonith, int call_id,
                      int timeout)
 {
     struct timer_rec_s *async_timer = callback->timer;
 
     if (timeout <= 0) {
         return;
     }
 
     if (!async_timer) {
         async_timer = calloc(1, sizeof(struct timer_rec_s));
         callback->timer = async_timer;
     }
 
     async_timer->stonith = stonith;
     async_timer->call_id = call_id;
     /* Allow a fair bit of grace to allow the server to tell us of a timeout
      * This is only a fallback
      */
     async_timer->timeout = (timeout + 60) * 1000;
     if (async_timer->ref) {
         g_source_remove(async_timer->ref);
     }
     async_timer->ref =
         g_timeout_add(async_timer->timeout, stonith_async_timeout_handler, async_timer);
 }
 
 static void
 update_callback_timeout(int call_id, int timeout, stonith_t * st)
 {
     stonith_callback_client_t *callback = NULL;
     stonith_private_t *private = st->private;
 
     callback = g_hash_table_lookup(private->stonith_op_callback_table, GINT_TO_POINTER(call_id));
     if (!callback || !callback->allow_timeout_updates) {
         return;
     }
 
     set_callback_timeout(callback, st, call_id, timeout);
 }
 
 static void
 invoke_callback(stonith_t * st, int call_id, int rc, void *userdata,
                 void (*callback) (stonith_t * st, stonith_callback_data_t * data))
 {
     stonith_callback_data_t data = { 0, };
 
     data.call_id = call_id;
     data.rc = rc;
     data.userdata = userdata;
 
     callback(st, &data);
 }
 
/* Register a callback to be invoked when a stonith call completes.
 *
 * \param[in] call_id        Call ID returned by an async command; 0 sets the
 *                           global (all-calls) callback; negative means the
 *                           command already failed
 * \param[in] timeout        If positive, arm a local fallback timer
 * \param[in] options        st_opt_report_only_success, st_opt_timeout_updates
 * \param[in] user_data      Opaque pointer passed back to the callback
 * \param[in] callback_name  Label used in log messages (pointer is stored)
 * \return TRUE if the callback was registered, FALSE if the call had
 *         already failed
 */
static int
stonith_api_add_callback(stonith_t * stonith, int call_id, int timeout, int options,
                         void *user_data, const char *callback_name,
                         void (*callback) (stonith_t * st, stonith_callback_data_t * data))
{
    stonith_callback_client_t *blob = NULL;
    stonith_private_t *private = NULL;

    CRM_CHECK(stonith != NULL, return -EINVAL);
    CRM_CHECK(stonith->private != NULL, return -EINVAL);
    private = stonith->private;

    if (call_id == 0) {
        private->op_callback = callback;

    } else if (call_id < 0) {
        /* The command already failed; report it now unless the caller only
         * wants successes, and do not register anything */
        if (!(options & st_opt_report_only_success)) {
            crm_trace("Call failed, calling %s: %s", callback_name, pcmk_strerror(call_id));
            invoke_callback(stonith, call_id, call_id, user_data, callback);
        } else {
            crm_warn("STONITH call failed: %s", pcmk_strerror(call_id));
        }
        return FALSE;
    }

    /* NOTE(review): for call_id == 0 this also inserts a blob keyed 0 into
     * the per-call table in addition to setting op_callback above -- confirm
     * that is intended rather than returning early */
    blob = calloc(1, sizeof(stonith_callback_client_t));
    blob->id = callback_name;
    blob->only_success = (options & st_opt_report_only_success) ? TRUE : FALSE;
    blob->user_data = user_data;
    blob->callback = callback;
    blob->allow_timeout_updates = (options & st_opt_timeout_updates) ? TRUE : FALSE;

    if (timeout > 0) {
        set_callback_timeout(blob, stonith, call_id, timeout);
    }

    g_hash_table_insert(private->stonith_op_callback_table, GINT_TO_POINTER(call_id), blob);
    crm_trace("Added callback to %s for call %d", callback_name, call_id);

    return TRUE;
}
 
 static int
 stonith_api_del_callback(stonith_t * stonith, int call_id, bool all_callbacks)
 {
     stonith_private_t *private = stonith->private;
 
     if (all_callbacks) {
         private->op_callback = NULL;
         g_hash_table_destroy(private->stonith_op_callback_table);
         private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                                                    NULL,
                                                                    stonith_destroy_op_callback);
 
     } else if (call_id == 0) {
         private->op_callback = NULL;
 
     } else {
         g_hash_table_remove(private->stonith_op_callback_table, GINT_TO_POINTER(call_id));
     }
     return pcmk_ok;
 }
 
 static void
 stonith_dump_pending_op(gpointer key, gpointer value, gpointer user_data)
 {
     int call = GPOINTER_TO_INT(key);
     stonith_callback_client_t *blob = value;
 
     crm_debug("Call %d (%s): pending", call, crm_str(blob->id));
 }
 
 void
 stonith_dump_pending_callbacks(stonith_t * stonith)
 {
     stonith_private_t *private = stonith->private;
 
     if (private->stonith_op_callback_table == NULL) {
         return;
     }
     return g_hash_table_foreach(private->stonith_op_callback_table, stonith_dump_pending_op, NULL);
 }
 
/* Dispatch completion callbacks for a finished stonith call.
 *
 * \param[in] stonith  Connection object
 * \param[in] msg      Reply message; if non-NULL, call_id and rc are taken
 *                     from it, overriding the explicit parameters
 * \param[in] call_id  Call ID (used when msg is NULL, e.g. local timeouts)
 * \param[in] rc       Result code (used when msg is NULL)
 *
 * Invokes and then unregisters the per-call callback (if any), and always
 * invokes the global op_callback when one is set.
 */
void
stonith_perform_callback(stonith_t * stonith, xmlNode * msg, int call_id, int rc)
{
    stonith_private_t *private = NULL;
    stonith_callback_client_t *blob = NULL;
    stonith_callback_client_t local_blob;

    CRM_CHECK(stonith != NULL, return);
    CRM_CHECK(stonith->private != NULL, return);

    private = stonith->private;

    local_blob.id = NULL;
    local_blob.callback = NULL;
    local_blob.user_data = NULL;
    local_blob.only_success = FALSE;

    /* A message overrides the caller-supplied call_id and rc */
    if (msg != NULL) {
        crm_element_value_int(msg, F_STONITH_RC, &rc);
        crm_element_value_int(msg, F_STONITH_CALLID, &call_id);
    }

    CRM_CHECK(call_id > 0, crm_log_xml_err(msg, "Bad result"));

    blob = g_hash_table_lookup(private->stonith_op_callback_table, GINT_TO_POINTER(call_id));

    if (blob != NULL) {
        /* Copy the blob before removal frees it (and its timer) */
        local_blob = *blob;
        blob = NULL;

        stonith_api_del_callback(stonith, call_id, FALSE);

    } else {
        crm_trace("No callback found for call %d", call_id);
        local_blob.callback = NULL;
    }

    if (local_blob.callback != NULL && (rc == pcmk_ok || local_blob.only_success == FALSE)) {
        crm_trace("Invoking callback %s for call %d", crm_str(local_blob.id), call_id);
        invoke_callback(stonith, call_id, rc, local_blob.user_data, local_blob.callback);

    } else if (private->op_callback == NULL && rc != pcmk_ok) {
        /* No one will hear about this failure; at least log it */
        crm_warn("STONITH command failed: %s", pcmk_strerror(rc));
        crm_log_xml_debug(msg, "Failed STONITH Update");
    }

    if (private->op_callback != NULL) {
        crm_trace("Invoking global callback for call %d", call_id);
        invoke_callback(stonith, call_id, rc, NULL, private->op_callback);
    }
    crm_trace("OP callback activated.");
}
 
 /*
  <notify t="st_notify" subt="st_device_register" st_op="st_device_register" st_rc="0" >
    <st_calldata >
      <stonith_command t="stonith-ng" st_async_id="088fb640-431a-48b9-b2fc-c4ff78d0a2d9" st_op="st_device_register" st_callid="2" st_callopt="4096" st_timeout="0" st_clientid="088fb640-431a-48b9-b2fc-c4ff78d0a2d9" st_clientname="stonith-test" >
        <st_calldata >
          <st_device_id id="test-id" origin="create_device_registration_xml" agent="fence_virsh" namespace="stonith-ng" >
            <attributes ipaddr="localhost" pcmk-portmal="some-host=pcmk-1 pcmk-3=3,4" login="root" identity_file="/root/.ssh/id_dsa" />
          </st_device_id>
        </st_calldata>
      </stonith_command>
    </st_calldata>
  </notify>
 
  <notify t="st_notify" subt="st_notify_fence" st_op="st_notify_fence" st_rc="0" >
    <st_calldata >
      <st_notify_fence st_rc="0" st_target="some-host" st_op="st_fence" st_delegate="test-id" st_origin="61dd7759-e229-4be7-b1f8-ef49dd14d9f0" />
    </st_calldata>
  </notify>
 */
/* Convert a notification message into a stonith_event_t.
 *
 * \param[in] msg  Notification XML (see the example payloads above)
 * \return Newly allocated event; caller frees with event_free().
 *         Only fencing (T_STONITH_NOTIFY_FENCE) notifications get their
 *         detail fields populated; other types carry just the result code.
 */
static stonith_event_t *
xml_to_event(xmlNode * msg)
{
    /* NOTE(review): calloc() result is not checked before use */
    stonith_event_t *event = calloc(1, sizeof(stonith_event_t));
    const char *ntype = crm_element_value(msg, F_SUBTYPE);
    /* NOTE(review): ntype may be NULL here; g_strdup_printf then formats a
     * NULL %s -- confirm every notification carries F_SUBTYPE */
    char *data_addr = g_strdup_printf("//%s", ntype);
    xmlNode *data = get_xpath_object(data_addr, msg, LOG_DEBUG);

    crm_log_xml_trace(msg, "stonith_notify");

    crm_element_value_int(msg, F_STONITH_RC, &(event->result));

    if (safe_str_eq(ntype, T_STONITH_NOTIFY_FENCE)) {
        event->operation = crm_element_value_copy(msg, F_STONITH_OPERATION);

        if (data) {
            event->origin = crm_element_value_copy(data, F_STONITH_ORIGIN);
            event->action = crm_element_value_copy(data, F_STONITH_ACTION);
            event->target = crm_element_value_copy(data, F_STONITH_TARGET);
            event->executioner = crm_element_value_copy(data, F_STONITH_DELEGATE);
            event->id = crm_element_value_copy(data, F_STONITH_REMOTE_OP_ID);
            event->client_origin = crm_element_value_copy(data, F_STONITH_CLIENTNAME);
        } else {
            crm_err("No data for %s event", ntype);
            crm_log_xml_notice(msg, "BadEvent");
        }
    }

    g_free(data_addr);
    return event;
}
 
/* Release a stonith_event_t and all of its string members.
 * Members not populated by xml_to_event() are NULL, and free(NULL) is a no-op. */
static void
event_free(stonith_event_t * event)
{
    free(event->id);
    free(event->type);
    free(event->message);
    free(event->operation);
    free(event->origin);
    free(event->action);
    free(event->target);
    free(event->executioner);
    free(event->device);
    free(event->client_origin);
    free(event);
}
 
 static void
 stonith_send_notification(gpointer data, gpointer user_data)
 {
     struct notify_blob_s *blob = user_data;
     stonith_notify_client_t *entry = data;
     stonith_event_t *st_event = NULL;
     const char *event = NULL;
 
     if (blob->xml == NULL) {
         crm_warn("Skipping callback - NULL message");
         return;
     }
 
     event = crm_element_value(blob->xml, F_SUBTYPE);
 
     if (entry == NULL) {
         crm_warn("Skipping callback - NULL callback client");
         return;
 
     } else if (entry->notify == NULL) {
         crm_warn("Skipping callback - NULL callback");
         return;
 
     } else if (safe_str_neq(entry->event, event)) {
         crm_trace("Skipping callback - event mismatch %p/%s vs. %s", entry, entry->event, event);
         return;
     }
 
     st_event = xml_to_event(blob->xml);
 
     crm_trace("Invoking callback for %p/%s event...", entry, event);
     entry->notify(blob->stonith, st_event);
     crm_trace("Callback invoked...");
 
     event_free(st_event);
 }
 
 int
 stonith_send_command(stonith_t * stonith, const char *op, xmlNode * data, xmlNode ** output_data,
                      int call_options, int timeout)
 {
     int rc = 0;
     int reply_id = -1;
     enum crm_ipc_flags ipc_flags = crm_ipc_client_none;
 
     xmlNode *op_msg = NULL;
     xmlNode *op_reply = NULL;
 
     stonith_private_t *native = stonith->private;
 
     if (stonith->state == stonith_disconnected) {
         return -ENOTCONN;
     }
 
     if (output_data != NULL) {
         *output_data = NULL;
     }
 
     if (op == NULL) {
         crm_err("No operation specified");
         return -EINVAL;
     }
 
     if (call_options & st_opt_sync_call) {
         ipc_flags |= crm_ipc_client_response;
     }
 
     stonith->call_id++;
     /* prevent call_id from being negative (or zero) and conflicting
      *    with the stonith_errors enum
      * use 2 because we use it as (stonith->call_id - 1) below
      */
     if (stonith->call_id < 1) {
         stonith->call_id = 1;
     }
 
     CRM_CHECK(native->token != NULL,;
         );
     op_msg = stonith_create_op(stonith->call_id, native->token, op, data, call_options);
     if (op_msg == NULL) {
         return -EINVAL;
     }
 
     crm_xml_add_int(op_msg, F_STONITH_TIMEOUT, timeout);
     crm_trace("Sending %s message to STONITH service, Timeout: %ds", op, timeout);
 
     rc = crm_ipc_send(native->ipc, op_msg, ipc_flags, 1000 * (timeout + 60), &op_reply);
     free_xml(op_msg);
 
     if (rc < 0) {
         crm_perror(LOG_ERR, "Couldn't perform %s operation (timeout=%ds): %d", op, timeout, rc);
         rc = -ECOMM;
         goto done;
     }
 
     crm_log_xml_trace(op_reply, "Reply");
 
     if (!(call_options & st_opt_sync_call)) {
         crm_trace("Async call %d, returning", stonith->call_id);
         CRM_CHECK(stonith->call_id != 0, return -EPROTO);
         free_xml(op_reply);
 
         return stonith->call_id;
     }
 
     rc = pcmk_ok;
     crm_element_value_int(op_reply, F_STONITH_CALLID, &reply_id);
 
     if (reply_id == stonith->call_id) {
         crm_trace("Syncronous reply %d received", reply_id);
 
         if (crm_element_value_int(op_reply, F_STONITH_RC, &rc) != 0) {
             rc = -ENOMSG;
         }
 
         if ((call_options & st_opt_discard_reply) || output_data == NULL) {
             crm_trace("Discarding reply");
 
         } else {
             *output_data = op_reply;
             op_reply = NULL;    /* Prevent subsequent free */
         }
 
     } else if (reply_id <= 0) {
-        crm_err("Recieved bad reply: No id set");
+        crm_err("Received bad reply: No id set");
         crm_log_xml_err(op_reply, "Bad reply");
         free_xml(op_reply);
         rc = -ENOMSG;
 
     } else {
-        crm_err("Recieved bad reply: %d (wanted %d)", reply_id, stonith->call_id);
+        crm_err("Received bad reply: %d (wanted %d)", reply_id, stonith->call_id);
         crm_log_xml_err(op_reply, "Old reply");
         free_xml(op_reply);
         rc = -ENOMSG;
     }
 
   done:
     if (crm_ipc_connected(native->ipc) == FALSE) {
         crm_err("STONITH disconnected");
         stonith->state = stonith_disconnected;
     }
 
     free_xml(op_reply);
     return rc;
 }
 
 /* Not used with mainloop */
bool
stonith_dispatch(stonith_t * st)
{
    /* Drain and process every message currently pending on the stonith IPC
     * connection.  Intended for callers that poll instead of using the
     * mainloop (see comment above).  Returns FALSE once the connection is
     * observed closed, TRUE otherwise.
     */
    gboolean stay_connected = TRUE;
    stonith_private_t *private = NULL;

    CRM_ASSERT(st != NULL);
    private = st->private;

    while (crm_ipc_ready(private->ipc)) {

        if (crm_ipc_read(private->ipc) > 0) {
            const char *msg = crm_ipc_buffer(private->ipc);

            stonith_dispatch_internal(msg, strlen(msg), st);
        }

        /* Check after every read: a dispatch callback may have noticed the
         * daemon going away */
        if (crm_ipc_connected(private->ipc) == FALSE) {
            crm_err("Connection closed");
            stay_connected = FALSE;
        }
    }

    return stay_connected;
}
 
int
stonith_dispatch_internal(const char *buffer, ssize_t length, gpointer userdata)
{
    /* Parse one raw IPC message and route it:
     *   T_STONITH_NG            -> per-call callbacks
     *   T_STONITH_NOTIFY        -> every registered notification client
     *   T_STONITH_TIMEOUT_VALUE -> adjust a pending call's timeout
     * userdata is the stonith_t* connection.  Returns 1 if a message was
     * processed, 0 if the buffer did not parse as XML.
     */
    const char *type = NULL;
    struct notify_blob_s blob;

    stonith_t *st = userdata;
    stonith_private_t *private = NULL;

    CRM_ASSERT(st != NULL);
    private = st->private;

    blob.stonith = st;
    blob.xml = string2xml(buffer);
    if (blob.xml == NULL) {
        crm_warn("Received a NULL msg from STONITH service: %s.", buffer);
        return 0;
    }

    /* do callbacks */
    type = crm_element_value(blob.xml, F_TYPE);
    crm_trace("Activating %s callbacks...", type);

    if (safe_str_eq(type, T_STONITH_NG)) {
        stonith_perform_callback(st, blob.xml, 0, 0);

    } else if (safe_str_eq(type, T_STONITH_NOTIFY)) {
        g_list_foreach(private->notify_list, stonith_send_notification, &blob);
    } else if (safe_str_eq(type, T_STONITH_TIMEOUT_VALUE)) {
        int call_id = 0;
        int timeout = 0;

        crm_element_value_int(blob.xml, F_STONITH_TIMEOUT, &timeout);
        crm_element_value_int(blob.xml, F_STONITH_CALLID, &call_id);

        update_callback_timeout(call_id, timeout, st);
    } else {
        crm_err("Unknown message type: %s", type);
        crm_log_xml_warn(blob.xml, "BadReply");
    }

    free_xml(blob.xml);
    return 1;
}
 
static int
stonith_api_free(stonith_t * stonith)
{
    /* Disconnect (if needed) and release the connection object, its
     * private state, callback table and notification list.  If the
     * disconnect fails the object is deliberately NOT freed, and the
     * disconnect's error code is returned.
     */
    int rc = pcmk_ok;

    crm_trace("Destroying %p", stonith);

    if (stonith->state != stonith_disconnected) {
        crm_trace("Disconnecting %p first", stonith);
        rc = stonith->cmds->disconnect(stonith);
    }

    if (stonith->state == stonith_disconnected) {
        stonith_private_t *private = stonith->private;

        crm_trace("Removing %d callbacks", g_hash_table_size(private->stonith_op_callback_table));
        g_hash_table_destroy(private->stonith_op_callback_table);

        crm_trace("Destroying %d notification clients", g_list_length(private->notify_list));
        g_list_free_full(private->notify_list, free);

        free(stonith->private);
        free(stonith->cmds);
        free(stonith);

    } else {
        crm_err("Not free'ing active connection: %s (%d)", pcmk_strerror(rc), rc);
    }

    return rc;
}
 
 void
 stonith_api_delete(stonith_t * stonith)
 {
     crm_trace("Destroying %p", stonith);
     if(stonith) {
         stonith->cmds->free(stonith);
     }
 }
 
 stonith_t *
 stonith_api_new(void)
 {
     stonith_t *new_stonith = NULL;
     stonith_private_t *private = NULL;
 
     new_stonith = calloc(1, sizeof(stonith_t));
     private = calloc(1, sizeof(stonith_private_t));
     new_stonith->private = private;
 
     private->stonith_op_callback_table = g_hash_table_new_full(g_direct_hash, g_direct_equal,
                                                                NULL, stonith_destroy_op_callback);
     private->notify_list = NULL;
 
     new_stonith->call_id = 1;
     new_stonith->state = stonith_disconnected;
 
     new_stonith->cmds = calloc(1, sizeof(stonith_api_operations_t));
 
 /* *INDENT-OFF* */
     new_stonith->cmds->free       = stonith_api_free;
     new_stonith->cmds->connect    = stonith_api_signon;
     new_stonith->cmds->disconnect = stonith_api_signoff;
 
     new_stonith->cmds->list       = stonith_api_list;
     new_stonith->cmds->monitor    = stonith_api_monitor;
     new_stonith->cmds->status     = stonith_api_status;
     new_stonith->cmds->fence      = stonith_api_fence;
     new_stonith->cmds->confirm    = stonith_api_confirm;
     new_stonith->cmds->history    = stonith_api_history;
 
     new_stonith->cmds->list_agents  = stonith_api_device_list;
     new_stonith->cmds->metadata     = stonith_api_device_metadata;
 
     new_stonith->cmds->query           = stonith_api_query;
     new_stonith->cmds->remove_device   = stonith_api_remove_device;
     new_stonith->cmds->register_device = stonith_api_register_device;
 
     new_stonith->cmds->remove_level    = stonith_api_remove_level;
     new_stonith->cmds->register_level  = stonith_api_register_level;
 
     new_stonith->cmds->remove_callback       = stonith_api_del_callback;
     new_stonith->cmds->register_callback     = stonith_api_add_callback;
     new_stonith->cmds->remove_notification   = stonith_api_del_notification;
     new_stonith->cmds->register_notification = stonith_api_add_notification;
 /* *INDENT-ON* */
 
     return new_stonith;
 }
 
 stonith_key_value_t *
 stonith_key_value_add(stonith_key_value_t * head, const char *key, const char *value)
 {
     stonith_key_value_t *p, *end;
 
     p = calloc(1, sizeof(stonith_key_value_t));
     if (key) {
         p->key = strdup(key);
     }
     if (value) {
         p->value = strdup(value);
     }
 
     end = head;
     while (end && end->next) {
         end = end->next;
     }
 
     if (end) {
         end->next = p;
     } else {
         head = p;
     }
 
     return head;
 }
 
 void
 stonith_key_value_freeall(stonith_key_value_t * head, int keys, int values)
 {
     stonith_key_value_t *p;
 
     while (head) {
         p = head->next;
         if (keys) {
             free(head->key);
         }
         if (values) {
             free(head->value);
         }
         free(head);
         head = p;
     }
 }
 
 int
 stonith_api_kick(int nodeid, const char *uname, int timeout, bool off)
 {
     char *name = NULL;
     const char *action = "reboot";
 
     int rc = -EPROTO;
     stonith_t *st = NULL;
     enum stonith_call_options opts = st_opt_sync_call | st_opt_allow_suicide;
 
     st = stonith_api_new();
     if (st) {
         rc = st->cmds->connect(st, "stonith-api", NULL);
     }
 
     if (uname != NULL) {
         name = strdup(uname);
 
     } else if (nodeid > 0) {
         opts |= st_opt_cs_nodeid;
         name = crm_itoa(nodeid);
     }
 
     if (off) {
         action = "off";
     }
 
     if (rc == pcmk_ok) {
         rc = st->cmds->fence(st, opts, name, action, timeout, 0);
     }
 
     if (st) {
         st->cmds->disconnect(st);
         stonith_api_delete(st);
     }
 
     free(name);
     return rc;
 }
 
 time_t
 stonith_api_time(int nodeid, const char *uname, bool in_progress)
 {
     int rc = 0;
     char *name = NULL;
 
     time_t when = 0;
     time_t progress = 0;
     stonith_t *st = NULL;
     stonith_history_t *history, *hp = NULL;
     enum stonith_call_options opts = st_opt_sync_call;
 
     st = stonith_api_new();
     if (st) {
         rc = st->cmds->connect(st, "stonith-api", NULL);
     }
 
     if (uname != NULL) {
         name = strdup(uname);
 
     } else if (nodeid > 0) {
         opts |= st_opt_cs_nodeid;
         name = crm_itoa(nodeid);
     }
 
     if (st && rc == pcmk_ok) {
         st->cmds->history(st, st_opt_sync_call | st_opt_cs_nodeid, name, &history, 120);
 
         for (hp = history; hp; hp = hp->next) {
             if (in_progress) {
                 if (hp->state != st_done && hp->state != st_failed) {
                     progress = time(NULL);
                 }
 
             } else if (hp->state == st_done) {
                 when = hp->completed;
             }
         }
     }
 
     if (progress) {
         when = progress;
     }
 
     if (st) {
         st->cmds->disconnect(st);
         stonith_api_delete(st);
     }
 
     free(name);
     return when;
 }
 
 #if HAVE_STONITH_STONITH_H
 #  include <pils/plugin.h>
 
 const char *i_hate_pils(int rc);
 
 const char *
 i_hate_pils(int rc)
 {
     return PIL_strerror(rc);
 }
 #endif
diff --git a/mcp/pacemaker.c b/mcp/pacemaker.c
index b3eb72eaf7..ed1838a43e 100644
--- a/mcp/pacemaker.c
+++ b/mcp/pacemaker.c
@@ -1,1044 +1,1044 @@
 /*
  * Copyright (C) 2010 Andrew Beekhof <andrew@beekhof.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  *
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 #include <pacemaker.h>
 
 #include <pwd.h>
 #include <grp.h>
 #include <sys/stat.h>
 #include <sys/types.h>
 #include <sys/time.h>
 #include <sys/resource.h>
 #include <sys/reboot.h>
 
 #include <crm/msg_xml.h>
 #include <crm/common/ipcs.h>
 #include <crm/common/mainloop.h>
 #include <crm/cluster/internal.h>
 #include <crm/cluster.h>
 
 #include <dirent.h>
 #include <ctype.h>
 gboolean fatal_error = FALSE;
 GMainLoop *mainloop = NULL;
 
 #define PCMK_PROCESS_CHECK_INTERVAL 5
 
 const char *local_name = NULL;
 uint32_t local_nodeid = 0;
 crm_trigger_t *shutdown_trigger = NULL;
 const char *pid_file = "/var/run/pacemaker.pid";
 
/* Book-keeping for one pacemaker subsystem daemon managed by the MCP */
typedef struct pcmk_child_s {
    int pid;                    /* current process id; 0 = not running */
    long flag;                  /* crm_proc_flag bit identifying the subsystem */
    int start_seq;              /* startup order; 0 = never started by us */
    int respawn_count;          /* restarts so far (capped by MAX_RESPAWN) */
    gboolean respawn;           /* restart automatically on failure? */
    const char *name;
    const char *uid;            /* run as this user; NULL = root */
    const char *command;        /* executable path; NULL = nothing to exec */

    gboolean active_before_startup;  /* found already running at MCP startup */
} pcmk_child_t;

/* Index into the array below */
#define pcmk_child_crmd  4
#define pcmk_child_mgmtd 8
/* *INDENT-OFF* */
static pcmk_child_t pcmk_children[] = {
    { 0, crm_proc_none,       0, 0, FALSE, "none",       NULL,            NULL },
    { 0, crm_proc_plugin,     0, 0, FALSE, "ais",        NULL,            NULL },
    { 0, crm_proc_lrmd,       3, 0, TRUE,  "lrmd",       NULL,            CRM_DAEMON_DIR"/lrmd" },
    { 0, crm_proc_cib,        1, 0, TRUE,  "cib",        CRM_DAEMON_USER, CRM_DAEMON_DIR"/cib" },
    { 0, crm_proc_crmd,       6, 0, TRUE,  "crmd",       CRM_DAEMON_USER, CRM_DAEMON_DIR"/crmd" },
    { 0, crm_proc_attrd,      4, 0, TRUE,  "attrd",      CRM_DAEMON_USER, CRM_DAEMON_DIR"/attrd" },
    { 0, crm_proc_stonithd,   0, 0, TRUE,  "stonithd",   NULL,            NULL },
    { 0, crm_proc_pe,         5, 0, TRUE,  "pengine",    CRM_DAEMON_USER, CRM_DAEMON_DIR"/pengine" },
    { 0, crm_proc_mgmtd,      0, 0, TRUE,  "mgmtd",      NULL,            HB_DAEMON_DIR"/mgmtd" },
    { 0, crm_proc_stonith_ng, 2, 0, TRUE,  "stonith-ng", NULL,            CRM_DAEMON_DIR"/stonithd" },
};
/* *INDENT-ON* */
 
 static gboolean start_child(pcmk_child_t * child);
 static gboolean check_active_before_startup_processes(gpointer user_data);
 void update_process_clients(crm_client_t *client);
 void update_process_peers(void);
 
 void
 enable_crmd_as_root(gboolean enable)
 {
     if (enable) {
         pcmk_children[pcmk_child_crmd].uid = NULL;
     } else {
         pcmk_children[pcmk_child_crmd].uid = CRM_DAEMON_USER;
     }
 }
 
 void
 enable_mgmtd(gboolean enable)
 {
     if (enable) {
         pcmk_children[pcmk_child_mgmtd].start_seq = 7;
     } else {
         pcmk_children[pcmk_child_mgmtd].start_seq = 0;
     }
 }
 
 static uint32_t
 get_process_list(void)
 {
     int lpc = 0;
     uint32_t procs = 0;
 
     if(is_classic_ais_cluster()) {
         procs |= crm_proc_plugin;
     }
 
     for (lpc = 0; lpc < SIZEOF(pcmk_children); lpc++) {
         if (pcmk_children[lpc].pid != 0) {
             procs |= pcmk_children[lpc].flag;
         }
     }
     return procs;
 }
 
 static void
 pcmk_process_exit(pcmk_child_t * child)
 {
     child->pid = 0;
     child->active_before_startup = FALSE;
 
     /* Broadcast the fact that one of our processes died ASAP
      *
      * Try to get some logging of the cause out first though
      * because we're probably about to get fenced
      *
      * Potentially do this only if respawn_count > N
      * to allow for local recovery
      */
     update_node_processes(local_nodeid, NULL, get_process_list());
 
     child->respawn_count += 1;
     if (child->respawn_count > MAX_RESPAWN) {
         crm_err("Child respawn count exceeded by %s", child->name);
         child->respawn = FALSE;
     }
 
     if (shutdown_trigger) {
         mainloop_set_trigger(shutdown_trigger);
         update_node_processes(local_nodeid, NULL, get_process_list());
 
     } else if (child->respawn) {
         gboolean fail_fast = crm_is_true(getenv("PCMK_fail_fast"));
 
         crm_notice("Respawning failed child process: %s", child->name);
 
 #ifdef RB_HALT_SYSTEM
         if (fail_fast) {
             crm_err("Rebooting system", child->name);
             sync();
             reboot(RB_HALT_SYSTEM);
             crm_exit(DAEMON_RESPAWN_STOP);
         }
 #endif
         start_child(child);
     }
 }
 
static void
pcmk_child_exit(mainloop_child_t * p, pid_t pid, int core, int signo, int exitcode)
{
    /* Mainloop SIGCHLD handler: log how the child died, honor the special
     * "do not respawn me" exit code, then hand off to pcmk_process_exit()
     * for broadcast/respawn handling. */
    pcmk_child_t *child = mainloop_child_userdata(p);
    const char *name = mainloop_child_name(p);

    if (signo) {
        crm_notice("Child process %s terminated with signal %d (pid=%d, core=%d)",
                   name, signo, pid, core);

    } else {
        do_crm_log(exitcode == 0 ? LOG_INFO : LOG_ERR,
                   "Child process %s (%d) exited: %s (%d)", name, pid, pcmk_strerror(exitcode), exitcode);
    }

    /* Exit code 100 is the child's way of saying "don't restart me";
     * treat it as fatal for the whole stack */
    if (exitcode == 100) {
        crm_warn("Pacemaker child process %s no longer wishes to be respawned. "
                 "Shutting ourselves down.", name);
        child->respawn = FALSE;
        fatal_error = TRUE;
        pcmk_shutdown(15);
    }

    pcmk_process_exit(child);
}
 
 static gboolean
 stop_child(pcmk_child_t * child, int signal)
 {
     if (signal == 0) {
         signal = SIGTERM;
     }
 
     if (child->command == NULL) {
         crm_debug("Nothing to do for child \"%s\"", child->name);
         return TRUE;
     }
 
     if (child->pid <= 0) {
         crm_trace("Client %s not running", child->name);
         return TRUE;
     }
 
     errno = 0;
     if (kill(child->pid, signal) == 0) {
         crm_notice("Stopping %s: Sent -%d to process %d", child->name, signal, child->pid);
 
     } else {
         crm_perror(LOG_ERR, "Stopping %s: Could not send -%d to process %d failed",
                    child->name, signal, child->pid);
     }
 
     return TRUE;
 }
 
/* Argument vectors reused by start_child(); populated just before exec */
static char *opts_default[] = { NULL, NULL };
static char *opts_vgrind[] = { NULL, NULL, NULL, NULL, NULL };
 
static gboolean
start_child(pcmk_child_t * child)
{
    /* Fork and exec one subsystem daemon.
     *
     * Parent: registers the child with the mainloop, broadcasts the new
     * process list, returns TRUE.
     * Child: starts a new session, optionally drops privileges to
     * child->uid/gid, redirects stdio to /dev/null, and execs the daemon
     * (under valgrind/callgrind when the PCMK_* env vars request it).
     * Returns FALSE only when the configured user cannot be resolved.
     */
    int lpc = 0;
    uid_t uid = 0;
    gid_t gid = 0;
    struct rlimit oflimits;
    gboolean use_valgrind = FALSE;
    gboolean use_callgrind = FALSE;
    const char *devnull = "/dev/null";
    const char *env_valgrind = getenv("PCMK_valgrind_enabled");
    const char *env_callgrind = getenv("PCMK_callgrind_enabled");
    enum cluster_type_e stack = get_cluster_type();

    child->active_before_startup = FALSE;

    if (child->command == NULL) {
        crm_info("Nothing to do for child \"%s\"", child->name);
        return TRUE;
    }

    /* Each env var may be a boolean ("all daemons") or a list of daemon
     * names; callgrind implies valgrind */
    if (env_callgrind != NULL && crm_is_true(env_callgrind)) {
        use_callgrind = TRUE;
        use_valgrind = TRUE;

    } else if (env_callgrind != NULL && strstr(env_callgrind, child->name)) {
        use_callgrind = TRUE;
        use_valgrind = TRUE;

    } else if (env_valgrind != NULL && crm_is_true(env_valgrind)) {
        use_valgrind = TRUE;

    } else if (env_valgrind != NULL && strstr(env_valgrind, child->name)) {
        use_valgrind = TRUE;
    }

    if (use_valgrind && strlen(VALGRIND_BIN) == 0) {
        crm_warn("Cannot enable valgrind for %s:"
                 " The location of the valgrind binary is unknown", child->name);
        use_valgrind = FALSE;
    }

    if (child->uid) {
        if (crm_user_lookup(child->uid, &uid, &gid) < 0) {
            crm_err("Invalid user (%s) for %s: not found", child->uid, child->name);
            return FALSE;
        }
        crm_info("Using uid=%u and group=%u for process %s", uid, gid, child->name);
    }

    child->pid = fork();
    CRM_ASSERT(child->pid != -1);

    if (child->pid > 0) {
        /* parent */
        mainloop_child_add(child->pid, 0, child->name, child, pcmk_child_exit);

        crm_info("Forked child %d for process %s%s", child->pid, child->name,
                 use_valgrind ? " (valgrind enabled: " VALGRIND_BIN ")" : "");
        update_node_processes(local_nodeid, NULL, get_process_list());
        return TRUE;

    } else {
        /* Start a new session */
        (void)setsid();

        /* Set up the two alternate argument arrays */
        opts_vgrind[0] = strdup(VALGRIND_BIN);
        if (use_callgrind) {
            opts_vgrind[1] = strdup("--tool=callgrind");
            opts_vgrind[2] = strdup("--callgrind-out-file=" CRM_STATE_DIR "/callgrind.out.%p");
            opts_vgrind[3] = strdup(child->command);
            opts_vgrind[4] = NULL;
        } else {
            opts_vgrind[1] = strdup(child->command);
            opts_vgrind[2] = NULL;
            opts_vgrind[3] = NULL;
            opts_vgrind[4] = NULL;
        }
        opts_default[0] = strdup(child->command);;

        if(gid) {
            if(stack == pcmk_cluster_corosync) {
                /* Drop root privileges completely
                 *
                 * We can do this because we set uidgid.gid.${gid}=1
                 * via CMAP which allows these processes to connect to
                 * corosync
                 */
                if (setgid(gid) < 0) {
                    crm_perror(LOG_ERR, "Could not set group to %d", gid);
                }

                /* Keep the root group (so we can access corosync), but add the haclient group (so we can access ipc) */
            } else if (initgroups(child->uid, gid) < 0) {
                crm_err("Cannot initalize groups for %s: %s (%d)", child->uid, pcmk_strerror(errno), errno);
            }
        }

        /* setgid/initgroups must happen before setuid: afterwards we may no
         * longer have permission to change groups */
        if (uid && setuid(uid) < 0) {
            crm_perror(LOG_ERR, "Could not set user to %d (%s)", uid, child->uid);
        }

        /* Close all open file descriptors */
        getrlimit(RLIMIT_NOFILE, &oflimits);
        for (lpc = 0; lpc < oflimits.rlim_cur; lpc++) {
            close(lpc);
        }

        /* With 0-2 closed, these opens claim the standard descriptors */
        (void)open(devnull, O_RDONLY);  /* Stdin:  fd 0 */
        (void)open(devnull, O_WRONLY);  /* Stdout: fd 1 */
        (void)open(devnull, O_WRONLY);  /* Stderr: fd 2 */

        if (use_valgrind) {
            (void)execvp(VALGRIND_BIN, opts_vgrind);
        } else {
            (void)execvp(child->command, opts_default);
        }
        crm_perror(LOG_ERR, "FATAL: Cannot exec %s", child->command);
        crm_exit(DAEMON_RESPAWN_STOP);
    }
    return TRUE;                /* never reached */
}
 
 static gboolean
 escalate_shutdown(gpointer data)
 {
 
     pcmk_child_t *child = data;
 
     if (child->pid) {
         /* Use SIGSEGV instead of SIGKILL to create a core so we can see what it was up to */
         crm_err("Child %s not terminating in a timely manner, forcing", child->name);
         stop_child(child, SIGSEGV);
     }
     return FALSE;
 }
 
 static gboolean
 pcmk_shutdown_worker(gpointer user_data)
 {
     static int phase = 0;
     static time_t next_log = 0;
     static int max = SIZEOF(pcmk_children);
 
     int lpc = 0;
 
     if (phase == 0) {
         crm_notice("Shuting down Pacemaker");
         phase = max;
 
         /* Add a second, more frequent, check to speed up shutdown */
         g_timeout_add_seconds(5, check_active_before_startup_processes, NULL);
     }
 
     for (; phase > 0; phase--) {
         /* dont stop anything with start_seq < 1 */
 
         for (lpc = max - 1; lpc >= 0; lpc--) {
             pcmk_child_t *child = &(pcmk_children[lpc]);
 
             if (phase != child->start_seq) {
                 continue;
             }
 
             if (child->pid) {
                 time_t now = time(NULL);
 
                 if (child->respawn) {
                     next_log = now + 30;
                     child->respawn = FALSE;
                     stop_child(child, SIGTERM);
                     if (phase < pcmk_children[pcmk_child_crmd].start_seq) {
                         g_timeout_add(180000 /* 3m */ , escalate_shutdown, child);
                     }
 
                 } else if (now >= next_log) {
                     next_log = now + 30;
                     crm_notice("Still waiting for %s (pid=%d, seq=%d) to terminate...",
                                child->name, child->pid, child->start_seq);
                 }
                 return TRUE;
             }
 
             /* cleanup */
             crm_debug("%s confirmed stopped", child->name);
             child->pid = 0;
         }
     }
 
     /* send_cluster_id(); */
     crm_notice("Shutdown complete");
     g_main_loop_quit(mainloop);
 
     if (fatal_error) {
         crm_notice("Attempting to inhibit respawning after fatal error");
         crm_exit(DAEMON_RESPAWN_STOP);
     }
 
     return TRUE;
 }
 
 void
 pcmk_shutdown(int nsig)
 {
     if (shutdown_trigger == NULL) {
         shutdown_trigger = mainloop_add_trigger(G_PRIORITY_HIGH, pcmk_shutdown_worker, NULL);
     }
     mainloop_set_trigger(shutdown_trigger);
 }
 
 static int32_t
 pcmk_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
 {
     crm_trace("Connection %p", c);
     if (crm_client_new(c, uid, gid) == NULL) {
         return -EIO;
     }
     return 0;
 }
 
static void
pcmk_ipc_created(qb_ipcs_connection_t * c)
{
    /* Nothing to do beyond tracing; client state was set up in accept */
    crm_trace("Connection %p", c);
}
 
 /* Exit code means? */
 static int32_t
 pcmk_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
 {
     uint32_t id = 0;
     uint32_t flags = 0;
     const char *task = NULL;
     crm_client_t *c = crm_client_get(qbc);
     xmlNode *msg = crm_ipcs_recv(c, data, size, &id, &flags);
 
     if (flags & crm_ipc_client_response) {
         crm_ipcs_send_ack(c, id, "ack", __FUNCTION__, __LINE__);
     }
 
     if (msg == NULL) {
         return 0;
     }
 
     task = crm_element_value(msg, F_CRM_TASK);
     if (crm_str_eq(task, CRM_OP_QUIT, TRUE)) {
         /* Time to quit */
         crm_notice("Shutting down in responce to ticket %s (%s)",
                    crm_element_value(msg, F_CRM_REFERENCE), crm_element_value(msg, F_CRM_ORIGIN));
         pcmk_shutdown(15);
 
     } else if (crm_str_eq(task, CRM_OP_RM_NODE_CACHE, TRUE)) {
         /* Send to everyone */
         struct iovec *iov;
         int id = 0;
         const char *name = NULL;
 
         crm_element_value_int(msg, XML_ATTR_ID, &id);
         name = crm_element_value(msg, XML_ATTR_UNAME);
         crm_notice("Instructing peers to remove references to node %s/%u", name, id);
 
         iov = calloc(1, sizeof(struct iovec));
         iov->iov_base = dump_xml_unformatted(msg);
         iov->iov_len = 1 + strlen(iov->iov_base);
         send_cpg_iov(iov);
 
     } else {
         update_process_clients(c);
     }
 
     free_xml(msg);
     return 0;
 }
 
 /* Error code means? */
 static int32_t
 pcmk_ipc_closed(qb_ipcs_connection_t * c)
 {
     crm_client_t *client = crm_client_get(c);
 
     crm_trace("Connection %p", c);
     crm_client_destroy(client);
     return 0;
 }
 
static void
pcmk_ipc_destroy(qb_ipcs_connection_t * c)
{
    /* Nothing to do; cleanup already happened in pcmk_ipc_closed() */
    crm_trace("Connection %p", c);
}
 
/* libqb IPC server callbacks for the pacemakerd control socket */
struct qb_ipcs_service_handlers mcp_ipc_callbacks = {
    .connection_accept = pcmk_ipc_accept,
    .connection_created = pcmk_ipc_created,
    .msg_process = pcmk_ipc_dispatch,
    .connection_closed = pcmk_ipc_closed,
    .connection_destroyed = pcmk_ipc_destroy
};
 
void
update_process_clients(crm_client_t *client)
{
    /* Send the cluster-wide node/process table to one IPC client, or to
     * every connected client when client is NULL.  Note: in the broadcast
     * branch the 'client' parameter is reused as the iteration cursor. */
    GHashTableIter iter;
    crm_node_t *node = NULL;
    xmlNode *update = create_xml_node(NULL, "nodes");

    crm_trace("Sending process list to %d children", crm_hash_table_size(client_connections));

    g_hash_table_iter_init(&iter, crm_peer_cache);
    while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
        xmlNode *xml = create_xml_node(update, "node");

        crm_xml_add_int(xml, "id", node->id);
        crm_xml_add(xml, "uname", node->uname);
        crm_xml_add(xml, "state", node->state);
        crm_xml_add_int(xml, "processes", node->processes);
    }

    if(client) {
        crm_ipcs_send(client, 0, update, TRUE);

    } else {
        g_hash_table_iter_init(&iter, client_connections);
        while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & client)) {
            crm_ipcs_send(client, 0, update, TRUE);
        }
    }

    free_xml(update);
}
 
 void
 update_process_peers(void)
 {
     /* Do nothing for corosync-2 based clusters */
 
     char buffer[1024];
     struct iovec *iov;
     int rc = 0;
 
     memset(buffer, 0, SIZEOF(buffer));
 
     if (local_name) {
         rc = snprintf(buffer, SIZEOF(buffer) - 1, "<node uname=\"%s\" proclist=\"%u\"/>",
                       local_name, get_process_list());
     } else {
         rc = snprintf(buffer, SIZEOF(buffer) - 1, "<node proclist=\"%u\"/>", get_process_list());
     }
 
     crm_trace("Sending %s", buffer);
     iov = calloc(1, sizeof(struct iovec));
     iov->iov_base = strdup(buffer);
     iov->iov_len = rc + 1;
     send_cpg_iov(iov);
 }
 
gboolean
update_node_processes(uint32_t id, const char *uname, uint32_t procs)
{
    /* Record a (possibly changed) process-list bitmask for a peer node.
     * procs == 0 is treated as "no information" and ignored.  When our own
     * node's list changes, push the update to local IPC clients and to
     * cluster peers.  Returns TRUE if the stored list changed. */
    gboolean changed = FALSE;
    crm_node_t *node = crm_get_peer(id, uname);

    if (procs != 0) {
        if (procs != node->processes) {
            crm_debug("Node %s now has process list: %.32x (was %.32x)",
                      node->uname, procs, node->processes);
            node->processes = procs;
            changed = TRUE;

        } else {
            crm_trace("Node %s still has process list: %.32x", node->uname, procs);
        }
    }

    if (changed && id == local_nodeid) {
        update_process_clients(NULL);
        update_process_peers();
    }
    return changed;
}
 
 
/* *INDENT-OFF* */
/* Command-line options understood by the pacemakerd master control process */
static struct crm_option long_options[] = {
    /* Top-level Options */
    {"help",           0, 0, '?', "\tThis text"},
    {"version",        0, 0, '$', "\tVersion information"  },
    {"verbose",        0, 0, 'V', "\tIncrease debug output"},
    {"shutdown",       0, 0, 'S', "\tInstruct Pacemaker to shutdown on this machine"},
    {"features",       0, 0, 'F', "\tDisplay the full version and list of features Pacemaker was built with"},

    {"-spacer-",       1, 0, '-', "\nAdditional Options:"},
    {"foreground",     0, 0, 'f', "\t(Ignored) Pacemaker always runs in the foreground"},
    {"pid-file",       1, 0, 'p', "\t(Ignored) Daemon pid file location"},

    {NULL, 0, 0, 0}
};
/* *INDENT-ON* */
 
 static void
 mcp_chown(const char *path, uid_t uid, gid_t gid)
 {
     int rc = chown(path, uid, gid);
 
     if (rc < 0) {
         crm_warn("Cannot change the ownership of %s to user %s and gid %d: %s",
                  path, CRM_DAEMON_USER, gid, pcmk_strerror(errno));
     }
 }
 
static gboolean
check_active_before_startup_processes(gpointer user_data)
{
    /* Recurring timer: poll daemons that were already running when we
     * started (and so are not tracked as forked mainloop children).
     * Returns TRUE while at least one such process is still alive, FALSE
     * to cancel the timer. */
    int start_seq = 1, lpc = 0;
    static int max = SIZEOF(pcmk_children);
    gboolean keep_tracking = FALSE;

    for (start_seq = 1; start_seq < max; start_seq++) {
        for (lpc = 0; lpc < max; lpc++) {
            if (pcmk_children[lpc].active_before_startup == FALSE) {
                /* we are already tracking it as a child process. */
                continue;
            } else if (start_seq != pcmk_children[lpc].start_seq) {
                continue;
            } else if (crm_pid_active(pcmk_children[lpc].pid) != 1) {
                crm_notice("Process %s terminated (pid=%d)",
                           pcmk_children[lpc].name, pcmk_children[lpc].pid);
                pcmk_process_exit(&(pcmk_children[lpc]));
                continue;
            }
            /* at least one of the processes found at startup
             * is still going, so keep this recurring timer around */
            keep_tracking = TRUE;
        }
    }

    return keep_tracking;
}
 
 static void
 find_and_track_existing_processes(void)
 {
     DIR *dp;
     struct dirent *entry;
     struct stat statbuf;
     int start_tracker = 0;
 
     dp = opendir("/proc");
     if (!dp) {
         /* no proc directory to search through */
         crm_notice("Can not read /proc directory to track existing components");
         return;
     }
 
     while ((entry = readdir(dp)) != NULL) {
         char procpath[128];
         char value[64];
         char key[16];
         FILE *file;
         int pid;
         int max = SIZEOF(pcmk_children);
         int i;
 
         strcpy(procpath, "/proc/");
         /* strlen("/proc/") + strlen("/status") + 1 = 14
          * 128 - 14 = 114 */
         strncat(procpath, entry->d_name, 114);
 
         if (lstat(procpath, &statbuf)) {
             continue;
         }
         if (!S_ISDIR(statbuf.st_mode) || !isdigit(entry->d_name[0])) {
             continue;
         }
 
         strcat(procpath, "/status");
 
         file = fopen(procpath, "r");
         if (!file) {
             continue;
         }
         if (fscanf(file, "%15s%63s", key, value) != 2) {
             fclose(file);
             continue;
         }
         fclose(file);
 
         pid = atoi(entry->d_name);
         if (pid <= 0) {
             continue;
         }
 
         for (i = 0; i < max; i++) {
             const char *name = pcmk_children[i].name;
 
             if (pcmk_children[i].start_seq == 0) {
                 continue;
             }
             if (pcmk_children[i].flag == crm_proc_stonith_ng) {
                 name = "stonithd";
             }
             if (safe_str_eq(name, value)) {
                 if (crm_pid_active(pid) != 1) {
                     continue;
                 }
                 crm_notice("Tracking existing %s process (pid=%d)", value, pid);
                 pcmk_children[i].pid = pid;
                 pcmk_children[i].active_before_startup = TRUE;
                 start_tracker = 1;
             }
         }
     }
 
     if (start_tracker) {
         g_timeout_add_seconds(PCMK_PROCESS_CHECK_INTERVAL, check_active_before_startup_processes,
                               NULL);
     }
     closedir(dp);
 }
 
 static void
 init_children_processes(void)
 {
     int start_seq = 1, lpc = 0;
     static int max = SIZEOF(pcmk_children);
 
     /* start any children that have not been detected */
     for (start_seq = 1; start_seq < max; start_seq++) {
-        /* dont start anything with start_seq < 1 */
+        /* don't start anything with start_seq < 1 */
         for (lpc = 0; lpc < max; lpc++) {
             if (pcmk_children[lpc].pid) {
                 /* we are already tracking it */
                 continue;
             }
 
             if (start_seq == pcmk_children[lpc].start_seq) {
                 start_child(&(pcmk_children[lpc]));
             }
         }
     }
 }
 
 static void
 mcp_cpg_destroy(gpointer user_data)
 {
     crm_err("Connection destroyed");
     crm_exit(ENOTCONN);
 }
 
 static void
 mcp_cpg_deliver(cpg_handle_t handle,
                  const struct cpg_name *groupName,
                  uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len)
 {
     xmlNode *xml = string2xml(msg);
     const char *task = crm_element_value(xml, F_CRM_TASK);
 
-    crm_trace("Recieved %s %.200s", task, msg);
+    crm_trace("Received %s %.200s", task, msg);
     if (task == NULL && nodeid != local_nodeid) {
         uint32_t procs = 0;
         const char *uname = crm_element_value(xml, "uname");
 
         crm_element_value_int(xml, "proclist", (int *)&procs);
         /* crm_debug("Got proclist %.32x from %s", procs, uname); */
         if (update_node_processes(nodeid, uname, procs)) {
             update_process_clients(NULL);
         }
 
     } else if (crm_str_eq(task, CRM_OP_RM_NODE_CACHE, TRUE)) {
         int id = 0;
         const char *name = NULL;
 
         crm_element_value_int(xml, XML_ATTR_ID, &id);
         name = crm_element_value(xml, XML_ATTR_UNAME);
         reap_crm_member(id, name);
     }
 }
 
 static void
 mcp_cpg_membership(cpg_handle_t handle,
                     const struct cpg_name *groupName,
                     const struct cpg_address *member_list, size_t member_list_entries,
                     const struct cpg_address *left_list, size_t left_list_entries,
                     const struct cpg_address *joined_list, size_t joined_list_entries)
 {
     /* Don't care about CPG membership, but we do want to broadcast our own presence */
     update_process_peers();
 }
 
 static gboolean
 mcp_quorum_callback(unsigned long long seq, gboolean quorate)
 {
     /* Nothing to do */
     return TRUE;
 }
 
 static void
 mcp_quorum_destroy(gpointer user_data)
 {
     crm_info("connection closed");
 }
 
 int
 main(int argc, char **argv)
 {
     int rc;
     int flag;
     int argerr = 0;
 
     int option_index = 0;
     gboolean shutdown = FALSE;
 
     uid_t pcmk_uid = 0;
     gid_t pcmk_gid = 0;
     struct rlimit cores;
     crm_ipc_t *old_instance = NULL;
     qb_ipcs_service_t *ipcs = NULL;
     const char *facility = daemon_option("logfacility");
     static crm_cluster_t cluster;
 
     setenv("LC_ALL", "C", 1);
     setenv("HA_LOGD", "no", 1);
 
     set_daemon_option("mcp", "true");
     set_daemon_option("use_logd", "off");
 
     crm_log_init(NULL, LOG_INFO, TRUE, FALSE, argc, argv, FALSE);
     crm_set_options(NULL, "mode [options]", long_options, "Start/Stop Pacemaker\n");
 
     /* Restore the original facility so that mcp_read_config() does the right thing */
     set_daemon_option("logfacility", facility);
 
     while (1) {
         flag = crm_get_option(argc, argv, &option_index);
         if (flag == -1)
             break;
 
         switch (flag) {
             case 'V':
                 crm_bump_log_level(argc, argv);
                 break;
             case 'f':
                 /* Legacy */
                 break;
             case 'p':
                 pid_file = optarg;
                 break;
             case '$':
             case '?':
                 crm_help(flag, EX_OK);
                 break;
             case 'S':
                 shutdown = TRUE;
                 break;
             case 'F':
                 printf("Pacemaker %s (Build: %s)\n Supporting v%s: %s\n", VERSION, BUILD_VERSION,
                        CRM_FEATURE_SET, CRM_FEATURES);
                 crm_exit(pcmk_ok);
             default:
                 printf("Argument code 0%o (%c) is not (?yet?) supported\n", flag, flag);
                 ++argerr;
                 break;
         }
     }
 
     if (optind < argc) {
         printf("non-option ARGV-elements: ");
         while (optind < argc)
             printf("%s ", argv[optind++]);
         printf("\n");
     }
     if (argerr) {
         crm_help('?', EX_USAGE);
     }
 
     crm_debug("Checking for old instances of %s", CRM_SYSTEM_MCP);
     old_instance = crm_ipc_new(CRM_SYSTEM_MCP, 0);
     crm_ipc_connect(old_instance);
 
     if (shutdown) {
         crm_debug("Terminating previous instance");
         while (crm_ipc_connected(old_instance)) {
             xmlNode *cmd =
                 create_request(CRM_OP_QUIT, NULL, NULL, CRM_SYSTEM_MCP, CRM_SYSTEM_MCP, NULL);
 
             crm_debug(".");
             crm_ipc_send(old_instance, cmd, 0, 0, NULL);
             free_xml(cmd);
 
             sleep(2);
         }
         crm_ipc_close(old_instance);
         crm_ipc_destroy(old_instance);
         crm_exit(pcmk_ok);
 
     } else if (crm_ipc_connected(old_instance)) {
         crm_ipc_close(old_instance);
         crm_ipc_destroy(old_instance);
         crm_err("Pacemaker is already active, aborting startup");
         crm_exit(DAEMON_RESPAWN_STOP);
     }
 
     crm_ipc_close(old_instance);
     crm_ipc_destroy(old_instance);
 
     if (mcp_read_config() == FALSE) {
         crm_notice("Could not obtain corosync config data, exiting");
         crm_exit(ENODATA);
     }
 
     crm_notice("Starting Pacemaker %s (Build: %s): %s", VERSION, BUILD_VERSION, CRM_FEATURES);
     mainloop = g_main_new(FALSE);
 
     rc = getrlimit(RLIMIT_CORE, &cores);
     if (rc < 0) {
         crm_perror(LOG_ERR, "Cannot determine current maximum core size.");
     } else {
         if (cores.rlim_max == 0 && geteuid() == 0) {
             cores.rlim_max = RLIM_INFINITY;
         } else {
             crm_info("Maximum core file size is: %lu", (unsigned long)cores.rlim_max);
         }
         cores.rlim_cur = cores.rlim_max;
 
         rc = setrlimit(RLIMIT_CORE, &cores);
         if (rc < 0) {
             crm_perror(LOG_ERR,
                        "Core file generation will remain disabled."
-                    " Core files are an important diagnositic tool,"
+                    " Core files are an important diagnostic tool,"
                        " please consider enabling them by default.");
         }
 #if 0
         /* system() is not thread-safe, can't call from here
-         * Actually, its a pretty hacky way to try and achieve this anyway
+         * Actually, it's a pretty hacky way to try and achieve this anyway
          */
         if (system("echo 1 > /proc/sys/kernel/core_uses_pid") != 0) {
             crm_perror(LOG_ERR, "Could not enable /proc/sys/kernel/core_uses_pid");
         }
 #endif
     }
 
     if (crm_user_lookup(CRM_DAEMON_USER, &pcmk_uid, &pcmk_gid) < 0) {
         crm_err("Cluster user %s does not exist, aborting Pacemaker startup", CRM_DAEMON_USER);
         crm_exit(ENOKEY);
     }
 
     mkdir(CRM_STATE_DIR, 0750);
     mcp_chown(CRM_STATE_DIR, pcmk_uid, pcmk_gid);
 
     /* Used to store core files in */
     crm_build_path(CRM_CORE_DIR, 0775);
     mcp_chown(CRM_CORE_DIR, pcmk_uid, pcmk_gid);
 
     /* Used to store blackbox dumps in */
     crm_build_path(CRM_BLACKBOX_DIR, 0755);
     mcp_chown(CRM_BLACKBOX_DIR, pcmk_uid, pcmk_gid);
 
     /* Used to store policy engine inputs in */
     crm_build_path(PE_STATE_DIR, 0755);
     mcp_chown(PE_STATE_DIR, pcmk_uid, pcmk_gid);
 
     /* Used to store the cluster configuration */
     crm_build_path(CRM_CONFIG_DIR, 0755);
     mcp_chown(CRM_CONFIG_DIR, pcmk_uid, pcmk_gid);
 
     /* Resource agent paths are constructed by the lrmd */
 
     ipcs = mainloop_add_ipc_server(CRM_SYSTEM_MCP, QB_IPC_NATIVE, &mcp_ipc_callbacks);
     if (ipcs == NULL) {
         crm_err("Couldn't start IPC server");
         crm_exit(EIO);
     }
 
     /* Allows us to block shutdown */
     if (cluster_connect_cfg(&local_nodeid) == FALSE) {
         crm_err("Couldn't connect to Corosync's CFG service");
         crm_exit(ENOPROTOOPT);
     }
 
     cluster.destroy = mcp_cpg_destroy;
     cluster.cpg.cpg_deliver_fn = mcp_cpg_deliver;
     cluster.cpg.cpg_confchg_fn = mcp_cpg_membership;
 
     if(cluster_connect_cpg(&cluster) == FALSE) {
         crm_err("Couldn't connect to Corosync's CPG service");
         crm_exit(ENOPROTOOPT);
     }
 
     if (is_corosync_cluster()) {
         /* Keep the membership list up-to-date for crm_node to query */
         rc = cluster_connect_quorum(mcp_quorum_callback, mcp_quorum_destroy);
     }
 
     local_name = get_local_node_name();
     update_node_processes(local_nodeid, local_name, get_process_list());
 
     mainloop_add_signal(SIGTERM, pcmk_shutdown);
     mainloop_add_signal(SIGINT, pcmk_shutdown);
 
     find_and_track_existing_processes();
     init_children_processes();
 
     crm_info("Starting mainloop");
 
     g_main_run(mainloop);
 
     if (ipcs) {
         crm_trace("Closing IPC server");
         mainloop_del_ipc_server(ipcs);
         ipcs = NULL;
     }
 
     g_main_destroy(mainloop);
 
     cluster_disconnect_cpg(&cluster);
     cluster_disconnect_cfg();
 
     crm_info("Exiting %s", crm_system_name);
 
     crm_exit(pcmk_ok);
 }
diff --git a/mcp/pacemaker.sysconfig b/mcp/pacemaker.sysconfig
index c8274eb7fd..be42fe5eea 100644
--- a/mcp/pacemaker.sysconfig
+++ b/mcp/pacemaker.sysconfig
@@ -1,95 +1,95 @@
 # For non-systemd based systems, prefix export to each enabled line
 
 # Turn on special handling for CMAN clusters in the init script
 # Without this, fenced (and by inference, cman) cannot reliably be made to shut down
 # PCMK_STACK=cman
 
 #==#==# Variables that control logging
 
 # Enable debug logging globally or per-subsystem
-# Multiple subsystems may me listed separated by commas
+# Multiple subsystems may be listed separated by commas
 # eg. PCMK_debug=crmd,pengine
 # PCMK_debug=yes|no|crmd|pengine|cib|stonith-ng|attrd|pacemakerd
 
 # Send INFO (and higher) messages to the named log file
 # Additional messages may also appear here depending on any configured debug and trace settings
 # By default Pacemaker will inherit the logfile specified in corosync.conf
 # PCMK_debugfile=/var/log/pacemaker.log
 
 # Specify an alternate syslog target for NOTICE (and higher) messages
 # Use 'none' to disable - not recommended
 # The default value is 'daemon'
 # PCMK_logfacility=none|daemon|user|local0|local1|local2|local3|local4|local5|local6|local7
 
 # Send all messages up-to-and-including the configured priority to syslog
 # A value of 'info' will be far too verbose for most installations and 'debug' is almost certain to send you blind
 # The default value is 'notice'
 # PCMK_logpriority=emerg|alert|crit|error|warning|notice|info|debug
 
 # Log all messages from a comma-separated list of functions
 # PCMK_trace_functions=function1,function2,function3
 
 # Log all messages from a comma-separated list of files (no path)
 # Supports wildcards eg. PCMK_trace_files=prefix*.c
 # PCMK_trace_files=file.c,other.h
 
 # Log all messages matching comma-separated list of formats
 # PCMK_trace_formats="Sent delete %d"
 
 # Log all messages from a comma-separated list of tags
 # PCMK_trace_tags=tag1,tag2
 
 # Dump the blackbox whenever the message at function and line is printed
 # eg. PCMK_trace_blackbox=te_graph_trigger:223,unpack_clone:81
 # PCMK_trace_blackbox=fn:line,fn2:line2,...
 
 # Enable blackbox logging globally or per-subsystem
 # The blackbox contains a rolling buffer of all logs (including info+debug+trace)
-# and is written after a crash, assertion failure and/or when SIGTRAP is recieved
+# and is written after a crash, assertion failure and/or when SIGTRAP is received
 #
 # The blackbox recorder can also be enabled for Pacemaker daemons at runtime by sending SIGUSR1
 #
-# Multiple subsystems may me listed separated by commas
+# Multiple subsystems may be listed separated by commas
 # eg. PCMK_blackbox=crmd,pengine
 # PCMK_blackbox=yes|no|crmd|pengine|cib|stonith-ng|attrd|pacemakerd
 
 #==#==# Advanced use only
 
 # Enable this for compatibility with older corosync (prior to 2.0)
 # based clusters which used the nodes uname as its uuid also
 # PCMK_uname_is_uuid=no
 
 # Specify an alternate location for RNG schemas and XSL transforms
 # Mostly only useful for developer testing
 # PCMK_schema_directory=/some/path
 
 # Enable this for rebooting this machine at the time of process (subsystem) failure
 # PCMK_fail_fast=no
 
 #==#==# Pacemaker Remote
 # Use a custom directory for finding the authkey.
 # PCMK_authkey_location=/etc/pacemaker/authkey
 #
 # Specify a custom port for Pacemaker Remote connections
 # PCMK_remote_port=3121
 
 #==#==# IPC
 
 # Force use of a particular class of IPC connection
 # PCMK_ipc_type=shared-mem|socket|posix|sysv
 
 # Specify an IPC buffer size in bytes
 # Useful when connecting to really big clusters that exceed the default 20k buffer
 # PCMK_ipc_buffer=20480
 
 #==#==# Profiling and memory leak testing
 
 # Variables for running child daemons under valgrind and/or checking for memory problems
 # G_SLICE=always-malloc
 # MALLOC_PERTURB_=221 # or 0
 # MALLOC_CHECK_=3     # or 0,1,2
 # PCMK_valgrind_enabled=yes
 # PCMK_valgrind_enabled=cib,crmd
 # PCMK_callgrind_enabled=yes
 # PCMK_callgrind_enabled=cib,crmd
 # VALGRIND_OPTS="--leak-check=full --trace-children=no --num-callers=25 --log-file=/var/lib/pacemaker/valgrind-%p --suppressions=/usr/share/pacemaker/tests/valgrind-pcmk.suppressions --gen-suppressions=all"
diff --git a/pengine/master.c b/pengine/master.c
index 43d538bc3b..9d5e2fb59a 100644
--- a/pengine/master.c
+++ b/pengine/master.c
@@ -1,1065 +1,1065 @@
 /*
  * Copyright (C) 2004 Andrew Beekhof <andrew@beekhof.net>
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public
  * License as published by the Free Software Foundation; either
  * version 2 of the License, or (at your option) any later version.
  *
  * This software is distributed in the hope that it will be useful,
  * but WITHOUT ANY WARRANTY; without even the implied warranty of
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  * General Public License for more details.
  *
  * You should have received a copy of the GNU General Public
  * License along with this library; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
  */
 
 #include <crm_internal.h>
 
 #include <crm/msg_xml.h>
 #include <allocate.h>
 #include <utils.h>
 
 #define VARIANT_CLONE 1
 #include <lib/pengine/variant.h>
 
 extern gint sort_clone_instance(gconstpointer a, gconstpointer b, gpointer data_set);
 
 static int master_score(resource_t * rsc, node_t * node, int not_set_value);
 
 static void
 child_promoting_constraints(clone_variant_data_t * clone_data, enum pe_ordering type,
                             resource_t * rsc, resource_t * child, resource_t * last,
                             pe_working_set_t * data_set)
 {
     if (child == NULL) {
         if (clone_data->ordered && last != NULL) {
             pe_rsc_trace(rsc, "Ordered version (last node)");
             /* last child promote before promoted started */
             new_rsc_order(last, RSC_PROMOTE, rsc, RSC_PROMOTED, type, data_set);
         }
         return;
     }
 
     /* child promote before global promoted */
     new_rsc_order(child, RSC_PROMOTE, rsc, RSC_PROMOTED, type, data_set);
 
     /* global promote before child promote */
     new_rsc_order(rsc, RSC_PROMOTE, child, RSC_PROMOTE, type, data_set);
 
     if (clone_data->ordered) {
         pe_rsc_trace(rsc, "Ordered version");
         if (last == NULL) {
             /* global promote before first child promote */
             last = rsc;
 
         }
         /* else: child/child relative promote */
         order_start_start(last, child, type);
         new_rsc_order(last, RSC_PROMOTE, child, RSC_PROMOTE, type, data_set);
 
     } else {
         pe_rsc_trace(rsc, "Un-ordered version");
     }
 }
 
 static void
 child_demoting_constraints(clone_variant_data_t * clone_data, enum pe_ordering type,
                            resource_t * rsc, resource_t * child, resource_t * last,
                            pe_working_set_t * data_set)
 {
     if (child == NULL) {
         if (clone_data->ordered && last != NULL) {
             pe_rsc_trace(rsc, "Ordered version (last node)");
             /* global demote before first child demote */
             new_rsc_order(rsc, RSC_DEMOTE, last, RSC_DEMOTE, pe_order_optional, data_set);
         }
         return;
     }
 
     /* child demote before global demoted */
     new_rsc_order(child, RSC_DEMOTE, rsc, RSC_DEMOTED, pe_order_implies_then_printed, data_set);
 
     /* global demote before child demote */
     new_rsc_order(rsc, RSC_DEMOTE, child, RSC_DEMOTE, pe_order_implies_first_printed, data_set);
 
     if (clone_data->ordered && last != NULL) {
         pe_rsc_trace(rsc, "Ordered version");
 
         /* child/child relative demote */
         new_rsc_order(child, RSC_DEMOTE, last, RSC_DEMOTE, type, data_set);
 
     } else if (clone_data->ordered) {
         pe_rsc_trace(rsc, "Ordered version (1st node)");
         /* first child stop before global stopped */
         new_rsc_order(child, RSC_DEMOTE, rsc, RSC_DEMOTED, type, data_set);
 
     } else {
         pe_rsc_trace(rsc, "Un-ordered version");
     }
 }
 
 static void
 master_update_pseudo_status(resource_t * rsc, gboolean * demoting, gboolean * promoting)
 {
     GListPtr gIter = NULL;
 
     if (rsc->children) {
         gIter = rsc->children;
         for (; gIter != NULL; gIter = gIter->next) {
             resource_t *child = (resource_t *) gIter->data;
 
             master_update_pseudo_status(child, demoting, promoting);
         }
         return;
     }
 
     CRM_ASSERT(demoting != NULL);
     CRM_ASSERT(promoting != NULL);
 
     gIter = rsc->actions;
     for (; gIter != NULL; gIter = gIter->next) {
         action_t *action = (action_t *) gIter->data;
 
         if (*promoting && *demoting) {
             return;
 
         } else if (is_set(action->flags, pe_action_optional)) {
             continue;
 
         } else if (safe_str_eq(RSC_DEMOTE, action->task)) {
             *demoting = TRUE;
 
         } else if (safe_str_eq(RSC_PROMOTE, action->task)) {
             *promoting = TRUE;
         }
     }
 }
 
 #define apply_master_location(list) do {				\
     gIter2 = list;							\
     for(; gIter2 != NULL; gIter2 = gIter2->next) {			\
 	rsc_to_node_t *cons = (rsc_to_node_t*)gIter2->data;		\
 									\
 	cons_node = NULL;						\
 	if(cons->role_filter == RSC_ROLE_MASTER) {			\
 	    pe_rsc_trace(rsc, "Applying %s to %s",				\
 			cons->id, child_rsc->id);			\
 	    cons_node = pe_find_node_id(				\
 		cons->node_list_rh, chosen->details->id);		\
 	}								\
 	if(cons_node != NULL) {						\
 	    int new_priority = merge_weights(				\
 		child_rsc->priority, cons_node->weight);		\
 	    pe_rsc_trace(rsc, "\t%s: %d->%d (%d)", child_rsc->id,		\
 			child_rsc->priority, new_priority, cons_node->weight); \
 	    child_rsc->priority = new_priority;				\
 	}								\
     }									\
     } while(0)
 
 static node_t *
 can_be_master(resource_t * rsc)
 {
     node_t *node = NULL;
     node_t *local_node = NULL;
     resource_t *parent = uber_parent(rsc);
     clone_variant_data_t *clone_data = NULL;
 
 #if 0
     enum rsc_role_e role = RSC_ROLE_UNKNOWN;
 
     role = rsc->fns->state(rsc, FALSE);
     crm_info("%s role: %s", rsc->id, role2text(role));
 #endif
 
     if (rsc->children) {
         GListPtr gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             resource_t *child = (resource_t *) gIter->data;
 
             if (can_be_master(child) == NULL) {
                 pe_rsc_trace(rsc, "Child %s of %s can't be promoted", child->id, rsc->id);
                 return NULL;
             }
         }
     }
 
     node = rsc->fns->location(rsc, NULL, FALSE);
     if (node == NULL) {
         pe_rsc_trace(rsc, "%s cannot be master: not allocated", rsc->id);
         return NULL;
 
     } else if (is_not_set(rsc->flags, pe_rsc_managed)) {
         if (rsc->fns->state(rsc, TRUE) == RSC_ROLE_MASTER) {
             crm_notice("Forcing unmanaged master %s to remain promoted on %s",
                        rsc->id, node->details->uname);
 
         } else {
             return NULL;
         }
 
     } else if (rsc->priority < 0) {
         pe_rsc_trace(rsc, "%s cannot be master: preference: %d", rsc->id, rsc->priority);
         return NULL;
 
     } else if (can_run_resources(node) == FALSE) {
-        crm_trace("Node cant run any resources: %s", node->details->uname);
+        crm_trace("Node can't run any resources: %s", node->details->uname);
         return NULL;
     }
 
     get_clone_variant_data(clone_data, parent);
     local_node = pe_hash_table_lookup(parent->allowed_nodes, node->details->id);
 
     if (local_node == NULL) {
         crm_err("%s cannot run on %s: node not allowed", rsc->id, node->details->uname);
         return NULL;
 
     } else if (local_node->count < clone_data->master_node_max
                || is_not_set(rsc->flags, pe_rsc_managed)) {
         return local_node;
 
     } else {
         pe_rsc_trace(rsc, "%s cannot be master on %s: node full", rsc->id, node->details->uname);
     }
 
     return NULL;
 }
 
 static gint
 sort_master_instance(gconstpointer a, gconstpointer b, gpointer data_set)
 {
     int rc;
     enum rsc_role_e role1 = RSC_ROLE_UNKNOWN;
     enum rsc_role_e role2 = RSC_ROLE_UNKNOWN;
 
     const resource_t *resource1 = (const resource_t *)a;
     const resource_t *resource2 = (const resource_t *)b;
 
     CRM_ASSERT(resource1 != NULL);
     CRM_ASSERT(resource2 != NULL);
 
     role1 = resource1->fns->state(resource1, TRUE);
     role2 = resource2->fns->state(resource2, TRUE);
 
     rc = sort_rsc_index(a, b);
     if (rc != 0) {
         crm_trace("%s %c %s (index)", resource1->id, rc < 0 ? '<' : '>', resource2->id);
         return rc;
     }
 
     if (role1 > role2) {
         crm_trace("%s %c %s (role)", resource1->id, '<', resource2->id);
         return -1;
 
     } else if (role1 < role2) {
         crm_trace("%s %c %s (role)", resource1->id, '>', resource2->id);
         return 1;
     }
 
     return sort_clone_instance(a, b, data_set);
 }
 
 GHashTable *
 master_merge_weights(resource_t * rsc, const char *rhs, GHashTable * nodes, const char *attr,
                      float factor, enum pe_weights flags)
 {
     return rsc_merge_weights(rsc, rhs, nodes, attr, factor, flags);
 }
 
 static void
 master_promotion_order(resource_t * rsc, pe_working_set_t * data_set)
 {
     GListPtr gIter = NULL;
     node_t *node = NULL;
     node_t *chosen = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     if (clone_data->merged_master_weights) {
         return;
     }
     clone_data->merged_master_weights = TRUE;
     pe_rsc_trace(rsc, "Merging weights for %s", rsc->id);
     set_bit(rsc->flags, pe_rsc_merging);
 
     gIter = rsc->children;
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child = (resource_t *) gIter->data;
 
         pe_rsc_trace(rsc, "Sort index: %s = %d", child->id, child->sort_index);
     }
     dump_node_scores(LOG_DEBUG_3, rsc, "Before", rsc->allowed_nodes);
 
     gIter = rsc->children;
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child = (resource_t *) gIter->data;
         char *score = NULL;
 
         chosen = child->fns->location(child, NULL, FALSE);
         if (chosen == NULL || child->sort_index < 0) {
             pe_rsc_trace(rsc, "Skipping %s", child->id);
             continue;
         }
 
         node = (node_t *) pe_hash_table_lookup(rsc->allowed_nodes, chosen->details->id);
         CRM_ASSERT(node != NULL);
         /* adds in master preferences and rsc_location.role=Master */
         score = score2char(child->sort_index);
         pe_rsc_trace(rsc, "Adding %s to %s from %s", score,
                      node->details->uname, child->id);
         free(score);
         node->weight = merge_weights(child->sort_index, node->weight);
     }
 
     dump_node_scores(LOG_DEBUG_3, rsc, "Middle", rsc->allowed_nodes);
 
     gIter = rsc->rsc_cons;
     for (; gIter != NULL; gIter = gIter->next) {
         rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
 
         /* (re-)adds location preferences of resources that the
          * master instance should/must be colocated with
          */
         if (constraint->role_lh == RSC_ROLE_MASTER) {
             enum pe_weights flags = constraint->score == INFINITY ? 0 : pe_weights_rollback;
 
             pe_rsc_trace(rsc, "RHS: %s with %s: %d", constraint->rsc_lh->id, constraint->rsc_rh->id,
                          constraint->score);
             rsc->allowed_nodes =
                 constraint->rsc_rh->cmds->merge_weights(constraint->rsc_rh, rsc->id,
                                                         rsc->allowed_nodes,
                                                         constraint->node_attribute,
                                                         (float)constraint->score / INFINITY, flags);
         }
     }
 
     gIter = rsc->rsc_cons_lhs;
     for (; gIter != NULL; gIter = gIter->next) {
         rsc_colocation_t *constraint = (rsc_colocation_t *) gIter->data;
 
         /* (re-)adds location preferences of resource that wish to be
          * colocated with the master instance
          */
         if (constraint->role_rh == RSC_ROLE_MASTER) {
             pe_rsc_trace(rsc, "LHS: %s with %s: %d", constraint->rsc_lh->id, constraint->rsc_rh->id,
                          constraint->score);
             rsc->allowed_nodes =
                 constraint->rsc_lh->cmds->merge_weights(constraint->rsc_lh, rsc->id,
                                                         rsc->allowed_nodes,
                                                         constraint->node_attribute,
                                                         (float)constraint->score / INFINITY,
                                                         (pe_weights_rollback |
                                                          pe_weights_positive));
         }
     }
 
     gIter = rsc->rsc_tickets;
     for (; gIter != NULL; gIter = gIter->next) {
         rsc_ticket_t *rsc_ticket = (rsc_ticket_t *) gIter->data;
 
         if (rsc_ticket->role_lh == RSC_ROLE_MASTER
             && (rsc_ticket->ticket->granted == FALSE || rsc_ticket->ticket->standby)) {
             resource_location(rsc, NULL, -INFINITY, "__stateful_without_ticket__", data_set);
         }
     }
 
     dump_node_scores(LOG_DEBUG_3, rsc, "After", rsc->allowed_nodes);
 
     /* write them back and sort */
 
     gIter = rsc->children;
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child = (resource_t *) gIter->data;
 
         chosen = child->fns->location(child, NULL, FALSE);
         if (is_not_set(child->flags, pe_rsc_managed) && child->next_role == RSC_ROLE_MASTER) {
             child->sort_index = INFINITY;
 
         } else if (chosen == NULL || child->sort_index < 0) {
             pe_rsc_trace(rsc, "%s: %d", child->id, child->sort_index);
 
         } else {
             node = (node_t *) pe_hash_table_lookup(rsc->allowed_nodes, chosen->details->id);
             CRM_ASSERT(node != NULL);
 
             child->sort_index = node->weight;
         }
         pe_rsc_trace(rsc, "Set sort index: %s = %d", child->id, child->sort_index);
     }
 
     rsc->children = g_list_sort_with_data(rsc->children, sort_master_instance, data_set);
     clear_bit(rsc->flags, pe_rsc_merging);
 }
 
 static gboolean
 filter_anonymous_instance(resource_t * rsc, node_t * node)
 {
     GListPtr rIter = NULL;
     char *key = clone_strip(rsc->id);
     resource_t *parent = uber_parent(rsc);
 
     for (rIter = parent->children; rIter; rIter = rIter->next) {
         resource_t *child = rIter->data;
         resource_t *active = parent->fns->find_rsc(child, key, node, pe_find_clone|pe_find_current);
 
         /*
-         * Look for an active instance on $node, if there is one, only it recieves the master score
+         * Look for an active instance on $node, if there is one, only it receives the master score
          * Use ->find_rsc() because we might be a cloned group
          */
         if(rsc == active) {
             pe_rsc_trace(rsc, "Found %s for %s active on %s: done", active->id, key, node->details->uname);
             free(key);
             return TRUE;
         } else if(active) {
             pe_rsc_trace(rsc, "Found %s for %s on %s: not %s", active->id, key, node->details->uname, rsc->id);
             free(key);
             return FALSE;
         } else {
             pe_rsc_trace(rsc, "%s on %s: not active", key, node->details->uname);
         }
     }
 
     for (rIter = parent->children; rIter; rIter = rIter->next) {
         resource_t *child = rIter->data;
 
         /*
-         * We know its not running, but any score will still count if
+         * We know it's not running, but any score will still count if
          * the instance has been probed on $node
          *
          * Again use ->find_rsc() because we might be a cloned group
          * and knowing that other members of the group are known here
          * implies nothing
          */
         rsc = parent->fns->find_rsc(child, key, NULL, pe_find_clone);
         pe_rsc_trace(rsc, "Checking %s for %s on %s", rsc->id, key, node->details->uname);
         if (g_hash_table_lookup(rsc->known_on, node->details->id)) {
             free(key);
             return TRUE;
         }
     }
     free(key);
     return FALSE;
 }
 
 /* Return the promotion ("master") score for rsc on node.
  *
  * rsc           - resource to score; may be collective (clone/group)
  * node          - node to look the score up on, or NULL for "anywhere"
  * not_set_value - value to return when no score is configured
  *
  * For a resource with children, the children's scores are summed.
  * For a primitive, the score is read from the node attribute
  * "master-<name>", where <name> is the name the lrm knows the
  * resource by (clone_name when set), as used by crm_master.
  */
 static int
 master_score(resource_t * rsc, node_t * node, int not_set_value)
 {
     char *attr_name;
     char *name = rsc->id;
     const char *attr_value = NULL;
     int score = not_set_value, len = 0;
 
     if (rsc->children) {
         GListPtr gIter = rsc->children;
 
         /* Collective resource: aggregate the children's scores */
         for (; gIter != NULL; gIter = gIter->next) {
             resource_t *child = (resource_t *) gIter->data;
             int c_score = master_score(child, node, not_set_value);
 
             if (score == not_set_value) {
                 score = c_score;
             } else {
                 score += c_score;
             }
         }
         return score;
     }
 
     if (node == NULL) {
         if (rsc->fns->state(rsc, TRUE) < RSC_ROLE_STARTED) {
             pe_rsc_trace(rsc, "Ignoring master score for %s: unknown state", rsc->id);
             return score;
         }
 
     } else {
         node_t *match = pe_find_node_id(rsc->running_on, node->details->id);
         node_t *known = pe_hash_table_lookup(rsc->known_on, node->details->id);
 
         if (is_not_set(rsc->flags, pe_rsc_unique) && filter_anonymous_instance(rsc, node)) {
             pe_rsc_trace(rsc, "Anonymous clone %s is allowed on %s", rsc->id, node->details->uname);
 
         } else if (match == NULL && known == NULL) {
             /* Neither active nor probed on this node - no score applies */
             pe_rsc_trace(rsc, "%s (aka. %s) has been filtered on %s - ignoring", rsc->id,
                          rsc->clone_name, node->details->uname);
             return score;
         }
 
         match = pe_hash_table_lookup(rsc->allowed_nodes, node->details->id);
         if (match == NULL) {
             return score;
 
         } else if (match->weight < 0) {
             /* Node is explicitly disallowed for this resource */
             pe_rsc_trace(rsc, "%s on %s has score: %d - ignoring",
                          rsc->id, match->details->uname, match->weight);
             return score;
         }
     }
 
     if (rsc->clone_name) {
         /* Use the name the lrm knows this resource as,
          * since that's what crm_master would have used too
          */
         name = rsc->clone_name;
     }
 
     /* "master-" prefix (7 chars) plus NUL terminator */
     len = 8 + strlen(name);
     attr_name = calloc(1, len);
     sprintf(attr_name, "master-%s", name);
 
     if (node) {
         attr_value = g_hash_table_lookup(node->details->attrs, attr_name);
         pe_rsc_trace(rsc, "%s: %s[%s] = %s", rsc->id, attr_name, node->details->uname,
                      crm_str(attr_value));
     }
 
     if (attr_value != NULL) {
         score = char2score(attr_value);
     }
 
     free(attr_name);
     return score;
 }
 
 #define max(a, b) a<b?b:a
 
 /* Fold each child's master score into node weights and into the child's
  * priority, so promotion preferences also influence instance placement.
  * Guarded so the merge happens at most once per master resource.
  */
 static void
 apply_master_prefs(resource_t * rsc)
 {
     GListPtr child_iter = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     if (clone_data->applied_master_prefs) {
         /* Make sure we only do this once */
         return;
     }
     clone_data->applied_master_prefs = TRUE;
 
     for (child_iter = rsc->children; child_iter != NULL; child_iter = child_iter->next) {
         GHashTableIter node_iter;
         node_t *node = NULL;
         resource_t *child = (resource_t *) child_iter->data;
 
         g_hash_table_iter_init(&node_iter, child->allowed_nodes);
         while (g_hash_table_iter_next(&node_iter, NULL, (void **)&node)) {
             int score = 0;
             int merged = 0;
 
             if (can_run_resources(node) == FALSE) {
                 /* This node will never be promoted to master, so don't
                  * apply the master score there - that may lead to clone
                  * shuffling
                  */
                 continue;
             }
 
             score = master_score(child, node, 0);
             if (score > 0) {
                 merged = merge_weights(node->weight, score);
                 if (merged != node->weight) {
                     pe_rsc_trace(rsc, "\t%s: Updating preference for %s (%d->%d)",
                                  child->id, node->details->uname, node->weight, merged);
                     node->weight = merged;
                 }
             }
 
             merged = max(child->priority, score);
             if (merged != child->priority) {
                 pe_rsc_trace(rsc, "\t%s: Updating priority (%d->%d)",
                              child->id, child->priority, merged);
                 child->priority = merged;
             }
         }
     }
 }
 
 /* Recursively set rsc (and its children) to the Slave role.
  * With current == TRUE, demote the *current* role of instances that are
  * merely Started; otherwise pick the *next* role: Slave if the resource
  * has been allocated somewhere, Stopped if not.
  */
 static void
 set_role_slave(resource_t * rsc, gboolean current)
 {
     GListPtr child_iter = NULL;
 
     if (current) {
         if (rsc->role == RSC_ROLE_STARTED) {
             rsc->role = RSC_ROLE_SLAVE;
         }
 
     } else {
         GListPtr allocated = NULL;
 
         rsc->fns->location(rsc, &allocated, FALSE);
         rsc->next_role = allocated ? RSC_ROLE_SLAVE : RSC_ROLE_STOPPED;
         g_list_free(allocated);
     }
 
     /* Apply the same role change to all children */
     for (child_iter = rsc->children; child_iter != NULL; child_iter = child_iter->next) {
         set_role_slave((resource_t *) child_iter->data, current);
     }
 }
 
 /* Recursively set the next role of rsc and all its children to Master,
  * unless a next role has already been decided.
  */
 static void
 set_role_master(resource_t * rsc)
 {
     GListPtr child_iter = NULL;
 
     if (rsc->next_role == RSC_ROLE_UNKNOWN) {
         rsc->next_role = RSC_ROLE_MASTER;
     }
 
     for (child_iter = rsc->children; child_iter != NULL; child_iter = child_iter->next) {
         resource_t *child = (resource_t *) child_iter->data;
 
         set_role_master(child);
     }
 }
 
 /* Allocate a master/slave resource: place the instances as an ordinary
  * clone, then decide which of the placed instances (up to master_max)
  * get promoted, based on priorities/sort indexes derived from master
  * scores, location constraints and colocations.
  *
  * Returns NULL (the collective resource itself is not tied to one node).
  */
 node_t *
 master_color(resource_t * rsc, node_t * prefer, pe_working_set_t * data_set)
 {
     int promoted = 0;
     GListPtr gIter = NULL;
     GListPtr gIter2 = NULL;
 
     GHashTableIter iter;
     node_t *node = NULL;
     node_t *chosen = NULL;
     node_t *cons_node = NULL;   /* NOTE(review): appears unused in this function - candidate for removal */
     enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
 
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     if (is_not_set(rsc->flags, pe_rsc_provisional)) {
         /* Already allocated - nothing to do */
         return NULL;
 
     } else if (is_set(rsc->flags, pe_rsc_allocating)) {
         pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
         return NULL;
     }
 
     apply_master_prefs(rsc);
 
     /* Place the instances first, exactly as for an ordinary clone */
     clone_color(rsc, prefer, data_set);
 
     set_bit(rsc->flags, pe_rsc_allocating);
 
     /* count now tracks the number of masters allocated */
     g_hash_table_iter_init(&iter, rsc->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
         node->count = 0;
     }
 
     /*
      * assign priority
      */
     gIter = rsc->children;
     for (; gIter != NULL; gIter = gIter->next) {
         GListPtr list = NULL;
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         pe_rsc_trace(rsc, "Assigning priority for %s: %s", child_rsc->id,
                      role2text(child_rsc->next_role));
 
         if (child_rsc->fns->state(child_rsc, TRUE) == RSC_ROLE_STARTED) {
             set_role_slave(child_rsc, TRUE);
         }
 
         chosen = child_rsc->fns->location(child_rsc, &list, FALSE);
         if (g_list_length(list) > 1) {
             crm_config_err("Cannot promote non-colocated child %s", child_rsc->id);
         }
 
         g_list_free(list);
         if (chosen == NULL) {
             /* Instance was not placed anywhere - it cannot be promoted */
             continue;
         }
 
         next_role = child_rsc->fns->state(child_rsc, FALSE);
         switch (next_role) {
             case RSC_ROLE_STARTED:
             case RSC_ROLE_UNKNOWN:
                 CRM_CHECK(chosen != NULL, break);
                 /*
                  * Default to -1 if no value is set
                  *
                  * This allows master locations to be specified
                  * based solely on rsc_location constraints,
                  * but prevents anyone from being promoted if
                  * neither a constraint nor a master-score is present
                  */
                 child_rsc->priority = master_score(child_rsc, chosen, -1);
                 break;
 
             case RSC_ROLE_SLAVE:
             case RSC_ROLE_STOPPED:
                 /* Not (to be) running - never eligible for promotion */
                 child_rsc->priority = -INFINITY;
                 break;
             case RSC_ROLE_MASTER:
                 /* We will arrive here if we're re-creating actions after a stonith
                  */
                 break;
             default:
                 CRM_CHECK(FALSE /* unhandled */ ,
                           crm_err("Unknown resource role: %d for %s", next_role, child_rsc->id));
         }
 
         /* Apply master-role location preferences from both the child and
          * the collective resource */
         apply_master_location(child_rsc->rsc_location);
         apply_master_location(rsc->rsc_location);
 
         gIter2 = child_rsc->rsc_cons;
         for (; gIter2 != NULL; gIter2 = gIter2->next) {
             rsc_colocation_t *cons = (rsc_colocation_t *) gIter2->data;
 
             child_rsc->cmds->rsc_colocation_lh(child_rsc, cons->rsc_rh, cons);
         }
 
         child_rsc->sort_index = child_rsc->priority;
         pe_rsc_trace(rsc, "Assigning priority for %s: %d", child_rsc->id, child_rsc->priority);
 
         if (next_role == RSC_ROLE_MASTER) {
             /* Existing masters sort first, so they stay promoted */
             child_rsc->sort_index = INFINITY;
         }
     }
 
     dump_node_scores(LOG_DEBUG_3, rsc, "Pre merge", rsc->allowed_nodes);
     master_promotion_order(rsc, data_set);
 
     /* mark the first N as masters */
 
     gIter = rsc->children;
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
         char *score = score2char(child_rsc->sort_index);
 
         chosen = child_rsc->fns->location(child_rsc, NULL, FALSE);
         if (show_scores) {
             fprintf(stdout, "%s promotion score on %s: %s\n",
                     child_rsc->id, chosen ? chosen->details->uname : "none", score);
 
         } else {
             do_crm_log(scores_log_level, "%s promotion score on %s: %s",
                        child_rsc->id, chosen ? chosen->details->uname : "none", score);
         }
         free(score);
 
         chosen = NULL;          /* nuke 'chosen' so that we don't promote more than the
                                  * required number of instances
                                  */
 
         if (child_rsc->sort_index < 0) {
             pe_rsc_trace(rsc, "Not supposed to promote child: %s", child_rsc->id);
 
         } else if (promoted < clone_data->master_max || is_not_set(rsc->flags, pe_rsc_managed)) {
             /* Still below master-max (or unmanaged): check eligibility */
             chosen = can_be_master(child_rsc);
         }
 
         pe_rsc_debug(rsc, "%s master score: %d", child_rsc->id, child_rsc->priority);
 
         if (chosen == NULL) {
             set_role_slave(child_rsc, FALSE);
             continue;
         }
 
         chosen->count++;
         pe_rsc_info(rsc, "Promoting %s (%s %s)",
                     child_rsc->id, role2text(child_rsc->role), chosen->details->uname);
         set_role_master(child_rsc);
         promoted++;
     }
 
     clone_data->masters_allocated = promoted;
     pe_rsc_info(rsc, "%s: Promoted %d instances of a possible %d to master",
                 rsc->id, promoted, clone_data->master_max);
 
     clear_bit(rsc->flags, pe_rsc_provisional);
     clear_bit(rsc->flags, pe_rsc_allocating);
 
     return NULL;
 }
 
 /* Create all actions for a master/slave resource: the normal clone
  * actions for each child, plus the collective promote/promoted and
  * demote/demoted pseudo-actions and their notification boundaries.
  */
 void
 master_create_actions(resource_t * rsc, pe_working_set_t * data_set)
 {
     action_t *action = NULL;
     GListPtr gIter = rsc->children;
     action_t *action_complete = NULL;
     gboolean any_promoting = FALSE;
     gboolean any_demoting = FALSE;
     resource_t *last_promote_rsc = NULL;
     resource_t *last_demote_rsc = NULL;
 
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     pe_rsc_debug(rsc, "Creating actions for %s", rsc->id);
 
     /* create actions as normal */
     clone_create_actions(rsc, data_set);
 
     for (; gIter != NULL; gIter = gIter->next) {
         gboolean child_promoting = FALSE;
         gboolean child_demoting = FALSE;
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         pe_rsc_trace(rsc, "Creating actions for %s", child_rsc->id);
         child_rsc->cmds->create_actions(child_rsc, data_set);
         /* Record whether this child is being promoted and/or demoted */
         master_update_pseudo_status(child_rsc, &child_demoting, &child_promoting);
 
         any_demoting = any_demoting || child_demoting;
         any_promoting = any_promoting || child_promoting;
         pe_rsc_trace(rsc, "Created actions for %s: %d %d", child_rsc->id, child_promoting,
                      child_demoting);
     }
 
     /* promote */
     action = promote_action(rsc, NULL, !any_promoting);
     action_complete = custom_action(rsc, promoted_key(rsc),
                                     RSC_PROMOTED, NULL, !any_promoting, TRUE, data_set);
 
     action_complete->priority = INFINITY;
     update_action_flags(action, pe_action_pseudo);
     update_action_flags(action, pe_action_runnable);
     update_action_flags(action_complete, pe_action_pseudo);
     update_action_flags(action_complete, pe_action_runnable);
 
     /* NOTE(review): pe_action_runnable was already set unconditionally just
      * above, so this conditional update looks redundant - confirm intent */
     if (clone_data->masters_allocated > 0) {
         update_action_flags(action, pe_action_runnable);
         update_action_flags(action_complete, pe_action_runnable);
     }
 
     child_promoting_constraints(clone_data, pe_order_optional,
                                 rsc, NULL, last_promote_rsc, data_set);
 
     if (clone_data->promote_notify == NULL) {
         clone_data->promote_notify =
             create_notification_boundaries(rsc, RSC_PROMOTE, action, action_complete, data_set);
     }
 
     /* demote */
     action = demote_action(rsc, NULL, !any_demoting);
     action_complete = custom_action(rsc, demoted_key(rsc),
                                     RSC_DEMOTED, NULL, !any_demoting, TRUE, data_set);
     action_complete->priority = INFINITY;
 
     update_action_flags(action, pe_action_pseudo);
     update_action_flags(action, pe_action_runnable);
     update_action_flags(action_complete, pe_action_pseudo);
     update_action_flags(action_complete, pe_action_runnable);
 
     child_demoting_constraints(clone_data, pe_order_optional, rsc, NULL, last_demote_rsc, data_set);
 
     if (clone_data->demote_notify == NULL) {
         clone_data->demote_notify =
             create_notification_boundaries(rsc, RSC_DEMOTE, action, action_complete, data_set);
 
         if (clone_data->promote_notify) {
             /* If we ever wanted groups to have notifications we'd need to move this to native_internal_constraints() one day
              * Requires exposing *_notify
              */
             order_actions(clone_data->stop_notify->post_done, clone_data->promote_notify->pre,
                           pe_order_optional);
             order_actions(clone_data->start_notify->post_done, clone_data->promote_notify->pre,
                           pe_order_optional);
             order_actions(clone_data->demote_notify->post_done, clone_data->promote_notify->pre,
                           pe_order_optional);
             order_actions(clone_data->demote_notify->post_done, clone_data->start_notify->pre,
                           pe_order_optional);
             order_actions(clone_data->demote_notify->post_done, clone_data->stop_notify->pre,
                           pe_order_optional);
         }
     }
 
     /* restore the correct priority */
 
     gIter = rsc->children;
     for (; gIter != NULL; gIter = gIter->next) {
         resource_t *child_rsc = (resource_t *) gIter->data;
 
         child_rsc->priority = rsc->priority;
     }
 }
 
 /* Create the ordering constraints internal to a master/slave resource:
  * the standard clone constraints plus the promote/demote orderings for
  * the collective resource and for every child.
  */
 void
 master_internal_constraints(resource_t * rsc, pe_working_set_t * data_set)
 {
     GListPtr iter = NULL;
     resource_t *previous = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     clone_internal_constraints(rsc, data_set);
 
     /* Global orderings:
      *   stopped before start, stopped before promote,
      *   demoted before start, started before promote,
      *   demoted before stop, demote before demoted,
      *   demoted before promote
      */
     new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_START, pe_order_optional, data_set);
     new_rsc_order(rsc, RSC_STOPPED, rsc, RSC_PROMOTE, pe_order_optional, data_set);
     new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_START, pe_order_optional, data_set);
     new_rsc_order(rsc, RSC_STARTED, rsc, RSC_PROMOTE, pe_order_optional, data_set);
     new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_STOP, pe_order_optional, data_set);
     new_rsc_order(rsc, RSC_DEMOTE, rsc, RSC_DEMOTED, pe_order_optional, data_set);
     new_rsc_order(rsc, RSC_DEMOTED, rsc, RSC_PROMOTE, pe_order_optional, data_set);
 
     for (iter = rsc->children; iter != NULL; iter = iter->next) {
         resource_t *child = (resource_t *) iter->data;
 
         /* Each child must finish any demotion before being promoted */
         new_rsc_order(child, RSC_DEMOTE, child, RSC_PROMOTE, pe_order_optional, data_set);
 
         child_promoting_constraints(clone_data, pe_order_optional,
                                     rsc, child, previous, data_set);
 
         child_demoting_constraints(clone_data, pe_order_optional,
                                    rsc, child, previous, data_set);
 
         previous = child;
     }
 }
 
 /* Merge 'score' into the weight of every node in 'hash' whose value of
  * node attribute 'attr' matches that of node 'other'.  A NULL attr
  * defaults to matching on the node name.
  */
 static void
 node_hash_update_one(GHashTable * hash, node_t * other, const char *attr, int score)
 {
     GHashTableIter iter;
     node_t *entry = NULL;
     const char *other_value = NULL;
 
     if (other == NULL) {
         return;
     }
     if (attr == NULL) {
         attr = "#" XML_ATTR_UNAME;
     }
 
     other_value = g_hash_table_lookup(other->details->attrs, attr);
 
     g_hash_table_iter_init(&iter, hash);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) {
         const char *entry_value = g_hash_table_lookup(entry->details->attrs, attr);
 
         if (safe_str_eq(other_value, entry_value)) {
             crm_trace("%s: %d + %d", entry->details->uname, entry->weight, other->weight);
             entry->weight = merge_weights(entry->weight, score);
         }
     }
 }
 
 /* Apply a colocation constraint whose right-hand side is this master
  * resource.  Role-less constraints are handled as plain clone
  * colocations; role-aware constraints either restrict where rsc_lh may
  * run (while rsc_lh is still provisional) or adjust its promotion
  * priority (when rsc_lh must be master).
  */
 void
 master_rsc_colocation_rh(resource_t * rsc_lh, resource_t * rsc_rh, rsc_colocation_t * constraint)
 {
     GListPtr gIter = NULL;
 
     CRM_CHECK(rsc_rh != NULL, return);
     if (is_set(rsc_rh->flags, pe_rsc_provisional)) {
         /* rsc_rh has not been allocated yet - nothing to apply */
         return;
 
     } else if (constraint->role_rh == RSC_ROLE_UNKNOWN) {
         pe_rsc_trace(rsc_rh, "Handling %s as a clone colocation", constraint->id);
         clone_rsc_colocation_rh(rsc_lh, rsc_rh, constraint);
         return;
     }
 
     CRM_CHECK(rsc_lh != NULL, return);
     CRM_CHECK(rsc_lh->variant == pe_native, return);
     pe_rsc_trace(rsc_rh, "Processing constraint %s: %d", constraint->id, constraint->score);
 
     /* A role_rh == RSC_ROLE_UNKNOWN branch used to live here, but it was
      * unreachable: that case always returns above.
      */
     if (is_set(rsc_lh->flags, pe_rsc_provisional)) {
         GListPtr rhs = NULL;
 
         /* Collect the nodes hosting rsc_rh instances in the required role */
         gIter = rsc_rh->children;
         for (; gIter != NULL; gIter = gIter->next) {
             resource_t *child_rsc = (resource_t *) gIter->data;
             node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE);
             enum rsc_role_e next_role = child_rsc->fns->state(child_rsc, FALSE);
 
             pe_rsc_trace(rsc_rh, "Processing: %s", child_rsc->id);
             if (chosen != NULL && next_role == constraint->role_rh) {
                 pe_rsc_trace(rsc_rh, "Applying: %s %s %s %d", child_rsc->id,
                              role2text(next_role), chosen->details->uname, constraint->score);
                 if (constraint->score < INFINITY) {
                     node_hash_update_one(rsc_lh->allowed_nodes, chosen,
                                          constraint->node_attribute, constraint->score);
                 }
                 rhs = g_list_prepend(rhs, chosen);
             }
         }
 
         /* Only do this if it's not a master-master colocation
          * Doing this unconditionally would prevent the slaves from being started
          */
         if (constraint->role_lh != RSC_ROLE_MASTER || constraint->role_rh != RSC_ROLE_MASTER) {
             if (constraint->score >= INFINITY) {
                 node_list_exclude(rsc_lh->allowed_nodes, rhs, TRUE);
             }
         }
         g_list_free(rhs);
 
     } else if (constraint->role_lh == RSC_ROLE_MASTER) {
         resource_t *rh_child = find_compatible_child(rsc_lh, rsc_rh, constraint->role_rh, FALSE);
 
         if (rh_child == NULL && constraint->score >= INFINITY) {
             /* No compatible instance and a mandatory constraint: veto promotion */
             pe_rsc_trace(rsc_lh, "%s can't be promoted %s", rsc_lh->id, constraint->id);
             rsc_lh->priority = -INFINITY;
 
         } else if (rh_child != NULL) {
             int new_priority = merge_weights(rsc_lh->priority, constraint->score);
 
             pe_rsc_debug(rsc_lh, "Applying %s to %s", constraint->id, rsc_lh->id);
             pe_rsc_debug(rsc_lh, "\t%s: %d->%d", rsc_lh->id, rsc_lh->priority, new_priority);
             rsc_lh->priority = new_priority;
         }
     }
 
     return;
 }
 
 /* Append master-specific meta attributes (master-max and
  * master-node-max) to the given XML, after the standard clone meta
  * attributes.
  */
 void
 master_append_meta(resource_t * rsc, xmlNode * xml)
 {
     char *attr = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     clone_append_meta(rsc, xml);
 
     attr = crm_meta_name(XML_RSC_ATTR_MASTER_MAX);
     crm_xml_add_int(xml, attr, clone_data->master_max);
     free(attr);
 
     attr = crm_meta_name(XML_RSC_ATTR_MASTER_NODEMAX);
     crm_xml_add_int(xml, attr, clone_data->master_node_max);
     free(attr);
 }
diff --git a/xml/crm-1.0.dtd b/xml/crm-1.0.dtd
index 3208f02a7a..90960d3716 100644
--- a/xml/crm-1.0.dtd
+++ b/xml/crm-1.0.dtd
@@ -1,865 +1,865 @@
 <?xml version="1.0" encoding="UTF-8" ?>
 <!--
 GLOBAL TODOs:
 
 Versionize DTD so we can validate against a specific version
 
 Background
  The CIB is described quite well in section 5 of the crm.txt (checked into CVS in the crm directory) so it is not repeated here.
  Suffice to say that it stores the configuration and runtime data required for cluster-wide resource management in XML format.
 
 CIB: Information Structure
  The CIB is divided into two main sections: The "static" configuration part and the "dynamic" status.
 
  The configuration contains - surprisingly - the configuration of the cluster, namely node attributes, resource instance configuration, and the constraints which describe the dependencies between all these.
  To identify the most recent configuration available in the cluster, this section is time-stamped with the unique timestamp of the last update.
 
  The status part is dynamically generated / updated by the CRM system and represents the current status of the cluster; which nodes are up, down or crashed, which resources are running where etc.
 
  Every information carrying object has an "id" tag, which is basically the UUID of it, should we ever need to access it directly.
  Unless otherwise stated, the id field is a short name consisting simple ascii characters [a-zA-Z0-9_\-]
  The exception is for resources because the LRM can support only id's of up to 64 characters.
 
 Other Notes
  The description field in all elements is opaque to the CRM and is for administrative comments.
 
 TODO
  * Figure out a sane way to version the DTD
  * Do we need to know about ping nodes...?
  * The integer comparison type really should be number
 -->
 <!ELEMENT cib (configuration, status)>
 <!ATTLIST cib
           cib-last-written CDATA        #IMPLIED
 
           admin_epoch  CDATA        #REQUIRED
           epoch        CDATA        #REQUIRED
           num_updates  CDATA        #REQUIRED
           num_peers    CDATA        #IMPLIED        
 
           cib_feature_revision  CDATA   #IMPLIED
           crm_feature_set       CDATA   #IMPLIED
           remote_access_port    CDATA   #IMPLIED        
 
           dc_uuid        CDATA             #IMPLIED
           ccm_transition CDATA             #IMPLIED
           have_quorum    (true|yes|1|false|no|0)  'false'
           ignore_dtd     (true|yes|1|false|no|0)  #IMPLIED
 
           validate-with  CDATA    #IMPLIED        
           generated      CDATA    #IMPLIED        
           crm-debug-origin CDATA    #IMPLIED>
 
 <!--
 The CIB's version is a tuple of admin_epoch, epoch and num_updates (in that order).
 
 This is used when applying updates from the master CIB instance.
 
  Additionally, num_peers and have_quorum are used during the election process to determine who has the latest configuration.
  * num_updates is incremented every time the CIB changes.
  * epoch is incremented after every DC election.
  * admin_epoch is exclusively for the admin to change.
  * num_peers is the number of CIB instances that we can talk to
  * have_quorum is derived from the ConsensusClusterMembership layer
  * dc_uuid stores the UUID of the current DesignatedController
  * ccm_transition stores the membership instance from the ConsensusClusterMembership layer.
  * cib_feature_revision is the feature set that this configuration requires
 -->
 <!ELEMENT configuration (crm_config, nodes, resources, constraints)>
 
 <!--
 crm_config
 
 Used to specify cluster-wide options.
 
 The use of multiple cluster_property_set sections and time-based rule expressions allows the cluster to behave differently (for example) during business hours than it does overnight.
 -->
 <!ELEMENT crm_config (cluster_property_set)*>
 
 <!--
 Current crm_config options:
 
  * transition_idle_timeout (interval, default=60s):
    If no activity is recorded in this time, the transition is deemed failed as are all sent actions that have not yet been confirmed complete.
    If any operation initiated has an explicit higher timeout, the higher value applies.
 
  * symmetric_cluster (boolean, default=TRUE):
    If true, resources are permitted to run anywhere by default.
    Otherwise, explicit constraints must be created to specify where they can run.
 
  * stonith_enabled (boolean, default=FALSE):
    If true, failed nodes will be fenced.
 
  * no_quorum_policy (enum, default=stop)
    * ignore - Pretend we have quorum
    * freeze - Do not start any resources not currently in our partition.
      Resources in our partition may be moved to another node within the partition
      Fencing is disabled
    * stop - Stop all running resources in our partition
      Fencing is disabled
 
  * default_resource_stickiness
    Do we prefer to run on the existing node or be moved to a "better" one?
    * 0 : resources will be placed optimally in the system.
      This may mean they are moved when a "better" or less loaded node becomes available.
      This option is almost equivalent to auto_failback on except that the resource may be moved to other nodes than the one it was previously active on.
    * value > 0 : resources will prefer to remain in their current location but may be moved if a more suitable node is available.
      Higher values indicate a stronger preference for resources to stay where they are.
    * value < 0 : resources prefer to move away from their current location.
      Higher absolute values indicate a stronger preference for resources to be moved.
    * INFINITY : resources will always remain in their current locations until forced off because the node is no longer eligible to run the resource (node shutdown, node standby or configuration change).
      This option is almost equivalent to auto_failback off except that the resource may be moved to other nodes than the one it was previously active on.
    * -INFINITY : resources will always move away from their current location.
 
  * is_managed_default (boolean, default=TRUE)
    Unless the resource's definition says otherwise,
    * TRUE : resources will be started, stopped, monitored and moved as necessary/required
    * FALSE : resources will not be started if stopped, stopped if started, nor have any recurring actions scheduled.
 
  * stop_orphan_resources (boolean, default=TRUE (as of release 2.0.6))
    If a resource is found for which we have no definition for;
    * TRUE : Stop the resource
    * FALSE : Ignore the resource
    This mostly affects the CRM's behavior when a resource is deleted by an admin without it first being stopped.
 
  * stop_orphan_actions (boolean, default=TRUE)
    If a recurring action is found for which we have no definition for;
    * TRUE : Stop the action
    * FALSE : Ignore the action
    This mostly affects the CRM's behavior when the interval for a recurring action is changed.
 -->
 <!ELEMENT cluster_property_set (rule*, attributes)>
 <!ATTLIST cluster_property_set
           id                CDATA        #REQUIRED
           score             CDATA        #IMPLIED>
 
 <!ELEMENT nodes       (node*)>
 
 <!--
  * id    : the node's UUID.
  * uname : the result of uname -n
  * type  : should either be "normal" or "member" for nodes you wish to run resources 
    "normal" is preferred as of version 2.0.4
 
 Each node can also have additional "instance" attributes.
 These attributes are completely arbitrary and can be used later in constraints.
 In this way it is possible to define groups of nodes to which a constraint can apply.
 
 It is also theoretically possible to have a process on each node which updates these values automatically.
 This would make it possible to have an attribute that represents "connected to SAN subsystem" or perhaps "system_load (low|medium|high)".
 
 Ideally it would be possible to have the CRMd on each node gather some of this information and automatically populate things like architecture and OS/kernel version.
 -->
 <!ELEMENT node (instance_attributes*)>
 <!ATTLIST node
           id            CDATA         #REQUIRED
           uname         CDATA         #REQUIRED
           description   CDATA         #IMPLIED
           type          (normal|member|ping) #REQUIRED>
 
 <!ELEMENT resources   (primitive|group|clone|master_slave)*>
 
 <!--
  * class
    Specifies the location and standard the resource script conforms to
    * ocf
      Most OCF RAs started out life as v1 Heartbeat resource agents.
      These have all been ported to meet the OCF specifications.
      As an added advantage, in accordance with the OCF spec, they also describe the parameters they take and what their defaults are.
      It is also easier to configure them as each part of the configuration is passed as its own parameter.
      In accordance with the OCF spec, each parameter is passed to the RA with an OCF_RESKEY_ prefix.
      So ip=192.168.1.1 in the CIB would be passed as OCF_RESKEY_ip=192.168.1.1.
      Located under /usr/lib/ocf/resource.d/heartbeat/.
    * lsb
      Most Linux init scripts conform to the LSB specification.
      The class allows you to use those that do as resource agents controlled by Heartbeat.
      Located in /etc/init.d/.
    * heartbeat
      This class gives you access to the v1 Heartbeat resource agents and allows you to reuse any custom agents you may have written.
      Located at /etc/heartbeat/resource.d/ or /etc/ha.d/resource.d.
 
  * type : The name of the ResourceAgent you wish to use.
 
  * provider
    The OCF spec allows multiple vendors to supply the same ResourceAgent.
    To use the OCF resource agents supplied with Heartbeat, you should specify heartbeat here
 
  * is_managed : Is the ClusterResourceManager in control of this resource.
    * true : (default) the resource will be started, stopped, monitored and moved as necessary/required
    * false : the resource will not be started if stopped, stopped if started, nor have any recurring actions scheduled.
      The resource may still be referenced in colocation constraints and ordering constraints (though obviously if no actions are performed on it then it will prevent the action on the other resource too)
 
  * restart_type
    Used when the other side of an ordering dependency is restarted/moved.
    * ignore : the default.
      Don't do anything extra.
    * restart
      Use this for example to have a restart of your database also trigger a restart of your web-server.
    * multiple_active
      Used when a resource is detected as being active on more than one machine.
      The default value, stop_start, will stop all instances and start only 1
    * block : don't do anything, wait for the administrator
    * stop_only : stop all the active instances
    * stop_start : start the resource on one node after having stopped all the active instances
 
  * resource_stickiness
    See the description of the default_resource_stickiness cluster attribute.
    resource_stickiness allows you to override the cluster's default for the individual resource.
 
 NOTE: primitive resources may contain at most one "operations" object.
       The CRM will complain about your configuration if this criteria is not met.
       Please use crm_verify to ensure your configuration is valid.
       The DTD is written this way to be order in-sensitive.
 -->
 <!ELEMENT primitive (operations|meta_attributes|instance_attributes)*>
 <!ATTLIST primitive
           id                CDATA        #REQUIRED
           description       CDATA        #IMPLIED
           class             (ocf|lsb|heartbeat|stonith) #REQUIRED
           type              CDATA        #REQUIRED
           provider          CDATA        #IMPLIED
 
           is_managed            CDATA                            #IMPLIED
           restart_type          (ignore|restart)                 'ignore'
           multiple_active       (stop_start|stop_only|block)     'stop_start'
           resource_stickiness   CDATA                             #IMPLIED>
 <!--
 This allows us to specify how long an action can take
 
  * name : the name of the operation.
    Supported operations are start, stop, & monitor
 
  * start_delay : delay the operation after starting the resource
    By default this value is in milliseconds, however you can also specify a value in seconds like so start_delay="5s". Used for the monitor operation.
 
  * timeout : the maximum period of time before considering the action failed.
    By default this value is in milliseconds, however you can also specify a value in seconds like so timeout="5s".
 
  * interval : This currently only applies to monitor operations and specifies how often the LRM should check the resource is active.
    The same notation for timeout applies.
 
  * prereq : What conditions need to be met before this action can be run
    * nothing : This action can be performed at any time
    * quorum : This action requires the partition to have quorum
    * fencing : This action requires the partition to have quorum and any fencing operations to have completed before it can be executed
 
  * on_fail : The action to take if this action ever fails.
    * nothing : Pretend the action didn't actually fail
    * block : Take no further action on the resource - wait for the administrator to resolve the issue
    * restart : Stop the resource and re-allocate it elsewhere
    * stop : Stop the resource and DO NOT re-allocate it elsewhere
    * fence : Currently this means fence the node on which the resource is running.
      Any other resources currently active on the machine will be migrated away before fencing occurs.
 
 Only one entry per supported action+interval is currently permitted.
 Parameters specific to each operation can be passed using the instance_attributes section.
 -->
 <!ELEMENT operations (op*)>
 <!ELEMENT op (meta_attributes|instance_attributes)*>
 <!ATTLIST op
           id            CDATA         #REQUIRED
           name          CDATA         #REQUIRED
           description   CDATA         #IMPLIED
           interval      CDATA         #IMPLIED
           timeout       CDATA         #IMPLIED
           start_delay   CDATA         '0'
           disabled      (true|yes|1|false|no|0)        'false'
           role          (Master|Slave|Started|Stopped) 'Started'
           prereq        (nothing|quorum|fencing)       #IMPLIED
           on_fail       (ignore|block|stop|restart|fence)     #IMPLIED>
 <!--
 Use this to emulate v1 type Heartbeat groups.
 Defining a resource group is a quick way to make sure that the resources:
  * are all started on the same node, and
  * are started and stopped in the correct (sequential) order
 though either or both of these properties can be disabled.
 
 NOTE: Do not create empty groups.  
       They are temporarily supported because the GUI requires it but will be removed as soon as possible.
       The DTD is written this way to be order in-sensitive.
 -->
 <!ELEMENT group (meta_attributes|instance_attributes|primitive)*>
 <!ATTLIST group
           id            CDATA               #REQUIRED
           description   CDATA               #IMPLIED
 
           is_managed            CDATA                         #IMPLIED
           restart_type          (ignore|restart)              'ignore'
           multiple_active       (stop_start|stop_only|block)  'stop_start'
           resource_stickiness   CDATA			      #IMPLIED
 
           ordered               (true|yes|1|false|no|0)       'true'
           collocated            (true|yes|1|false|no|0)       'true'>
 <!--
 Clones are intended as a mechanism for easily starting a number of resources (such as a web-server) with the same configuration.
 As an added benefit, the number that should be started is an instance parameter and when combined with time-based constraints, allows the administrator to run more instances during peak times and save on resources during idle periods.
 
  * ordered
    Start (or stop) each clone only after the operation on the previous clone completed.
 
  * interleaved
    If a colocation constraint is created between two clone resources and interleaved is true, then clone N from one resource will be assigned the same location as clone N from the other resource.
    If the number of runnable clones differs, then the leftovers can be located anywhere.
 Using a cloned group is a much better way of achieving the same result.
 
  * notify
    If true, inform peers before and after any clone is stopped or started.
-   If an action failed, you will (currently) not recieve a post-notification.
+   If an action failed, you will (currently) not receive a post-notification.
    Instead you can next expect to see a pre-notification for a stop.
    If a stop fails, and you have fencing you will get a post-notification for the stop after the fencing operation has completed.
    In order to use the notification service ALL descendants of the clone MUST support the notify action.
    Currently this action is not permitted to fail, though depending on your configuration, can block almost indefinitely.
    Behaviour in response to a failed action or notification is likely to be improved in future releases.
 
    See http://www.clusterlabs.org/doc/en-US/Pacemaker/1.0/html/Pacemaker_Explained/s-resource-clone.html for more information on notify actions
 
 
 NOTE: Clones must contain exactly one primitive or one group resource. 
       The CRM will complain about your configuration if this criteria is not met.
       Please use crm_verify to ensure your configuration is valid.
       The DTD is written this way to be order in-sensitive.
 -->
 
 <!ELEMENT clone (meta_attributes|instance_attributes|primitive|group)*>
 <!ATTLIST clone
           id            CDATA               #REQUIRED
           description   CDATA               #IMPLIED
 
           is_managed            CDATA                         #IMPLIED
           restart_type          (ignore|restart)              'ignore'
           multiple_active       (stop_start|stop_only|block)  'stop_start'
           resource_stickiness   CDATA                         #IMPLIED
 
           notify                (true|yes|1|false|no|0)       'false'
           globally_unique       (true|yes|1|false|no|0)       'true'
           ordered               (true|yes|1|false|no|0)       'false'
           interleave            (true|yes|1|false|no|0)       'false'>
 <!--
 Master/Slave resources are a superset of Clones in that instances can also be in one of two states.
 The meaning of the states is specific to the resource.
 
 NOTE: master_slave must contain exactly one primitive resource OR one group resource.
       It may not contain both, nor may it contain neither.
       The CRM will complain about your configuration if this criteria is not met.
       Please use crm_verify to ensure your configuration is valid.
       The DTD is written this way to be order in-sensitive.
 -->
 <!ELEMENT master_slave (meta_attributes|instance_attributes|primitive|group)*>
 <!ATTLIST master_slave
           id            CDATA       #REQUIRED
           description   CDATA       #IMPLIED
 
           is_managed            CDATA                         #IMPLIED
           restart_type          (ignore|restart)              'ignore'
           multiple_active       (stop_start|stop_only|block)  'stop_start'
           resource_stickiness   CDATA                         #IMPLIED
 
           notify                (true|yes|1|false|no|0)       'false'
           globally_unique       (true|yes|1|false|no|0)       'true'
           ordered               (true|yes|1|false|no|0)       'false'
           interleave            (true|yes|1|false|no|0)       'false'>
 
 <!--
 Most resource options are configured as instance attributes.
 Some of the built-in options can be configured directly on the resource or as an instance attribute.
 The advantage of using instance attributes is the added flexibility that can be achieved through conditional ?<rule/>s (see below).
 
 You can have multiple sets of 'instance attributes', they are first sorted by score and then processed.
 The first to have its ?<rule/> satisfied and define an attribute wins.
 Subsequent values for the attribute will be ignored.
 
 Note that:
  * instance_attributes sets with id equal to cib-bootstrap-options are treated as if they have a score of INFINITY.
  * instance_attributes sets with no score implicitly have a score of zero.
  * instance_attributes sets with no rule implicitly have a rule that evaluates to true.
 
 The addition of conditional <rule/>s to the instance_attributes object allows for an infinite variety of configurations.
 Just some of the possibilities are:
  * Specify different resource parameters
    * depending on the node it is allocated to (a resource may need to use eth1 on host1 but eth0 on host2)
    * depending on the time of day (run 10 web-servers at night and 100 during the day)
  * Allow nodes to have different attributes depending on the time-of-day
    * Set resource_stickiness to avoid failback during business hours but allow resources to be moved to a more preferred node on the weekend
    * Switch a node between a "front-end" processing group during the day to a "back-end" group at night.
 
 Common instance attributes for all resource types:
  * priority (integer, default=0):
    dictates the order in which resources will be processed.
    If there is an insufficient number of nodes to run all resources, the lower priority resources will be stopped to make sure the higher priority resources remain active.
 
  * is_managed: See previous description.
 
  * resource_stickiness: See previous description.
 
  * target_role: (Started|Stopped|Master|Slave|default, default=#default)
    * #default : Let the cluster decide what to do with the resource
    * Started : Ignore any specified value of is_managed or is_managed_default and attempt to start the resource
    * Stopped : Ignore any specified value of is_managed or is_managed_default and attempt to stop the resource
    * Master : Ignore any specified value of is_managed, is_managed_default or promotion preferences and attempt to put all instances of a cloned resource into Master mode.
    * Slave : Ignore any specified value of is_managed, is_managed_default or promotion preferences and attempt to put all instances of a cloned resource into Slave mode.
 
 Common instance attributes for clones:
  * clone_max (integer, default=1):
    the number of clones to be run
 
 * clone_node_max (integer, default=1):
   the maximum number of clones to be run on a single node
 
 Common instance attributes for nodes:
  * standby (boolean, default=FALSE)
    if TRUE, indicates that resources can not be run on the node
 -->
 <!ELEMENT instance_attributes (rule*, attributes)>
 <!ATTLIST instance_attributes
           id                CDATA        #REQUIRED
           score             CDATA        #IMPLIED>
 
 <!ELEMENT meta_attributes (rule*, attributes)>
 <!ATTLIST meta_attributes
           id                CDATA        #REQUIRED
           score             CDATA        #IMPLIED>
 
 <!--
 Every constraint entry also has a 'lifetime' attribute, which expresses when this constraint is applicable.
 For example, a constraint may only be valid during certain times of the day, or days of the week.
 Eventually, we would like to be able to support constraints that only last until events such as the next reboot or the next transition.
 -->
 <!ELEMENT constraints (rsc_order|rsc_colocation|rsc_location)*>
 
 <!--
 rsc_ordering constraints express dependencies between the actions on two resources.
  * from : A resource id
  * action : What action does this constraint apply to.
  * type : Should the action on from occur before or after action on to
  * to : A resource id
  * symmetrical : If TRUE, create the reverse constraint for the other action also.
 
 Read as:
      action from type to_action to
 eg. 
      start rsc1 after promote rsc2
 
 -->
 <!ELEMENT rsc_order (lifetime?)>
 <!ATTLIST rsc_order
           id        CDATA #REQUIRED
           from      CDATA #REQUIRED
           to        CDATA #REQUIRED
           action    CDATA		'start'
           to_action CDATA		'start'
           type      (before|after)	'after'
           score     CDATA		'INFINITY'
           symmetrical (true|yes|1|false|no|0)	'true'>
 
 <!--
 
 Specify where a resource should run relative to another resource
 
 Make rsc 'from' run on the same machine as rsc 'to'
 
 If rsc 'to' cannot run anywhere and 'score' is INFINITY, 
   then rsc 'from' won't be allowed to run anywhere either
 If rsc 'from' cannot run anywhere, then 'to' won't be affected
 
 -->
 <!ELEMENT rsc_colocation (lifetime?)>
 <!ATTLIST rsc_colocation
           id             CDATA #REQUIRED
           from           CDATA #REQUIRED
           from_role      CDATA #IMPLIED
           to             CDATA #REQUIRED
           to_role        CDATA #IMPLIED
           symmetrical    (true|yes|1|false|no|0)	'false'
           node_attribute CDATA #IMPLIED
           score          CDATA #REQUIRED>
 
 <!--
 Specify which nodes are eligible for running a given resource.
 
 During processing, all rsc_location for a given rsc are evaluated.
 
 All nodes start out with their base weight (which defaults to zero).
 This can then be modified (up or down) using any number of rsc_location constraints.
 
 Then the highest non-zero available node is determined to place the resource.
 If multiple nodes have the same weighting, the node with the fewest running resources is chosen.
 
 The rsc field is, surprisingly, a resource id.
 -->
 <!ELEMENT rsc_location (lifetime?,rule*)>
 <!ATTLIST rsc_location
           id          CDATA #REQUIRED
           description CDATA #IMPLIED
           rsc         CDATA #REQUIRED
           node        CDATA #IMPLIED
           score       CDATA #IMPLIED>
 <!ELEMENT lifetime (rule+)>
 <!ATTLIST lifetime id  CDATA     #REQUIRED>
 
 <!--
  * boolean_op
    determines how the results of multiple expressions are combined.
 
  * role
    limits this rule to applying to Multi State resources with the named role.
    Roles include Started, Stopped, Slave, Master though only the last two are considered useful.
    NOTE: A rule with role="Master" cannot determine the initial location of a clone instance.
    It will only affect which of the active instances will be promoted.
 
  * score
    adjusts the preference for running on the matched nodes.
    NOTE: Nodes that end up with a negative score will never run the resource.
    Two special values of "score" exist: INFINITY and -INFINITY.
    Processing of these special values is as follows:
 
       INFINITY +/- -INFINITY : -INFINITY
       INFINITY +/-  int      :  INFINITY
      -INFINITY +/-  int      : -INFINITY
   
  * score_attribute 
    an alternative to the score attribute that provides extra flexibility.
   Each node matched by the rule has its score adjusted differently, according to its value for the named node attribute.
   Thus in the example below, if score_attribute="installed_ram", nodeA would have its preference to run "the resource" increased by 1024 whereas nodeB would have its preference increased only by half as much.
 
     <nodes>
       <node id="uuid1" uname="nodeA" type="normal">
         <instance_attributes id="uuid1:custom_attrs">
           <attributes>
             <nvpair id="uuid1:installed_ram" name="installed_ram" value="1024"/>
             <nvpair id="uuid1:my_other_attr" name="my_other_attr" value="bob"/>
           </attributes>
         </instance_attributes>
       </node>
       <node id="uuid2" uname="nodeB" type="normal">
         <instance_attributes id="uuid2:custom_attrs">
           <attributes>
             <nvpair id="uuid2:installed_ram" name="installed_ram" value="512"/>
           </attributes>
         </instance_attributes>
       </node>
     </nodes>
 -->
 <!ELEMENT rule (expression|date_expression|rule)*>
 <!ATTLIST rule
           id                  CDATA          #REQUIRED
           role                CDATA          #IMPLIED
           score               CDATA          #IMPLIED
           score_attribute     CDATA          #IMPLIED
           boolean_op          (or|and)      'and'>
 
 <!--
 Returns TRUE or FALSE depending on the properties of the object being tested.
 
  * type determines how the values are compared.
    * integer Values are converted to floats before being compared.
    * version The "version" type is intended to solve the problem of comparing 1.2 and 1.10
    * string Uses strcmp
 
 Two built-in attributes are node id #id and node uname #uname so that:
       attribute=#id value=8C05CA5C-C9E3-11D8-BEE6-000A95B71D78 operation=eq, and
       attribute=#uname value=test1 operation=eq
 would both be valid tests.
 
 An extra built-in attribute called #is_dc will be set to true or false depending on whether the node is operating as the DC for the cluster.
 Valid tests using this test would be of the form:
 
         attribute=#is_dc operation=eq value=true,  and
         attribute=#is_dc operation=eq value=false, and
         attribute=#is_dc operation=ne value=false
                         (for those liking double negatives :))
 -->
 <!ELEMENT expression EMPTY>
 <!ATTLIST expression
           id         CDATA                    #REQUIRED
           attribute  CDATA                    #REQUIRED
           operation  (lt|gt|lte|gte|eq|ne|defined|not_defined) #REQUIRED
           value      CDATA                    #IMPLIED
           type       (number|string|version) 'string'>
 
 <!--
  * start : A date-time conforming to the ISO8601 specification.
  * end : A date-time conforming to the ISO8601 specification.
    A value for end may, for any usage, be omitted and instead inferred using start and duration.
  * operation
    * gt : Compares the current date-time with start date.
      Checks now > start.
    * lt : Compares the current date-time with end date.
      Checks end > now
    * in_range : Compares the current date-time with start and end.
      Checks now > start and end > now.
      If either start or end is omitted, then that part of the comparison is not performed.
    * date_spec : Performs a cron-like comparison between the contents of date_spec and now.
      If values for start and/or end are included, now must also be within that range.
      Or in other words, the date_spec operation can also be made to perform an extra in_range check.
 
 NOTE: Because the comparisons (except for date_spec) include the time, the eq, neq, gte and lte operators have not been implemented.
 -->
 <!ELEMENT date_expression (date_spec?,duration?)>
 <!ATTLIST date_expression
         id         CDATA  #REQUIRED
         operation  (in_range|date_spec|gt|lt) 'in_range'
         start      CDATA  #IMPLIED
         end        CDATA  #IMPLIED>
 
 <!--
 date_spec is used for (surprisingly  ) date_spec operations.
 
 Fields that are not supplied are ignored.
 
 Fields can contain a single number or a single range.
 Eg.
 monthdays="1" (Matches the first day of every month) and hours="09-17" (Matches hours between 9am and 5pm inclusive) are both valid values.
 weekdays="1,2" and weekdays="1-2,5-6" are NOT valid ranges.
 This may change in a future release.
 
  * seconds : Value range 0-59
  * minutes : Value range 0-59
  * hours : Value range 0-23
  * monthdays : Value range 0-31 (depending on current month and year)
  * weekdays : Value range 1-7 (1=Monday, 7=Sunday)
  * yeardays : Value range 1-366 (depending on current year)
  * months : Value range 1-12
  * weeks : Value range 1-53 (depending on weekyear)
  * weekyears : Value range 0...
   (NOTE: weekyears may differ from Gregorian years.
   Eg. 2005-001 Ordinal == 2005-01-01 Gregorian == 2004-W53-6 Weekly )
  * years : Value range 0...
  * moon : Value range 0..7 - 0 is new, 4 is full moon.
    Because we can(tm)
 -->
 
 <!ELEMENT date_spec EMPTY>
 <!ATTLIST date_spec
         id         CDATA  #REQUIRED
         hours      CDATA  #IMPLIED
         monthdays  CDATA  #IMPLIED
         weekdays   CDATA  #IMPLIED
         yeardays   CDATA  #IMPLIED
         months     CDATA  #IMPLIED
         weeks      CDATA  #IMPLIED
         weekyears  CDATA  #IMPLIED
         years      CDATA  #IMPLIED
         moon       CDATA  #IMPLIED>
 
 <!--
 duration is optionally used for calculating a value for end.
 Any field not supplied is assumed to be zero and ignored.
 Negative values might work.
 Eg. months=11 should be equivalent to writing years=1, months=-1 but is not encouraged.
 -->
 <!ELEMENT duration EMPTY>
 <!ATTLIST duration
         id         CDATA  #REQUIRED
         hours      CDATA  #IMPLIED
         monthdays  CDATA  #IMPLIED
         weekdays   CDATA  #IMPLIED
         yeardays   CDATA  #IMPLIED
         months     CDATA  #IMPLIED
         weeks      CDATA  #IMPLIED
         years      CDATA  #IMPLIED>
 <!--
 Example 1: True if now is any time in the year 2005.
 
 <rule id="rule1">
   <date_expression id="date_expr1" start="2005-001" operation="in_range">
     <duration years="1"/>
   </date_expression>
 </rule>
 Example 2: Equivalent expression.
 
 <rule id="rule2">
   <date_expression id="date_expr2" operation="date_spec">
     <date_spec years="2005"/>
   </date_expression>
 </rule>
 Example 3: 9am-5pm, Mon-Friday
 
 <rule id="rule3">
   <date_expression id="date_expr3" operation="date_spec">
     <date_spec hours="9-16" days="1-5"/>
   </date_expression>
 </rule>
 Example 4: 9am-5pm, Mon-Friday, or all day saturday
 
 <rule id="rule4" boolean_op="or">
   <date_expression id="date_expr4-1" operation="date_spec">
     <date_spec hours="9-16" days="1-5"/>
   </date_expression>
   <date_expression id="date_expr4-2" operation="date_spec">
     <date_spec days="6"/>
   </date_expression>
 </rule>
 Example 5: 9am-5pm or 9pm-12pm, Mon-Friday
 
 <rule id="rule5" boolean_op="and">
   <rule id="rule5-nested1" boolean_op="or">
     <date_expression id="date_expr5-1" operation="date_spec">
       <date_spec hours="9-16"/>
     </date_expression>
     <date_expression id="date_expr5-2" operation="date_spec">
       <date_spec hours="21-23"/>
     </date_expression>
   </rule>
   <date_expression id="date_expr5-3" operation="date_spec">
     <date_spec days="1-5"/>
   </date_expression>
 </rule>
 Example 6: Mondays in March 2005
 
 <rule id="rule6" boolean_op="and">
   <date_expression id="date_expr6" operation="date_spec" start="2005-03-01" end="2005-04-01">
     <date_spec weekdays="1"/>
   </date_expression>
 </rule>
 NOTE: Because no time is specified, 00:00:00 is implied.
 This means that the range includes all of 2005-03-01 but none of 2005-04-01.
 You may wish to write end="2005-03-31T23:59:59" to avoid confusion.
 
 Example 7: Friday the 13th if it is a full moon
 
 <rule id="rule7" boolean_op="and">
   <date_expression id="date_expr7" operation="date_spec">
     <date_spec weekdays="5" monthdays="13" moon="4"/>
   </date_expression>
 </rule>
 -->
 
 <!--
 You don't have to give a value.
 There's a difference between a key not being present and a key not having a value.
 -->
 <!ELEMENT nvpair EMPTY>
 <!ATTLIST nvpair
           id     CDATA  #REQUIRED
           name   CDATA  #REQUIRED
           value  CDATA  #IMPLIED>
 
 <!ELEMENT attributes (nvpair*)>
 
 <!--
 These attributes take effect only if no value has previously been applied as part of the node's definition.
 Additionally, when the node reboots all settings made here are erased.
 
 id must be the UUID of the node.
 -->
 <!ELEMENT transient_attributes (instance_attributes*)>
 <!ATTLIST transient_attributes id CDATA #IMPLIED>
 
 <!--=========== Status - Advanced Use Only ===========-->
 
 <!--
 Details about the status of each node configured.
 
 HERE BE DRAGONS
 
 Never, ever edit this section directly or using cibadmin.
 The consequences of doing so are many and varied but rarely ever good or what you anticipated.
 To discourage this, the status section is no longer even written to disk, and is always discarded at startup.
 
 To avoid duplication of data, state entries only carry references to nodes and resources.
 -->
 <!ELEMENT status (node_state*)>
 
 <!--
 The state of a given node.
 
 This information is updated by the DC based on inputs from sources such as the CCM, status messages from remote LRMs and requests from other nodes.
  * id       -  is the node's UUID.
  * uname    - is the result of uname -n for the node.
  * crmd     - records whether the crmd process is running on the node
  * in_ccm   - records whether the node is part of our membership partition
  * join     - is the node's membership status with the current DC.
  * expected - is the DC's expectation of whether the node is up or not.
  * shutdown - is set to the time at which the node last asked to be shut down
 
 Ideally, there should be a node_state entry for every entry in the <nodes> list.
 
 -->
 <!ELEMENT node_state (transient_attributes|lrm)*>
 <!ATTLIST node_state
         id              CDATA                   #REQUIRED
         uname           CDATA                   #REQUIRED
         ha              (active|dead)           #IMPLIED
         crmd            (online|offline)        'offline'
         join            (pending|member|down)   'down'
         expected        (pending|member|down)   'down'
         in_ccm          (true|yes|1|false|no|0) 'false'
         crm-debug-origin CDATA                  #IMPLIED
         shutdown        CDATA                   #IMPLIED
         clear_shutdown  CDATA                   #IMPLIED>
 
 <!--
 Information from the Local Resource Manager of the node.
 It contains a list of all resources added (but not necessarily still active) on the node.
 -->
 <!ELEMENT lrm (lrm_resources)>
 <!ATTLIST lrm id CDATA #REQUIRED>
 
 <!ELEMENT lrm_resources (lrm_resource*)>
 <!ELEMENT lrm_resource (lrm_rsc_op*)>
 <!ATTLIST lrm_resource
           id            CDATA #REQUIRED
           class             (lsb|ocf|heartbeat|stonith) #REQUIRED
           type              CDATA        #REQUIRED
           provider          CDATA        #IMPLIED>
 <!--
 lrm_rsc_op (Resource Status)
 
 id: Set to [operation] +"_"+ [operation] +"_"+ [an_interval_in_milliseconds]
 
 operation typically start, stop, or monitor
 
 call_id: Supplied by the LRM, determines the order in which lrm_rsc_op objects should be processed in order to determine the resource's true state
 
 rc_code is the last return code from the resource
 
 rsc_state is the state of the resource after the action completed and should be used as a guide only.
 
 transition_key contains an identifier and sequence number for the transition.
 
 At startup, the TEngine registers the identifier and starts the sequence at zero.
 It is used to identify the source of resource actions.
 
 transition_magic contains an identifier containing call_id, rc_code, and transition_key.
 
 As the name suggests, it is a piece of magic that allows the TE to always identify the action from the stream of xml-diffs it subscribes to from the CIB.
 
 last_run       ::= when did the op run (as age)
 last_rc_change ::= last rc change (as age)
 exec_time      ::= time it took the op to run
 queue_time     ::= time spent in queue
 
 op_status is supplied by the LRM and conforms to this enum:
 
 typedef enum {
         LRM_OP_PENDING = -1,
         LRM_OP_DONE,
         LRM_OP_CANCELLED,
         LRM_OP_TIMEOUT,
         LRM_OP_NOTSUPPORTED,
         LRM_OP_ERROR,
 } op_status_t;
 The parameters section allows us to detect when a resource's definition has changed and it needs to be restarted (so the changes take effect).
 -->
 <!ELEMENT lrm_rsc_op EMPTY>
 <!ATTLIST lrm_rsc_op
           id                    CDATA #REQUIRED
           operation             CDATA #REQUIRED
           op_status             CDATA #REQUIRED
           rc_code               CDATA #REQUIRED
           call_id               CDATA #REQUIRED
           crm_feature_set       CDATA #REQUIRED
           crm-debug-origin      CDATA #IMPLIED
           migrate_from          CDATA #IMPLIED
           transition_key        CDATA #IMPLIED
           op_digest             CDATA #IMPLIED
           op_restart_digest     CDATA #IMPLIED
           op_force_restart      CDATA #IMPLIED
 
           last_run              CDATA #IMPLIED
           last_rc_change        CDATA #IMPLIED
           exec_time             CDATA #IMPLIED
           queue_time            CDATA #IMPLIED
 
           interval              CDATA #REQUIRED
           transition_magic      CDATA #REQUIRED>
diff --git a/xml/crm-transitional.dtd b/xml/crm-transitional.dtd
index b29b52d84c..a0f1a8c3c4 100644
--- a/xml/crm-transitional.dtd
+++ b/xml/crm-transitional.dtd
@@ -1,885 +1,885 @@
 <?xml version="1.0" encoding="UTF-8" ?>
 <!--
 GLOBAL TODOs:
 
 Versionize DTD so we can validate against a specific version
 
 Background
  The CIB is described quite well in section 5 of the crm.txt (checked into CVS in the crm directory) so it is not repeated here.
  Suffice to say that it stores the configuration and runtime data required for cluster-wide resource management in XML format.
 
 CIB: Information Structure
  The CIB is divided into two main sections: The "static" configuration part and the "dynamic" status.
 
  The configuration contains - surprisingly - the configuration of the cluster, namely node attributes, resource instance configuration, and the constraints which describe the dependencies between all these.
  To identify the most recent configuration available in the cluster, this section is time-stamped with the unique timestamp of the last update.
 
  The status part is dynamically generated / updated by the CRM system and represents the current status of the cluster; which nodes are up, down or crashed, which resources are running where etc.
 
  Every information carrying object has an "id" tag, which is basically the UUID of it, should we ever need to access it directly.
  Unless otherwise stated, the id field is a short name consisting of simple ascii characters [a-zA-Z0-9_\-]
  The exception is for resources because the LRM can support only id's of up to 64 characters.
 
 Other Notes
  The description field in all elements is opaque to the CRM and is for administrative comments.
 
 TODO
  * Figure out a sane way to version the DTD
  * Do we need to know about ping nodes...?
  * The integer comparison type really should be number
 -->
 <!ELEMENT cib (configuration, status)>
 <!ATTLIST cib
           cib-last-written CDATA        #IMPLIED
 
           admin_epoch  CDATA        #IMPLIED
           epoch        CDATA        #REQUIRED
           num_updates  CDATA        #IMPLIED
           num_peers    CDATA        #IMPLIED        
 
           cib_feature_revision  CDATA   #IMPLIED
           crm_feature_set       CDATA   #IMPLIED
           remote_access_port    CDATA   #IMPLIED        
 
           dc-uuid               CDATA   #IMPLIED
           have-quorum           (true|yes|1|false|no|0)  'false'
 	  no-quorum-panic       (true|yes|1|false|no|0)  'false'	
 
           validate-with         CDATA   #IMPLIED        
           remote-tls-port       CDATA   #IMPLIED        
 
           dc_uuid        CDATA             #IMPLIED
           ccm_transition CDATA             #IMPLIED
           have_quorum    (true|yes|1|false|no|0)  'false'
           ignore_dtd     (true|yes|1|false|no|0)  #IMPLIED
 
           generated      CDATA    #IMPLIED        
           crm-debug-origin CDATA    #IMPLIED>
 
 <!--
 The CIB's version is a tuple of admin_epoch, epoch and num_updates (in that order).
 
 This is used when applying updates from the master CIB instance.
 
 Additionally, num_peers and have_quorum are used during the election process to determine who has the latest configuration.
  * num_updates is incremented every time the CIB changes.
  * epoch is incremented after every DC election.
  * admin_epoch is exclusively for the admin to change.
  * num_peers is the number of CIB instances that we can talk to
  * have_quorum is derived from the ConsensusClusterMembership layer
  * dc_uuid stores the UUID of the current DesignatedController
  * ccm_transition stores the membership instance from the ConsensusClusterMembership layer.
  * cib_feature_revision is the feature set that this configuration requires
 -->
 <!ELEMENT configuration (crm_config, nodes, resources, constraints)>
 
 <!--
 crm_config
 
 Used to specify cluster-wide options.
 
 The use of multiple cluster_property_set sections and time-based rule expressions allows the cluster to behave differently (for example) during business hours than it does overnight.
 -->
 <!ELEMENT crm_config (cluster_property_set)*>
 
 <!--
 Current crm_config options:
 
  * transition_idle_timeout (interval, default=60s):
    If no activity is recorded in this time, the transition is deemed failed as are all sent actions that have not yet been confirmed complete.
    If any operation initiated has an explicit higher timeout, the higher value applies.
 
  * symmetric_cluster (boolean, default=TRUE):
    If true, resources are permitted to run anywhere by default.
    Otherwise, explicit constraints must be created to specify where they can run.
 
  * stonith_enabled (boolean, default=FALSE):
    If true, failed nodes will be fenced.
 
  * no_quorum_policy (enum, default=stop)
    * ignore - Pretend we have quorum
    * freeze - Do not start any resources not currently in our partition.
      Resources in our partition may be moved to another node within the partition
      Fencing is disabled
    * stop - Stop all running resources in our partition
      Fencing is disabled
 
  * default_resource_stickiness
    Do we prefer to run on the existing node or be moved to a "better" one?
    * 0 : resources will be placed optimally in the system.
      This may mean they are moved when a "better" or less loaded node becomes available.
      This option is almost equivalent to auto_failback on except that the resource may be moved to other nodes than the one it was previously active on.
    * value > 0 : resources will prefer to remain in their current location but may be moved if a more suitable node is available.
      Higher values indicate a stronger preference for resources to stay where they are.
    * value < 0 : resources prefer to move away from their current location.
      Higher absolute values indicate a stronger preference for resources to be moved.
    * INFINITY : resources will always remain in their current locations until forced off because the node is no longer eligible to run the resource (node shutdown, node standby or configuration change).
      This option is almost equivalent to auto_failback off except that the resource may be moved to other nodes than the one it was previously active on.
    * -INFINITY : resources will always move away from their current location.
 
  * is_managed_default (boolean, default=TRUE)
    Unless the resource's definition says otherwise,
    * TRUE : resources will be started, stopped, monitored and moved as necessary/required
    * FALSE : resources will not be started if stopped, stopped if started nor have any recurring actions scheduled.
 
  * stop_orphan_resources (boolean, default=TRUE (as of release 2.0.6))
    If a resource is found for which we have no definition for;
    * TRUE : Stop the resource
    * FALSE : Ignore the resource
   This mostly affects the CRM's behavior when a resource is deleted by an admin without it first being stopped.
 
  * stop_orphan_actions (boolean, default=TRUE)
    If a recurring action is found for which we have no definition for;
    * TRUE : Stop the action
    * FALSE : Ignore the action
   This mostly affects the CRM's behavior when the interval for a recurring action is changed.
 -->
 <!ELEMENT cluster_property_set (rule*, attributes)>
 <!ATTLIST cluster_property_set
           id                CDATA        #REQUIRED
           score             CDATA        #IMPLIED>
 
 <!ELEMENT nodes       (node*)>
 
 <!--
  * id    : the node's UUID.
  * uname : the result of uname -n
  * type  : should either be "normal" or "member" for nodes you wish to run resources 
    "normal" is preferred as of version 2.0.4
 
 Each node can also have additional "instance" attributes.
 These attributes are completely arbitrary and can be used later in constraints.
 In this way it is possible to define groups of nodes to which a constraint can apply.
 
 It is also theoretically possible to have a process on each node which updates these values automatically.
 This would make it possible to have an attribute that represents "connected to SAN subsystem" or perhaps "system_load (low|medium|high)".
 
 Ideally it would be possible to have the CRMd on each node gather some of this information and automatically populate things like architecture and OS/kernel version.
 -->
 <!ELEMENT node (instance_attributes*)>
 <!ATTLIST node
           id            CDATA         #REQUIRED
           uname         CDATA         #REQUIRED
           description   CDATA         #IMPLIED
           type          (normal|member|ping) #REQUIRED>
 
 <!ELEMENT resources   (primitive|group|clone|master_slave)*>
 
 <!--
  * class
    Specifies the location and standard the resource script conforms to
    * ocf
      Most OCF RAs started out life as v1 Heartbeat resource agents.
      These have all been ported to meet the OCF specifications.
      As an added advantage, in accordance with the OCF spec, they also describe the parameters they take and what their defaults are.
      It is also easier to configure them as each part of the configuration is passed as its own parameter.
      In accordance with the OCF spec, each parameter is passed to the RA with an OCF_RESKEY_ prefix.
      So ip=192.168.1.1 in the CIB would be passed as OCF_RESKEY_ip=192.168.1.1.
      Located under /usr/lib/ocf/resource.d/heartbeat/.
    * lsb
      Most Linux init scripts conform to the LSB specification.
      The class allows you to use those that do as resource agents controlled by Heartbeat.
      Located in /etc/init.d/.
    * heartbeat
      This class gives you access to the v1 Heartbeat resource agents and allows you to reuse any custom agents you may have written.
      Located at /etc/heartbeat/resource.d/ or /etc/ha.d/resource.d.
 
  * type : The name of the ResourceAgent you wish to use.
 
  * provider
    The OCF spec allows multiple vendors to supply the same ResourceAgent.
    To use the OCF resource agents supplied with Heartbeat, you should specify heartbeat here
 
  * is_managed : Is the ClusterResourceManager in control of this resource.
    * true : (default) the resource will be started, stopped, monitored and moved as necessary/required
    * false : the resource will not be started if stopped, stopped if started nor have any recurring actions scheduled.
      The resource may still be referenced in colocation constraints and ordering constraints (though obviously if no actions are performed on it then it will prevent the action on the other resource too)
 
  * restart_type
    Used when the other side of an ordering dependency is restarted/moved.
    * ignore : the default.
      Don't do anything extra.
    * restart
      Use this for example to have a restart of your database also trigger a restart of your web-server.
    * multiple_active
      Used when a resource is detected as being active on more than one machine.
      The default value, stop_start, will stop all instances and start only 1
    * block : don't do anything, wait for the administrator
    * stop_only : stop all the active instances
    * stop_start : start the resource on one node after having stopped all the active instances
 
  * resource_stickiness
    See the description of the default_resource_stickiness cluster attribute.
    resource_stickiness allows you to override the cluster's default for the individual resource.
 
 NOTE: primitive resources may contain at most one "operations" object.
       The CRM will complain about your configuration if this criterion is not met.
       Please use crm_verify to ensure your configuration is valid.
       The DTD is written this way to be order in-sensitive.
 -->
 <!ELEMENT primitive (operations|meta_attributes|instance_attributes)*>
 <!ATTLIST primitive
           id                CDATA        #REQUIRED
           description       CDATA        #IMPLIED
           class             (ocf|lsb|heartbeat|stonith) #REQUIRED
           type              CDATA        #REQUIRED
           provider          CDATA        #IMPLIED
 
           is_managed            CDATA                            #IMPLIED
           restart_type          (ignore|restart)                 'ignore'
           multiple_active       (stop_start|stop_only|block)     'stop_start'
           resource_stickiness   CDATA                             #IMPLIED>
 <!--
 This allows us to specify how long an action can take
 
  * name : the name of the operation.
    Supported operations are start, stop, & monitor
 
  * start_delay : delay the operation after starting the resource
    By default this value is in milliseconds, however you can also specify a value in seconds like so start_delay="5s". Used for the monitor operation.
 
  * timeout : the maximum period of time before considering the action failed.
    By default this value is in milliseconds, however you can also specify a value in seconds like so timeout="5s".
 
  * interval : This currently only applies to monitor operations and specifies how often the LRM should check the resource is active.
    The same notation for timeout applies.
 
  * prereq : What conditions need to be met before this action can be run
    * nothing : This action can be performed at any time
    * quorum : This action requires the partition to have quorum
    * fencing : This action requires the partition to have quorum and any fencing operations to have completed before it can be executed
 
  * on_fail : The action to take if this action ever fails.
    * nothing : Pretend the action didn't actually fail
    * block : Take no further action on the resource - wait for the administrator to resolve the issue
    * restart : Stop the resource and re-allocate it elsewhere
    * stop : Stop the resource and DO NOT re-allocate it elsewhere
    * fence : Currently this means fence the node on which the resource is running.
      Any other resources currently active on the machine will be migrated away before fencing occurs.
 
 Only one entry per supported action+interval is currently permitted.
 Parameters specific to each operation can be passed using the instance_attributes section.
 -->
 <!ELEMENT operations (op*)>
 <!ELEMENT op (meta_attributes|instance_attributes)*>
 <!ATTLIST op
           id            CDATA         #REQUIRED
           name          CDATA         #REQUIRED
           description   CDATA         #IMPLIED
           interval      CDATA         #IMPLIED
           timeout       CDATA         #IMPLIED
           start_delay   CDATA         '0'
           disabled      (true|yes|1|false|no|0)        'false'
           role          (Master|Slave|Started|Stopped) 'Started'
           prereq        (nothing|quorum|fencing)       #IMPLIED
           on_fail       (ignore|block|stop|restart|fence)     #IMPLIED>
 <!--
 Use this to emulate v1 type Heartbeat groups.
 Defining a resource group is a quick way to make sure that the resources:
  * are all started on the same node, and
  * are started and stopped in the correct (sequential) order
 though either or both of these properties can be disabled.
 
 NOTE: Do not create empty groups.  
       They are temporarily supported because the GUI requires it but will be removed as soon as possible.
       The DTD is written this way to be order in-sensitive.
 -->
 <!ELEMENT group (meta_attributes|instance_attributes|primitive)*>
 <!ATTLIST group
           id            CDATA               #REQUIRED
           description   CDATA               #IMPLIED
 
           is_managed            CDATA                         #IMPLIED
           restart_type          (ignore|restart)              'ignore'
           multiple_active       (stop_start|stop_only|block)  'stop_start'
           resource_stickiness   CDATA			      #IMPLIED
 
           ordered               (true|yes|1|false|no|0)       'true'
           collocated            (true|yes|1|false|no|0)       'true'>
 <!--
 Clones are intended as a mechanism for easily starting a number of resources (such as a web-server) with the same configuration.
 As an added benefit, the number that should be started is an instance parameter and when combined with time-based constraints, allows the administrator to run more instances during peak times and save on resources during idle periods.
 
  * ordered
    Start (or stop) each clone only after the operation on the previous clone completed.
 
  * interleaved
    If a colocation constraint is created between two clone resources and interleaved is true, then clone N from one resource will be assigned the same location as clone N from the other resource.
    If the number of runnable clones differs, then the leftovers can be located anywhere.
 Using a cloned group is a much better way of achieving the same result.
 
  * notify
    If true, inform peers before and after any clone is stopped or started.
-   If an action failed, you will (currently) not recieve a post-notification.
+   If an action failed, you will (currently) not receive a post-notification.
    Instead you can next expect to see a pre-notification for a stop.
    If a stop fails, and you have fencing you will get a post-notification for the stop after the fencing operation has completed.
    In order to use the notification service ALL descendants of the clone MUST support the notify action.
    Currently this action is not permitted to fail, though depending on your configuration, can block almost indefinitely.
    Behaviour in response to a failed action or notification is likely to be improved in future releases.
 
    See http://www.clusterlabs.org/doc/en-US/Pacemaker/1.0/html/Pacemaker_Explained/s-resource-clone.html for more information on notify actions
 
 
 NOTE: Clones must contain exactly one primitive or one group resource. 
       The CRM will complain about your configuration if this criterion is not met.
       Please use crm_verify to ensure your configuration is valid.
       The DTD is written this way to be order in-sensitive.
 -->
 
 <!ELEMENT clone (meta_attributes|instance_attributes|primitive|group)*>
 <!ATTLIST clone
           id            CDATA               #REQUIRED
           description   CDATA               #IMPLIED
 
           is_managed            CDATA                         #IMPLIED
           restart_type          (ignore|restart)              'ignore'
           multiple_active       (stop_start|stop_only|block)  'stop_start'
           resource_stickiness   CDATA                         #IMPLIED
 
           notify                (true|yes|1|false|no|0)       'false'
           globally_unique       (true|yes|1|false|no|0)       'true'
           ordered               (true|yes|1|false|no|0)       'false'
           interleave            (true|yes|1|false|no|0)       'false'>
 <!--
 Master/Slave resources are a superset of Clones in that instances can also be in one of two states.
 The meaning of the states is specific to the resource.
 
 NOTE: master_slave must contain exactly one primitive resource OR one group resource.
       It may not contain both, nor may it contain neither.
       The CRM will complain about your configuration if this criterion is not met.
       Please use crm_verify to ensure your configuration is valid.
       The DTD is written this way to be order in-sensitive.
 -->
 <!ELEMENT master_slave (meta_attributes|instance_attributes|primitive|group)*>
 <!ATTLIST master_slave
           id            CDATA       #REQUIRED
           description   CDATA       #IMPLIED
 
           is_managed            CDATA                         #IMPLIED
           restart_type          (ignore|restart)              'ignore'
           multiple_active       (stop_start|stop_only|block)  'stop_start'
           resource_stickiness   CDATA                         #IMPLIED
 
           notify                (true|yes|1|false|no|0)       'false'
           globally_unique       (true|yes|1|false|no|0)       'true'
           ordered               (true|yes|1|false|no|0)       'false'
           interleave            (true|yes|1|false|no|0)       'false'>
 
 <!--
 Most resource options are configured as instance attributes.
 Some of the built-in options can be configured directly on the resource or as an instance attribute.
 The advantage of using instance attributes is the added flexibility that can be achieved through conditional ?<rule/>s (see below).
 
 You can have multiple sets of 'instance attributes', they are first sorted by score and then processed.
 The first to have its ?<rule/> satisfied and define an attribute wins.
 Subsequent values for the attribute will be ignored.
 
 Note that:
  * instance_attributes sets with id equal to cib-bootstrap-options are treated as if they have a score of INFINITY.
  * instance_attributes sets with no score implicitly have a score of zero.
  * instance_attributes sets with no rule implicitly have a rule that evaluates to true.
 
 The addition of conditional <rule/>s to the instance_attributes object allows for an infinite variety of configurations.
 Just some of the possibilities are:
  * Specify different resource parameters
    * depending on the node it is allocated to (a resource may need to use eth1 on host1 but eth0 on host2)
    * depending on the time of day (run 10 web-servers at night and 100 during the day)
  * Allow nodes to have different attributes depending on the time-of-day
    * Set resource_stickiness to avoid failback during business hours but allow resources to be moved to a more preferred node on the weekend
    * Switch a node between a "front-end" processing group during the day to a "back-end" group at night.
 
 Common instance attributes for all resource types:
  * priority (integer, default=0):
    dictates the order in which resources will be processed.
    If there is an insufficient number of nodes to run all resources, the lower priority resources will be stopped to make sure the higher priority resources remain active.
 
  * is_managed: See previous description.
 
  * resource_stickiness: See previous description.
 
  * target_role: (Started|Stopped|Master|Slave|default, default=#default)
    * #default : Let the cluster decide what to do with the resource
    * Started : Ignore any specified value of is_managed or is_managed_default and attempt to start the resource
    * Stopped : Ignore any specified value of is_managed or is_managed_default and attempt to stop the resource
    * Master : Ignore any specified value of is_managed, is_managed_default or promotion preferences and attempt to put all instances of a cloned resource into Master mode.
    * Slave : Ignore any specified value of is_managed, is_managed_default or promotion preferences and attempt to put all instances of a cloned resource into Slave mode.
 
 Common instance attributes for clones:
  * clone_max (integer, default=1):
    the number of clones to be run
 
 * clone_node_max (integer, default=1):
   the maximum number of clones to be run on a single node
 
 Common instance attributes for nodes:
  * standby (boolean, default=FALSE)
    if TRUE, indicates that resources can not be run on the node
 -->
 <!ELEMENT instance_attributes (rule*, attributes)>
 <!ATTLIST instance_attributes
           id                CDATA        #REQUIRED
           score             CDATA        #IMPLIED>
 
 <!ELEMENT meta_attributes (rule*, attributes)>
 <!ATTLIST meta_attributes
           id                CDATA        #REQUIRED
           score             CDATA        #IMPLIED>
 
 <!--
 Every constraint entry also has a 'lifetime' attribute, which expresses when this constraint is applicable.
 For example, a constraint may only be valid during certain times of the day, or days of the week.
 Eventually, we would like to be able to support constraints that only last until events such as the next reboot or the next transition.
 -->
 <!ELEMENT constraints (rsc_order|rsc_colocation|rsc_location)*>
 
 <!--
 rsc_ordering constraints express dependencies between the actions on two resources.
  * from : A resource id
  * action : What action does this constraint apply to.
  * type : Should the action on from occur before or after action on to
  * to : A resource id
  * symmetrical : If TRUE, create the reverse constraint for the other action also.
 
 Read as:
      action from type to_action to
 eg. 
      start rsc1 after promote rsc2
 
 -->
 <!ELEMENT rsc_order (lifetime?)>
 <!ATTLIST rsc_order
           id        CDATA #REQUIRED
           from      CDATA #REQUIRED
           to        CDATA #REQUIRED
           action    CDATA		'start'
           to_action CDATA		'start'
           type      (before|after)	'after'
           score     CDATA		'INFINITY'
           symmetrical (true|yes|1|false|no|0)	'true'>
 
 <!--
 
 Specify where a resource should run relative to another resource
 
 Make rsc 'from' run on the same machine as rsc 'to'
 
 If rsc 'to' cannot run anywhere and 'score' is INFINITY, 
   then rsc 'from' won't be allowed to run anywhere either
 If rsc 'from' cannot run anywhere, then 'to' won't be affected
 
 -->
 <!ELEMENT rsc_colocation (lifetime?)>
 <!ATTLIST rsc_colocation
           id             CDATA #REQUIRED
           from           CDATA #REQUIRED
           from_role      CDATA #IMPLIED
           to             CDATA #REQUIRED
           to_role        CDATA #IMPLIED
           symmetrical    (true|yes|1|false|no|0)	'false'
           node_attribute CDATA #IMPLIED
           score          CDATA #REQUIRED>
 
 <!--
 Specify which nodes are eligible for running a given resource.
 
 During processing, all rsc_location for a given rsc are evaluated.
 
 All nodes start out with their base weight (which defaults to zero).
 This can then be modified (up or down) using any number of rsc_location constraints.
 
 Then the highest non-zero available node is determined to place the resource.
 If multiple nodes have the same weighting, the node with the fewest running resources is chosen.
 
 The rsc field is, surprisingly, a resource id.
 -->
 <!ELEMENT rsc_location (lifetime?,rule*)>
 <!ATTLIST rsc_location
           id          CDATA #REQUIRED
           description CDATA #IMPLIED
           rsc         CDATA #REQUIRED
           node        CDATA #IMPLIED
           score       CDATA #IMPLIED>
 <!ELEMENT lifetime (rule+)>
 <!ATTLIST lifetime id  CDATA     #REQUIRED>
 
 <!--
  * boolean_op
    determines how the results of multiple expressions are combined.
 
  * role
    limits this rule to applying to Multi State resources with the named role.
    Roles include Started, Stopped, Slave, Master though only the last two are considered useful.
    NOTE: A rule with role="Master" cannot determine the initial location of a clone instance.
    It will only affect which of the active instances will be promoted.
 
  * score
    adjusts the preference for running on the matched nodes.
    NOTE: Nodes that end up with a negative score will never run the resource.
    Two special values of "score" exist: INFINITY and -INFINITY.
    Processing of these special values is as follows:
 
       INFINITY +/- -INFINITY : -INFINITY
       INFINITY +/-  int      :  INFINITY
      -INFINITY +/-  int      : -INFINITY
   
  * score_attribute 
    an alternative to the score attribute that provides extra flexibility.
   Each node matched by the rule has its score adjusted differently, according to its value for the named node attribute.
   Thus in the example below, if score_attribute="installed_ram" and nodeA would have its preference to run "the resource" increased by 1024 whereas nodeB would have its preference increased only by half as much.
 
     <nodes>
       <node id="uuid1" uname="nodeA" type="normal">
         <instance_attributes id="uuid1:custom_attrs">
           <attributes>
             <nvpair id="uuid1:installed_ram" name="installed_ram" value="1024"/>
             <nvpair id="uuid1:my_other_attr" name="my_other_attr" value="bob"/>
           </attributes>
         </instance_attributes>
       </node>
       <node id="uuid2" uname="nodeB" type="normal">
         <instance_attributes id="uuid2:custom_attrs">
           <attributes>
             <nvpair id="uuid2:installed_ram" name="installed_ram" value="512"/>
           </attributes>
         </instance_attributes>
       </node>
     </nodes>
 -->
 <!ELEMENT rule (expression|date_expression|rule)*>
 <!ATTLIST rule
           id                  CDATA          #REQUIRED
           role                CDATA          #IMPLIED
           score               CDATA          #IMPLIED
           score_attribute     CDATA          #IMPLIED
           boolean_op          (or|and)      'and'>
 
 <!--
 Returns TRUE or FALSE depending on the properties of the object being tested.
 
  * type determines how the values being tested.
    * integer Values are converted to floats before being compared.
    * version The "version" type is intended to solve the problem of comparing 1.2 and 1.10
    * string Uses strcmp
 
 Two built-in attributes are node id #id and node uname #uname so that:
       attribute=#id value=8C05CA5C-C9E3-11D8-BEE6-000A95B71D78 operation=eq, and
       attribute=#uname value=test1 operation=eq
 would both be valid tests.
 
 An extra built-in attribute called #is_dc will be set to true or false depending on whether the node is operating as the DC for the cluster.
 Valid tests using this test would be of the form:
 
         attribute=#is_dc operation=eq value=true,  and
         attribute=#is_dc operation=eq value=false, and
         attribute=#is_dc operation=ne value=false
                         (for those liking double negatives :))
 -->
 <!ELEMENT expression EMPTY>
 <!ATTLIST expression
           id         CDATA                    #REQUIRED
           attribute  CDATA                    #REQUIRED
           operation  (lt|gt|lte|gte|eq|ne|defined|not_defined) #REQUIRED
           value      CDATA                    #IMPLIED
           type       (number|string|version) 'string'>
 
 <!--
  * start : A date-time conforming to the ISO8601 specification.
  * end : A date-time conforming to the ISO8601 specification.
    A value for end may, for any usage, be omitted and instead inferred using start and duration.
  * operation
    * gt : Compares the current date-time with start date.
      Checks now > start.
    * lt : Compares the current date-time with end date.
      Checks end > now
    * in_range : Compares the current date-time with start and end.
      Checks now > start and end > now.
      If either start or end is omitted, then that part of the comparison is not performed.
    * date_spec : Performs a cron-like comparison between the contents of date_spec and now.
      If values for start and/or end are included, now must also be within that range.
      Or in other words, the date_spec operation can also be made to perform an extra in_range check.
 
 NOTE: Because the comparisons (except for date_spec) include the time, the eq, neq, gte and lte operators have not been implemented.
 -->
 <!ELEMENT date_expression (date_spec?,duration?)>
 <!ATTLIST date_expression
         id         CDATA  #REQUIRED
         operation  (in_range|date_spec|gt|lt) 'in_range'
         start      CDATA  #IMPLIED
         end        CDATA  #IMPLIED>
 
 <!--
 date_spec is used for (surprisingly  ) date_spec operations.
 
 Fields that are not supplied are ignored.
 
 Fields can contain a single number or a single range.
 Eg.
 monthdays="1" (Matches the first day of every month) and hours="09-17" (Matches hours between 9am and 5pm inclusive) are both valid values.
 weekdays="1,2" and weekdays="1-2,5-6" are NOT valid ranges.
 This may change in a future release.
 
  * seconds : Value range 0-59
  * minutes : Value range 0-59
  * hours : Value range 0-23
  * monthdays : Value range 0-31 (depending on current month and year)
  * weekdays : Value range 1-7 (1=Monday, 7=Sunday)
  * yeardays : Value range 1-366 (depending on current year)
  * months : Value range 1-12
  * weeks : Value range 1-53 (depending on weekyear)
  * weekyears : Value range 0...
   (NOTE: weekyears may differ from Gregorian years.
   Eg. 2005-001 Ordinal == 2005-01-01 Gregorian == 2004-W53-6 Weekly )
  * years : Value range 0...
  * moon : Value range 0..7 - 0 is new, 4 is full moon.
    Because we can(tm)
 -->
 
 <!-- NOTE(review): the commentary above lists "seconds" and "minutes" as
      date_spec fields, but they are not declared in this attribute list.
      Confirm whether the commentary or the declaration is out of date. -->
 <!ELEMENT date_spec EMPTY>
 <!ATTLIST date_spec
         id         CDATA  #REQUIRED
         hours      CDATA  #IMPLIED
         monthdays  CDATA  #IMPLIED
         weekdays   CDATA  #IMPLIED
         yeardays   CDATA  #IMPLIED
         months     CDATA  #IMPLIED
         weeks      CDATA  #IMPLIED
         weekyears  CDATA  #IMPLIED
         years      CDATA  #IMPLIED
         moon       CDATA  #IMPLIED>
 
 <!--
 duration is optionally used for calculating a value for end.
 Any field not supplied is assumed to be zero and ignored.
 Negative values might work.
 Eg. months=11 should be equivalent to writing years=1, months=-1 but is not encouraged.
 -->
 <!-- NOTE(review): monthdays/weekdays/yeardays look unusual as components of
      a duration offset; presumably declared for symmetry with date_spec.
      Confirm which fields the duration calculation actually honours. -->
 <!ELEMENT duration EMPTY>
 <!ATTLIST duration
         id         CDATA  #REQUIRED
         hours      CDATA  #IMPLIED
         monthdays  CDATA  #IMPLIED
         weekdays   CDATA  #IMPLIED
         yeardays   CDATA  #IMPLIED
         months     CDATA  #IMPLIED
         weeks      CDATA  #IMPLIED
         years      CDATA  #IMPLIED>
 <!--
 Example 1: True if now is any time in the year 2005.
 
 <rule id="rule1">
   <date_expression id="date_expr1" start="2005-001" operation="in_range">
     <duration years="1"/>
   </date_expression>
 </rule>
 Example 2: Equivalent expression.
 
 <rule id="rule2">
   <date_expression id="date_expr2" operation="date_spec">
     <date_spec years="2005"/>
   </date_expression>
 </rule>
 Example 3: 9am-5pm, Mon-Friday
 
 <rule id="rule3">
   <date_expression id="date_expr3" operation="date_spec">
     <date_spec hours="9-16" weekdays="1-5"/>
   </date_expression>
 </rule>
 Example 4: 9am-5pm, Mon-Friday, or all day saturday
 
 <rule id="rule4" boolean_op="or">
   <date_expression id="date_expr4-1" operation="date_spec">
     <date_spec hours="9-16" weekdays="1-5"/>
   </date_expression>
   <date_expression id="date_expr4-2" operation="date_spec">
     <date_spec weekdays="6"/>
   </date_expression>
 </rule>
 Example 5: 9am-5pm or 9pm-midnight, Mon-Friday
 
 <rule id="rule5" boolean_op="and">
   <rule id="rule5-nested1" boolean_op="or">
     <date_expression id="date_expr5-1" operation="date_spec">
       <date_spec hours="9-16"/>
     </date_expression>
     <date_expression id="date_expr5-2" operation="date_spec">
       <date_spec hours="21-23"/>
     </date_expression>
   </rule>
   <date_expression id="date_expr5-3" operation="date_spec">
     <date_spec weekdays="1-5"/>
   </date_expression>
 </rule>
 Example 6: Mondays in March 2005
 
 <rule id="rule6" boolean_op="and">
   <date_expression id="date_expr6" operation="date_spec" start="2005-03-01" end="2005-04-01">
     <date_spec weekdays="1"/>
   </date_expression>
 </rule>
 NOTE: Because no time is specified, 00:00:00 is implied.
 This means that the range includes all of 2005-03-01 but none of 2005-04-01.
 You may wish to write end="2005-03-31T23:59:59" to avoid confusion.
 
 Example 7: Friday the 13th if it is a full moon
 
 <rule id="rule7" boolean_op="and">
   <date_expression id="date_expr7" operation="date_spec">
     <date_spec weekdays="5" monthdays="13" moon="4"/>
   </date_expression>
 </rule>
 -->
 
 <!--
 You don't have to give a value.
 There's a difference between a key not being present and a key not having a value.
 -->
 <!ELEMENT nvpair EMPTY>
 <!ATTLIST nvpair
           id     CDATA  #REQUIRED
           name   CDATA  #REQUIRED
           value  CDATA  #IMPLIED>
 
 <!-- attributes: a plain container for zero or more nvpairs. -->
 <!ELEMENT attributes (nvpair*)>
 
 <!--
 These attributes take effect only if no value has previously been applied as part of the node's definition.
 Additionally, when the node reboots all settings made here are erased.
 
 id must be the UUID of the node.
 -->
 <!-- NOTE(review): the comment above states id must be the node's UUID, yet
      it is declared #IMPLIED rather than #REQUIRED here - confirm intent. -->
 <!ELEMENT transient_attributes (instance_attributes*)>
 <!ATTLIST transient_attributes id CDATA #IMPLIED>
 
 <!--=========== Status - Advanced Use Only ===========-->
 
 <!--
 Details about the status of each node configured.
 
 HERE BE DRAGONS
 
 Never, ever edit this section directly or using cibadmin.
 The consequences of doing so are many and varied but rarely ever good or what you anticipated.
 To discourage this, the status section is no longer even written to disk, and is always discarded at startup.
 
 To avoid duplication of data, state entries only carry references to nodes and resources.
 -->
 <!ELEMENT status (node_state*)>
 
 <!--
 The state of a given node.
 
 This information is updated by the DC based on inputs from sources such as the CCM, status messages from remote LRMs and requests from other nodes.
  * id       -  is the node's UUID.
  * uname    - is the result of uname -n for the node.
  * crmd     - records whether the crmd process is running on the node
  * in_ccm   - records whether the node is part of our membership partition
  * join     - is the node's membership status with the current DC.
  * expected - is the DC's expectation of whether the node is up or not.
  * shutdown - is set to the time at which the node last asked to be shut down
 
 Ideally, there should be a node_state entry for every entry in the <nodes> list.
 
 -->
 <!-- NOTE(review): ha, crm-debug-origin and clear_shutdown are declared below
      but not described in the field list above - presumably ha and
      clear_shutdown are legacy; confirm before relying on them. -->
 <!ELEMENT node_state (transient_attributes|lrm)*>
 <!ATTLIST node_state
         id              CDATA                   #REQUIRED
         uname           CDATA                   #REQUIRED
         ha              (active|dead)           #IMPLIED
         crmd            (online|offline)        'offline'
         join            (pending|member|down)   'down'
         expected        (pending|member|down)   'down'
         in_ccm          (true|yes|1|false|no|0) 'false'
         crm-debug-origin CDATA                  #IMPLIED
         shutdown        CDATA                   #IMPLIED
         clear_shutdown  CDATA                   #IMPLIED>
 
 <!--
 Information from the Local Resource Manager of the node.
 It contains a list of all resource's added (but not necessarily still active) on the node.
 -->
 <!ELEMENT lrm (lrm_resources)>
 <!ATTLIST lrm id CDATA #REQUIRED>
 
 <!-- lrm_resource identifies one resource known to the node's LRM by its
      class/type/provider triple.
      NOTE(review): provider is presumably only meaningful for class=ocf -
      confirm against the LRM code before documenting it as such. -->
 <!ELEMENT lrm_resources (lrm_resource*)>
 <!ELEMENT lrm_resource (lrm_rsc_op*)>
 <!ATTLIST lrm_resource
           id            CDATA #REQUIRED
           class             (lsb|ocf|heartbeat|stonith) #REQUIRED
           type              CDATA        #REQUIRED
           provider          CDATA        #IMPLIED>
 <!--
 lrm_rsc_op (Resource Status)
 
 id: Set to [resource id] +"_"+ [operation] +"_"+ [an_interval_in_milliseconds]
 
 operation typically start, stop, or monitor
 
 call_id: Supplied by the LRM, determines the order in which lrm_rsc_op objects should be processed in order to determine the resource's true state
 
 rc_code is the last return code from the resource
 
 rsc_state is the state of the resource after the action completed and should be used as a guide only.
 
 transition_key contains an identifier and sequence number for the transition.
 
 At startup, the TEngine registers the identifier and starts the sequence at zero.
 It is used to identify the source of resource actions.
 
 transition_magic contains an identifier containing call_id, rc_code, and transition_key.
 
 As the name suggests, it is a piece of magic that allows the TE to always identify the action from the stream of xml-diffs it subscribes to from the CIB.
 
 last_run       ::= when did the op run (as age)
 last_rc_change ::= last rc change (as age)
 exec_time      ::= time it took the op to run
 queue_time     ::= time spent in queue
 
 op_status is supplied by the LRM and conforms to this enum:
 
 typedef enum {
         LRM_OP_PENDING = -1,
         LRM_OP_DONE,
         LRM_OP_CANCELLED,
         LRM_OP_TIMEOUT,
         LRM_OP_NOTSUPPORTED,
         LRM_OP_ERROR,
 } op_status_t;
 The parameters section allows us to detect when a resource's definition has changed and the resource needs to be restarted (so the changes take effect).
 -->
 <!-- NOTE(review): several attributes are declared twice, once with
      underscores (op_status, rc_code, call_id, ...) and once with hyphens
      (op-status, rc-code, call-id, ...). Presumably one spelling is legacy
      and kept for compatibility - confirm which set current code emits.
      (DTD syntax does not allow comments inside an ATTLIST, so the two
      groups cannot be annotated inline.) -->
 <!ELEMENT lrm_rsc_op EMPTY>
 <!ATTLIST lrm_rsc_op
           id                    CDATA #REQUIRED
           operation             CDATA #REQUIRED
           op_status             CDATA #IMPLIED
           rc_code               CDATA #IMPLIED
           call_id               CDATA #IMPLIED
           crm_feature_set       CDATA #REQUIRED
           crm-debug-origin      CDATA #IMPLIED
           migrate_from          CDATA #IMPLIED
           transition_key        CDATA #IMPLIED
           op_digest             CDATA #IMPLIED
           op_restart_digest     CDATA #IMPLIED
           op_force_restart      CDATA #IMPLIED
 
           last_run              CDATA #IMPLIED
           last_rc_change        CDATA #IMPLIED
           exec_time             CDATA #IMPLIED
           queue_time            CDATA #IMPLIED
 
           interval              CDATA #REQUIRED
           transition_magic      CDATA #IMPLIED
 
           op-status             CDATA #IMPLIED
           rc-code               CDATA #IMPLIED
           call-id               CDATA #IMPLIED
           migrate-from          CDATA #IMPLIED
           transition-key        CDATA #IMPLIED
           transition-magic      CDATA #IMPLIED
           op-digest             CDATA #IMPLIED
           op-restart-digest     CDATA #IMPLIED
           op-force-restart      CDATA #IMPLIED
           last-run              CDATA #IMPLIED
           last-rc-change        CDATA #IMPLIED
           exec-time             CDATA #IMPLIED
           queue-time            CDATA #IMPLIED>