diff --git a/daemons/fenced/fenced_remote.c b/daemons/fenced/fenced_remote.c
index 779cdadf80..8ca1c8534b 100644
--- a/daemons/fenced/fenced_remote.c
+++ b/daemons/fenced/fenced_remote.c
@@ -1,2361 +1,2361 @@
 /*
  * Copyright 2009-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <ctype.h>
 #include <regex.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipc_internal.h>
 #include <crm/cluster/internal.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 
 #include <crm/common/util.h>
 #include <pacemaker-fenced.h>
 
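 // Factor by which calculated timeouts are padded (presumably to allow for cluster messaging delays)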
 #define TIMEOUT_MULTIPLY_FACTOR 1.2
 
 /* When one fencer queries its peers for devices able to handle a fencing
  * request, each peer will reply with a list of such devices available to it.
  * Each reply will be parsed into a peer_device_info_t, with each device's
  * information kept in a device_properties_t.
  */
 
 typedef struct device_properties_s {
     /* Whether access to this device has been verified */
     gboolean verified;
 
     /* The remaining members are indexed by the operation's "phase" */
 
     /* Whether this device has been executed in each phase */
     gboolean executed[st_phase_max];
     /* Whether this device is disallowed from executing in each phase */
     gboolean disallowed[st_phase_max];
     /* Action-specific timeout for each phase */
     int custom_action_timeout[st_phase_max];
     /* Action-specific maximum random delay for each phase */
     int delay_max[st_phase_max];
     /* Action-specific base delay for each phase */
     int delay_base[st_phase_max];
 } device_properties_t;
 
 typedef struct {
     /* Name of peer that sent this result */
     char *host;
     /* Only try peers for non-topology-based operations once */
     gboolean tried;
     /* Number of entries in the devices table */
     int ndevices;
     /* Devices available to this host that are capable of fencing the target */
     GHashTable *devices;
 } peer_device_info_t;
 
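 // Pending and completed remote fencing operations, keyed by operation ID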
 GHashTable *stonith_remote_op_list = NULL;
 
 extern xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data,
                                   int call_options);
 
 static void request_peer_fencing(remote_fencing_op_t *op,
                                  peer_device_info_t *peer);
 static void finalize_op(remote_fencing_op_t *op, xmlNode *data, bool dup);
 static void report_timeout_period(remote_fencing_op_t * op, int op_timeout);
 static int get_op_total_timeout(const remote_fencing_op_t *op,
                                 const peer_device_info_t *chosen_peer);
 
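 // GCompareFunc for GLists of strings (for example, with g_list_find_custom())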
 static gint
 sort_strings(gconstpointer a, gconstpointer b)
 {
     return strcmp(a, b);
 }
 
 static void
 free_remote_query(gpointer data)
 {
     if (data != NULL) {
         peer_device_info_t *peer = data;
 
         g_hash_table_destroy(peer->devices);
         free(peer->host);
         free(peer);
     }
 }
 
 void
 free_stonith_remote_op_list(void)
 {
     if (stonith_remote_op_list != NULL) {
         g_hash_table_destroy(stonith_remote_op_list);
         stonith_remote_op_list = NULL;
     }
 }
 
 struct peer_count_data {
     const remote_fencing_op_t *op;
     gboolean verified_only;
     int count;
 };
 
 /*!
  * \internal
  * \brief Increment a counter if a device has not been executed yet
  *
  * \param[in] key        Device ID (ignored)
  * \param[in] value      Device properties
  * \param[in] user_data  Peer count data
  */
 static void
 count_peer_device(gpointer key, gpointer value, gpointer user_data)
 {
     device_properties_t *props = (device_properties_t*)value;
     struct peer_count_data *data = user_data;
 
     if (!props->executed[data->op->phase]
         && (!data->verified_only || props->verified)) {
         ++(data->count);
     }
 }
 
 /*!
  * \internal
  * \brief Check the number of available devices in a peer's query results
  *
  * \param[in] op             Operation that results are for
  * \param[in] peer           Peer to count
  * \param[in] verified_only  Whether to count only verified devices
  *
  * \return Number of devices available to peer that were not already executed
  */
 static int
 count_peer_devices(const remote_fencing_op_t *op,
                    const peer_device_info_t *peer, gboolean verified_only)
 {
     struct peer_count_data data;
 
     data.op = op;
     data.verified_only = verified_only;
     data.count = 0;
     if (peer) {
         g_hash_table_foreach(peer->devices, count_peer_device, &data);
     }
     return data.count;
 }
 
 /*!
  * \internal
  * \brief Search for a device in a query result
  *
  * \param[in] op      Operation that result is for
  * \param[in] peer    Query result for a peer
  * \param[in] device  Device ID to search for
  *
  * \return Device properties if found, NULL otherwise
  */
 static device_properties_t *
 find_peer_device(const remote_fencing_op_t *op, const peer_device_info_t *peer,
                  const char *device)
 {
     device_properties_t *props = g_hash_table_lookup(peer->devices, device);
 
     return (props && !props->executed[op->phase]
            && !props->disallowed[op->phase])? props : NULL;
 }
 
 /*!
  * \internal
  * \brief Find a device in a peer's device list and mark it as executed
  *
  * \param[in]     op                     Operation that peer result is for
  * \param[in,out] peer                   Peer with results to search
  * \param[in]     device                 ID of device to mark as done
  * \param[in]     verified_devices_only  Only consider verified devices
  *
  * \return TRUE if device was found and marked, FALSE otherwise
  */
 static gboolean
 grab_peer_device(const remote_fencing_op_t *op, peer_device_info_t *peer,
                  const char *device, gboolean verified_devices_only)
 {
     device_properties_t *props = find_peer_device(op, peer, device);
 
     if ((props == NULL) || (verified_devices_only && !props->verified)) {
         return FALSE;
     }
 
     crm_trace("Removing %s from %s (%d remaining)",
               device, peer->host, count_peer_devices(op, peer, FALSE));
     props->executed[op->phase] = TRUE;
     return TRUE;
 }
 
 static void
 clear_remote_op_timers(remote_fencing_op_t * op)
 {
     if (op->query_timer) {
         g_source_remove(op->query_timer);
         op->query_timer = 0;
     }
     if (op->op_timer_total) {
         g_source_remove(op->op_timer_total);
         op->op_timer_total = 0;
     }
     if (op->op_timer_one) {
         g_source_remove(op->op_timer_one);
         op->op_timer_one = 0;
     }
 }
 
 static void
 free_remote_op(gpointer data)
 {
     remote_fencing_op_t *op = data;
 
     crm_log_xml_debug(op->request, "Destroying");
 
     clear_remote_op_timers(op);
 
     free(op->id);
     free(op->action);
     free(op->delegate);
     free(op->target);
     free(op->client_id);
     free(op->client_name);
     free(op->originator);
 
     if (op->query_results) {
         g_list_free_full(op->query_results, free_remote_query);
     }
     if (op->request) {
         free_xml(op->request);
         op->request = NULL;
     }
     if (op->devices_list) {
         g_list_free_full(op->devices_list, free);
         op->devices_list = NULL;
     }
     g_list_free_full(op->automatic_list, free);
     g_list_free(op->duplicates);
 
     pcmk__reset_result(&op->result);
     free(op);
 }
 
 void
 init_stonith_remote_op_hash_table(GHashTable **table)
 {
     if (*table == NULL) {
         *table = pcmk__strkey_table(NULL, free_remote_op);
     }
 }
 
 /*!
  * \internal
  * \brief Return an operation's originally requested action (before any remap)
  *
  * \param[in] op  Operation to check
  *
  * \return Operation's original action
  */
 static const char *
 op_requested_action(const remote_fencing_op_t *op)
 {
     return ((op->phase > st_phase_requested)? "reboot" : op->action);
 }
 
 /*!
  * \internal
  * \brief Remap a "reboot" operation to the "off" phase
  *
  * \param[in,out] op      Operation to remap
  */
 static void
 op_phase_off(remote_fencing_op_t *op)
 {
     crm_info("Remapping multiple-device reboot targeting %s to 'off' "
              CRM_XS " id=%.8s", op->target, op->id);
     op->phase = st_phase_off;
 
     /* Happily, "off" and "on" are shorter than "reboot", so we can reuse the
      * memory allocation at each phase.
      */
     strcpy(op->action, "off");
 }
 
 /*!
  * \internal
  * \brief Advance a remapped reboot operation to the "on" phase
  *
  * \param[in,out] op  Operation to remap
  */
 static void
 op_phase_on(remote_fencing_op_t *op)
 {
     GList *iter = NULL;
 
     crm_info("Remapped 'off' targeting %s complete, "
              "remapping to 'on' for %s " CRM_XS " id=%.8s",
              op->target, op->client_name, op->id);
     op->phase = st_phase_on;
     strcpy(op->action, "on");
 
     /* Skip devices with automatic unfencing, because the cluster will handle it
      * when the node rejoins.
      */
     for (iter = op->automatic_list; iter != NULL; iter = iter->next) {
         GList *match = g_list_find_custom(op->devices_list, iter->data,
                                             sort_strings);
 
         if (match) {
             op->devices_list = g_list_remove(op->devices_list, match->data);
         }
     }
     g_list_free_full(op->automatic_list, free);
     op->automatic_list = NULL;
 
     /* Rewind device list pointer */
     op->devices = op->devices_list;
 }
 
 /*!
  * \internal
  * \brief Reset a remapped reboot operation
  *
  * \param[in,out] op  Operation to reset
  */
 static void
 undo_op_remap(remote_fencing_op_t *op)
 {
     if (op->phase > 0) {
         crm_info("Undoing remap of reboot targeting %s for %s "
                  CRM_XS " id=%.8s", op->target, op->client_name, op->id);
         op->phase = st_phase_requested;
         strcpy(op->action, "reboot");
     }
 }
 
 /*!
  * \internal
  * \brief Create notification data XML for a fencing operation result
  *
  * \param[in] op      Fencer operation that completed
  *
  * \return Newly created XML to add as notification data
  * \note The caller is responsible for freeing the result.
  */
 static xmlNode *
 fencing_result2xml(remote_fencing_op_t *op)
 {
     xmlNode *notify_data = create_xml_node(NULL, T_STONITH_NOTIFY_FENCE);
 
     crm_xml_add_int(notify_data, "state", op->state);
     crm_xml_add(notify_data, F_STONITH_TARGET, op->target);
     crm_xml_add(notify_data, F_STONITH_ACTION, op->action);
     crm_xml_add(notify_data, F_STONITH_DELEGATE, op->delegate);
     crm_xml_add(notify_data, F_STONITH_REMOTE_OP_ID, op->id);
     crm_xml_add(notify_data, F_STONITH_ORIGIN, op->originator);
     crm_xml_add(notify_data, F_STONITH_CLIENTID, op->client_id);
     crm_xml_add(notify_data, F_STONITH_CLIENTNAME, op->client_name);
 
     stonith__xe_set_result(notify_data, &op->result);
     return notify_data;
 }
 
 /*!
  * \internal
  * \brief Broadcast a fence result notification to all CPG peers
  *
  * \param[in] op         Fencer operation that completed
  * \param[in] op_merged  Whether this operation is a duplicate of another
  */
 void
 fenced_broadcast_op_result(remote_fencing_op_t *op, bool op_merged)
 {
     static int count = 0;
     xmlNode *bcast = create_xml_node(NULL, T_STONITH_REPLY);
     xmlNode *notify_data = fencing_result2xml(op);
 
     count++;
     crm_trace("Broadcasting result to peers");
     crm_xml_add(bcast, F_TYPE, T_STONITH_NOTIFY);
     crm_xml_add(bcast, F_SUBTYPE, "broadcast");
     crm_xml_add(bcast, F_STONITH_OPERATION, T_STONITH_NOTIFY);
     crm_xml_add_int(bcast, "count", count);
 
     if (op_merged) {
         pcmk__xe_set_bool_attr(bcast, F_STONITH_MERGED, true);
     }
 
     add_message_xml(bcast, F_STONITH_CALLDATA, notify_data);
     send_cluster_message(NULL, crm_msg_stonith_ng, bcast, FALSE);
     free_xml(notify_data);
     free_xml(bcast);
 }
 
 /*!
  * \internal
  * \brief Reply to a local request originator and notify all subscribed clients
  *
  * \param[in,out] op    Fencer operation that completed
  * \param[in,out] data  Top-level XML to add notification to
  */
 static void
 handle_local_reply_and_notify(remote_fencing_op_t *op, xmlNode *data)
 {
     xmlNode *notify_data = NULL;
     xmlNode *reply = NULL;
     pcmk__client_t *client = NULL;
 
     if (op->notify_sent == TRUE) {
         /* nothing to do */
         return;
     }
 
     /* Do notification with a clean data object */
     crm_xml_add_int(data, "state", op->state);
     crm_xml_add(data, F_STONITH_TARGET, op->target);
     crm_xml_add(data, F_STONITH_OPERATION, op->action);
 
     reply = fenced_construct_reply(op->request, data, &op->result);
     crm_xml_add(reply, F_STONITH_DELEGATE, op->delegate);
 
     /* Send fencing OP reply to local client that initiated fencing */
     client = pcmk__find_client_by_id(op->client_id);
     if (client == NULL) {
         crm_trace("Skipping reply to %s: no longer a client", op->client_id);
     } else {
         do_local_reply(reply, client, op->call_options);
     }
 
     /* Broadcast to all local clients that the fencing operation happened */
     notify_data = fencing_result2xml(op);
     fenced_send_notification(T_STONITH_NOTIFY_FENCE, &op->result, notify_data);
     free_xml(notify_data);
     fenced_send_notification(T_STONITH_NOTIFY_HISTORY, NULL, NULL);
 
     /* Mark this operation as having its notifications sent */
     op->notify_sent = TRUE;
     free_xml(reply);
 }
 
 /*!
  * \internal
  * \brief Finalize all duplicates of a given fencer operation
  *
  * \param[in] op         Fencer operation that completed
  * \param[in] data       Top-level XML to add notification to
  */
 static void
 finalize_op_duplicates(remote_fencing_op_t *op, xmlNode *data)
 {
     for (GList *iter = op->duplicates; iter != NULL; iter = iter->next) {
         remote_fencing_op_t *other = iter->data;
 
         if (other->state == st_duplicate) {
             other->state = op->state;
             crm_debug("Performing duplicate notification for %s@%s: %s "
                       CRM_XS " id=%.8s",
                       other->client_name, other->originator,
                       pcmk_exec_status_str(op->result.execution_status),
                       other->id);
             pcmk__copy_result(&op->result, &other->result);
             finalize_op(other, data, true);
 
         } else {
             // Possible if (for example) it timed out already
             crm_err("Skipping duplicate notification for %s@%s "
                     CRM_XS " state=%s id=%.8s",
                     other->client_name, other->originator,
                     stonith_op_state_str(other->state), other->id);
         }
     }
 }
 
 static char *
 delegate_from_xml(xmlNode *xml)
 {
     xmlNode *match = get_xpath_object("//@" F_STONITH_DELEGATE, xml, LOG_NEVER);
 
     if (match == NULL) {
         return crm_element_value_copy(xml, F_ORIG);
     } else {
         return crm_element_value_copy(match, F_STONITH_DELEGATE);
     }
 }
 
 /*!
  * \internal
  * \brief Finalize a peer fencing operation
  *
  * Clean up after a fencing operation completes. This function has two code
  * paths: the executioner uses it to broadcast the result to CPG peers, and then
  * each peer (including the executioner) uses it to process that broadcast and
  * notify its IPC clients of the result.
  *
  * \param[in,out] op    Fencer operation that completed
  * \param[in,out] data  If not NULL, XML reply of last delegated fencing
  *                      operation
  * \param[in]     dup   Whether this operation is a duplicate of another
  *                      (in which case, do not broadcast the result)
  *
  * \note The operation result should be set before calling this function.
  */
 static void
 finalize_op(remote_fencing_op_t *op, xmlNode *data, bool dup)
 {
     int level = LOG_ERR;
     const char *subt = NULL;
     xmlNode *local_data = NULL;
     gboolean op_merged = FALSE;
 
     CRM_CHECK((op != NULL), return);
 
     if (op->notify_sent) {
         // Most likely, this is a timed-out action that eventually completed
         crm_notice("Operation '%s'%s%s by %s for %s@%s%s: "
                    "Result arrived too late " CRM_XS " id=%.8s",
                    op->action, (op->target? " targeting " : ""),
                    (op->target? op->target : ""),
                    (op->delegate? op->delegate : "unknown node"),
                    op->client_name, op->originator,
                    (op_merged? " (merged)" : ""),
                    op->id);
         return;
     }
 
     set_fencing_completed(op);
     clear_remote_op_timers(op);
     undo_op_remap(op);
 
     if (data == NULL) {
         data = create_xml_node(NULL, "remote-op");
         local_data = data;
 
     } else if (op->delegate == NULL) {
         switch (op->result.execution_status) {
             case PCMK_EXEC_NO_FENCE_DEVICE:
                 break;
             case PCMK_EXEC_INVALID:
                 if (op->result.exit_status == CRM_EX_EXPIRED) {
                     break;
                 }
                 // else fall through
             default:
                 op->delegate = delegate_from_xml(data);
                 break;
         }
     }
 
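     // Merged if this was a local duplicate or the broadcast flagged it as merged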
     if (dup || (crm_element_value(data, F_STONITH_MERGED) != NULL)) {
         op_merged = true;
     }
 
     /* Tell everyone the operation is done. We will continue with the local
      * notifications once we receive the broadcast back.
      */
     subt = crm_element_value(data, F_SUBTYPE);
     if (!dup && !pcmk__str_eq(subt, "broadcast", pcmk__str_casei)) {
         /* Defer notification until the bcast message arrives */
         fenced_broadcast_op_result(op, op_merged);
         free_xml(local_data);
         return;
     }
 
     if (pcmk__result_ok(&op->result) || dup
         || !pcmk__str_eq(op->originator, stonith_our_uname, pcmk__str_casei)) {
         level = LOG_NOTICE;
     }
     do_crm_log(level, "Operation '%s'%s%s by %s for %s@%s%s: %s (%s%s%s) "
                CRM_XS " id=%.8s", op->action, (op->target? " targeting " : ""),
                (op->target? op->target : ""),
                (op->delegate? op->delegate : "unknown node"),
                op->client_name, op->originator,
                (op_merged? " (merged)" : ""),
                crm_exit_str(op->result.exit_status),
                pcmk_exec_status_str(op->result.execution_status),
                ((op->result.exit_reason == NULL)? "" : ": "),
                ((op->result.exit_reason == NULL)? "" : op->result.exit_reason),
                op->id);
 
     handle_local_reply_and_notify(op, data);
 
     if (!dup) {
         finalize_op_duplicates(op, data);
     }
 
     /* Free non-essential parts of the record
      * Keep the record around so we can query the history
      */
     if (op->query_results) {
         g_list_free_full(op->query_results, free_remote_query);
         op->query_results = NULL;
     }
     if (op->request) {
         free_xml(op->request);
         op->request = NULL;
     }
 
     free_xml(local_data);
 }
 
 /*!
  * \internal
  * \brief Finalize a watchdog fencer op after the waiting time expires
  *
  * \param[in] userdata  Fencer operation that completed
  *
  * \return G_SOURCE_REMOVE (which tells glib not to restart the timer)
  */
 static gboolean
 remote_op_watchdog_done(gpointer userdata)
 {
     remote_fencing_op_t *op = userdata;
 
     op->op_timer_one = 0;
 
     crm_notice("Self-fencing (%s) by %s for %s assumed complete "
                CRM_XS " id=%.8s",
                op->action, op->target, op->client_name, op->id);
     op->state = st_done;
     pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
     finalize_op(op, NULL, false);
     return G_SOURCE_REMOVE;
 }
 
 static gboolean
 remote_op_timeout_one(gpointer userdata)
 {
     remote_fencing_op_t *op = userdata;
 
     op->op_timer_one = 0;
 
     crm_notice("Peer's '%s' action targeting %s for client %s timed out " CRM_XS
                " id=%.8s", op->action, op->target, op->client_name, op->id);
     pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_TIMEOUT,
                      "Peer did not return fence result within timeout");
 
     // Try another device, if appropriate
     request_peer_fencing(op, NULL);
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Finalize a remote fencer operation that timed out
  *
  * \param[in] op      Fencer operation that timed out
  * \param[in] reason  Readable description of what step timed out
  */
 static void
 finalize_timed_out_op(remote_fencing_op_t *op, const char *reason)
 {
     op->op_timer_total = 0;
 
     crm_debug("Action '%s' targeting %s for client %s timed out "
               CRM_XS " id=%.8s",
               op->action, op->target, op->client_name, op->id);
 
     if (op->phase == st_phase_on) {
         /* A remapped reboot operation timed out in the "on" phase, but the
          * "off" phase completed successfully, so quit trying any further
          * devices, and return success.
          */
         op->state = st_done;
         pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
     } else {
         op->state = st_failed;
         pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_TIMEOUT, reason);
     }
     finalize_op(op, NULL, false);
 }
 
 /*!
  * \internal
  * \brief Timer callback to finalize a remote fencer operation that timed out
  *
  * \param[in] userdata  Fencer operation that timed out
  *
  * \return G_SOURCE_REMOVE (which tells glib not to restart the timer)
  */
 static gboolean
 remote_op_timeout(gpointer userdata)
 {
     remote_fencing_op_t *op = userdata;
 
     if (op->state == st_done) {
         crm_debug("Action '%s' targeting %s for client %s already completed "
                   CRM_XS " id=%.8s",
                   op->action, op->target, op->client_name, op->id);
     } else {
         finalize_timed_out_op(userdata, "Fencing did not complete within a "
                                         "total timeout based on the "
                                         "configured timeout and retries for "
                                         "any devices attempted");
     }
     return G_SOURCE_REMOVE;
 }
 
 static gboolean
 remote_op_query_timeout(gpointer data)
 {
     remote_fencing_op_t *op = data;
 
     op->query_timer = 0;
     if (op->state == st_done) {
         crm_debug("Operation %.8s targeting %s already completed",
                   op->id, op->target);
     } else if (op->state == st_exec) {
         crm_debug("Operation %.8s targeting %s already in progress",
                   op->id, op->target);
     } else if (op->query_results) {
         // Query succeeded, so attempt the actual fencing
         crm_debug("Query %.8s targeting %s complete (state=%s)",
                   op->id, op->target, stonith_op_state_str(op->state));
         request_peer_fencing(op, NULL);
     } else {
         crm_debug("Query %.8s targeting %s timed out (state=%s)",
                   op->id, op->target, stonith_op_state_str(op->state));
         if (op->op_timer_total) {
             g_source_remove(op->op_timer_total);
             op->op_timer_total = 0;
         }
         finalize_timed_out_op(op, "No capable peers replied to device query "
                                   "within timeout");
     }
 
     return FALSE;
 }
 
 static gboolean
 topology_is_empty(stonith_topology_t *tp)
 {
     int i;
 
     if (tp == NULL) {
         return TRUE;
     }
 
     for (i = 0; i < ST_LEVEL_MAX; i++) {
         if (tp->levels[i] != NULL) {
             return FALSE;
         }
     }
     return TRUE;
 }
 
 /*!
  * \internal
  * \brief Add a device to an operation's automatic unfencing list
  *
  * \param[in,out] op      Operation to modify
  * \param[in]     device  Device ID to add
  */
 static void
 add_required_device(remote_fencing_op_t *op, const char *device)
 {
     GList *match  = g_list_find_custom(op->automatic_list, device,
                                          sort_strings);
 
     if (!match) {
         op->automatic_list = g_list_prepend(op->automatic_list, strdup(device));
     }
 }
 
 /*!
  * \internal
  * \brief Remove a device from the automatic unfencing list
  *
  * \param[in,out] op      Operation to modify
  * \param[in]     device  Device ID to remove
  */
 static void
 remove_required_device(remote_fencing_op_t *op, const char *device)
 {
     GList *match = g_list_find_custom(op->automatic_list, device,
                                         sort_strings);
 
     if (match) {
         op->automatic_list = g_list_remove(op->automatic_list, match->data);
     }
 }
 
 /*!
  * \internal
  * \brief Set an operation's device list to a deep copy of a given list
  *
  * \param[in,out] op       Operation to modify
  * \param[in]     devices  List of device IDs to copy
  */
 static void
 set_op_device_list(remote_fencing_op_t * op, GList *devices)
 {
     GList *lpc = NULL;
 
     if (op->devices_list) {
         g_list_free_full(op->devices_list, free);
         op->devices_list = NULL;
     }
     for (lpc = devices; lpc != NULL; lpc = lpc->next) {
         op->devices_list = g_list_append(op->devices_list, strdup(lpc->data));
     }
     op->devices = op->devices_list;
 }
 
 /*!
  * \internal
  * \brief Check whether a node matches a topology target
  *
  * \param[in] tp    Topology table entry to check
  * \param[in] node  Name of node to check
  *
  * \return TRUE if node matches topology target
  */
 static gboolean
 topology_matches(const stonith_topology_t *tp, const char *node)
 {
     regex_t r_patt;
 
     CRM_CHECK(node && tp && tp->target, return FALSE);
     switch (tp->kind) {
         case fenced_target_by_attribute:
             /* This level targets by attribute, so tp->target is a NAME=VALUE pair
              * of a permanent attribute applied to targeted nodes. The test below
              * relies on the locally cached copy of the CIB, so if fencing needs to
              * be done before the initial CIB is received or after a malformed CIB
              * is received, then the topology cannot be used.
              */
             if (node_has_attr(node, tp->target_attribute, tp->target_value)) {
                 crm_notice("Matched %s with %s by attribute", node, tp->target);
                 return TRUE;
             }
             break;
 
         case fenced_target_by_pattern:
             /* This level targets node names matching a pattern, so tp->target
              * (and tp->target_pattern) is a regular expression.
              */
             if (regcomp(&r_patt, tp->target_pattern, REG_EXTENDED|REG_NOSUB)) {
                 crm_info("Bad regex '%s' for fencing level", tp->target);
             } else {
                 int status = regexec(&r_patt, node, 0, NULL, 0);
 
                 regfree(&r_patt);
                 if (status == 0) {
                     crm_notice("Matched %s with %s by name", node, tp->target);
                     return TRUE;
                 }
             }
             break;
 
         case fenced_target_by_name:
             crm_trace("Testing %s against %s", node, tp->target);
             return pcmk__str_eq(tp->target, node, pcmk__str_casei);
 
         default:
             break;
     }
     crm_trace("No match for %s with %s", node, tp->target);
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Find the topology table entry matching a given node
  *
  * \param[in] host  Name of node to search for
  *
  * \return Matching topology table entry if found, otherwise NULL
  */
 stonith_topology_t *
 find_topology_for_host(const char *host)
 {
     GHashTableIter tIter;
     stonith_topology_t *tp = g_hash_table_lookup(topology, host);
 
     if (tp != NULL) {
         crm_trace("Found %s for %s in %d entries", tp->target, host, g_hash_table_size(topology));
         return tp;
     }
 
     g_hash_table_iter_init(&tIter, topology);
     while (g_hash_table_iter_next(&tIter, NULL, (gpointer *) & tp)) {
         if (topology_matches(tp, host)) {
             crm_trace("Found %s for %s in %d entries", tp->target, host, g_hash_table_size(topology));
             return tp;
         }
     }
 
     crm_trace("No matches for %s in %d topology entries", host, g_hash_table_size(topology));
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Set fencing operation's device list to target's next topology level
  *
  * \param[in,out] op        Remote fencing operation to modify
  * \param[in]     empty_ok  If true, an operation without a target (i.e.
  *                          queries) or a target without a topology will get a
  *                          pcmk_rc_ok return value instead of ENODEV
  *
  * \return Standard Pacemaker return value
  */
 static int
 advance_topology_level(remote_fencing_op_t *op, bool empty_ok)
 {
     stonith_topology_t *tp = NULL;
 
     if (op->target) {
         tp = find_topology_for_host(op->target);
     }
     if (topology_is_empty(tp)) {
         return empty_ok? pcmk_rc_ok : ENODEV;
     }
 
     CRM_ASSERT(tp->levels != NULL);
 
     stonith__set_call_options(op->call_options, op->id, st_opt_topology);
 
     /* This is a new level, so undo any remapping left over from previous */
     undo_op_remap(op);
 
     do {
         op->level++;
 
     } while (op->level < ST_LEVEL_MAX && tp->levels[op->level] == NULL);
 
     if (op->level < ST_LEVEL_MAX) {
         crm_trace("Attempting fencing level %d targeting %s (%d devices) "
                   "for client %s@%s (id=%.8s)",
                   op->level, op->target, g_list_length(tp->levels[op->level]),
                   op->client_name, op->originator, op->id);
         set_op_device_list(op, tp->levels[op->level]);
 
         // The requested delay has been applied for the first fencing level
         if (op->level > 1 && op->delay > 0) {
             op->delay = 0;
         }
 
         if (g_list_next(op->devices_list) && pcmk__str_eq(op->action, "reboot", pcmk__str_casei)) {
             /* A reboot has been requested for a topology level with multiple
              * devices. Instead of rebooting the devices sequentially, we will
              * turn them all off, then turn them all on again. (Think about
              * switched power outlets for redundant power supplies.)
              */
             op_phase_off(op);
         }
         return pcmk_rc_ok;
     }
 
     crm_notice("All fencing options targeting %s for client %s@%s failed "
                CRM_XS " id=%.8s",
                op->target, op->client_name, op->originator, op->id);
     return ENODEV;
 }
 
 /*!
  * \internal
  * \brief Check whether an operation duplicates another in-flight operation,
  *        and if so, merge it into that operation and mark it as a duplicate
  *
  * \param[in,out] op  Operation to check for duplicates
  */
 static void
 merge_duplicates(remote_fencing_op_t * op)
 {
     GHashTableIter iter;
     remote_fencing_op_t *other = NULL;
 
     time_t now = time(NULL);
 
     g_hash_table_iter_init(&iter, stonith_remote_op_list);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&other)) {
         const char *other_action = op_requested_action(other);
 
         if (!strcmp(op->id, other->id)) {
             continue; // Don't compare against self
         }
         if (other->state > st_exec) {
             crm_trace("%.8s not duplicate of %.8s: not in progress",
                       op->id, other->id);
             continue;
         }
         if (!pcmk__str_eq(op->target, other->target, pcmk__str_casei)) {
             crm_trace("%.8s not duplicate of %.8s: node %s vs. %s",
                       op->id, other->id, op->target, other->target);
             continue;
         }
         if (!pcmk__str_eq(op->action, other_action, pcmk__str_casei)) {
             crm_trace("%.8s not duplicate of %.8s: action %s vs. %s",
                       op->id, other->id, op->action, other_action);
             continue;
         }
         if (pcmk__str_eq(op->client_name, other->client_name, pcmk__str_casei)) {
             crm_trace("%.8s not duplicate of %.8s: same client %s",
                       op->id, other->id, op->client_name);
             continue;
         }
         if (pcmk__str_eq(other->target, other->originator, pcmk__str_casei)) {
             crm_trace("%.8s not duplicate of %.8s: suicide for %s",
                       op->id, other->id, other->target);
             continue;
         }
         if (!fencing_peer_active(crm_get_peer(0, other->originator))) {
             crm_notice("Failing action '%s' targeting %s originating from "
                        "client %s@%s: Originator is dead " CRM_XS " id=%.8s",
                        other->action, other->target, other->client_name,
                        other->originator, other->id);
             crm_trace("%.8s not duplicate of %.8s: originator dead",
                       op->id, other->id);
             other->state = st_failed;
             continue;
         }
         if ((other->total_timeout > 0)
             && (now > (other->total_timeout + other->created))) {
             crm_trace("%.8s not duplicate of %.8s: old (%ld vs. %ld + %d)",
                       op->id, other->id, now, other->created,
                       other->total_timeout);
             continue;
         }
 
         /* There is another in-flight request to fence the same host.
          * Piggyback on that instead. If it fails, so do we.
          */
         other->duplicates = g_list_append(other->duplicates, op);
         if (other->total_timeout == 0) {
             other->total_timeout = op->total_timeout =
                 TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, NULL);
             crm_trace("Best guess as to timeout used for %.8s: %d",
                       other->id, other->total_timeout);
         }
         crm_notice("Merging fencing action '%s' targeting %s originating from "
                    "client %s with identical request from %s@%s "
                    CRM_XS " original=%.8s duplicate=%.8s total_timeout=%ds",
                    op->action, op->target, op->client_name,
                    other->client_name, other->originator,
                    op->id, other->id, other->total_timeout);
         report_timeout_period(op, other->total_timeout);
         op->state = st_duplicate;
     }
 }
 
 /*!
  * \internal
  * \brief Count cluster peers that are active for fencing purposes
  *
  * \return Number of active peers in the cluster peer cache
  */
 static uint32_t
 fencing_active_peers(void)
 {
     uint32_t count = 0;
     crm_node_t *entry;
     GHashTableIter gIter;
 
     g_hash_table_iter_init(&gIter, crm_peer_cache);
     while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
         if (fencing_peer_active(entry)) {
             count++;
         }
     }
     return count;
 }
 
 /*!
  * \internal
  * \brief Process a manual confirmation of a pending fence action
  *
  * \param[in]  client  IPC client that sent confirmation
  * \param[in]  msg     Request XML with manual confirmation
  *
  * \return Standard Pacemaker return code
  */
 int
 fenced_handle_manual_confirmation(pcmk__client_t *client, xmlNode *msg)
 {
     remote_fencing_op_t *op = NULL;
     xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_ERR);
 
     CRM_CHECK(dev != NULL, return EPROTO);
 
     crm_notice("Received manual confirmation that %s has been fenced",
                crm_str(crm_element_value(dev, F_STONITH_TARGET)));
     op = initiate_remote_stonith_op(client, msg, TRUE);
     if (op == NULL) {
         return EPROTO;
     }
     op->state = st_done;
     set_fencing_completed(op);
     op->delegate = strdup("a human");
 
     // For the fencer's purposes, the fencing operation is done
     pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
     finalize_op(op, msg, false);
 
     /* For the requester's purposes, the operation is still pending. The
      * actual result will be sent asynchronously via the operation's done_cb().
      */
     return EINPROGRESS;
 }
 
 /*!
  * \internal
  * \brief Create a new remote stonith operation
  *
  * \param[in] client   ID of local stonith client that initiated the operation
  * \param[in] request  The request from the client that started the operation
  * \param[in] peer     TRUE if this operation is owned by another stonith peer
  *                     (an operation owned by one peer is stored on all peers,
  *                     but only the owner executes it; all nodes get the results
  *                     once the owner finishes execution)
  *
  * \return Newly created operation, or the existing operation if it was
  *         already recorded (or NULL on error)
  */
 void *
 create_remote_stonith_op(const char *client, xmlNode * request, gboolean peer)
 {
     remote_fencing_op_t *op = NULL;
     xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_NEVER);
     int call_options = 0;
     const char *operation = NULL;
 
     init_stonith_remote_op_hash_table(&stonith_remote_op_list);
 
     /* If this operation is owned by another node, check to make
      * sure we haven't already created this operation. */
     if (peer && dev) {
         const char *op_id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID);
 
         CRM_CHECK(op_id != NULL, return NULL);
 
         op = g_hash_table_lookup(stonith_remote_op_list, op_id);
         if (op) {
             crm_debug("Reusing existing remote fencing op %.8s for %s",
                       op_id, ((client == NULL)? "unknown client" : client));
             return op;
         }
     }
 
     op = calloc(1, sizeof(remote_fencing_op_t));
     CRM_ASSERT(op != NULL);
 
     crm_element_value_int(request, F_STONITH_TIMEOUT, &(op->base_timeout));
     // Value -1 means disable any static/random fencing delays
     crm_element_value_int(request, F_STONITH_DELAY, &(op->delay));
 
     if (peer && dev) {
         op->id = crm_element_value_copy(dev, F_STONITH_REMOTE_OP_ID);
     } else {
         op->id = crm_generate_uuid();
     }
 
     g_hash_table_replace(stonith_remote_op_list, op->id, op);
 
     op->state = st_query;
     op->replies_expected = fencing_active_peers();
     op->action = crm_element_value_copy(dev, F_STONITH_ACTION);
     op->originator = crm_element_value_copy(dev, F_STONITH_ORIGIN);
     op->delegate = crm_element_value_copy(dev, F_STONITH_DELEGATE); /* May not be set */
     op->created = time(NULL);
 
     if (op->originator == NULL) {
         /* Local or relayed request */
         op->originator = strdup(stonith_our_uname);
     }
 
     CRM_LOG_ASSERT(client != NULL);
     if (client) {
         op->client_id = strdup(client);
     }
 
     /* For a relayed request, this fencer itself is effectively the client */
     operation = crm_element_value(request, F_STONITH_OPERATION);
 
     if (pcmk__str_eq(operation, STONITH_OP_RELAY, pcmk__str_none)) {
         op->client_name = crm_strdup_printf("%s.%lu", crm_system_name,
                                             (unsigned long) getpid());
     } else {
         op->client_name = crm_element_value_copy(request, F_STONITH_CLIENTNAME);
     }
 
     op->target = crm_element_value_copy(dev, F_STONITH_TARGET);
     op->request = copy_xml(request);    /* TODO: Figure out how to avoid this */
     crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
     op->call_options = call_options;
 
     crm_element_value_int(request, F_STONITH_CALLID, &(op->client_callid));
 
     crm_trace("%s new fencing op %s ('%s' targeting %s for client %s, "
               "base timeout %d, %u %s expected)",
               (peer && dev)? "Recorded" : "Generated", op->id, op->action,
               op->target, op->client_name, op->base_timeout,
               op->replies_expected,
               pcmk__plural_alt(op->replies_expected, "reply", "replies"));
 
     if (op->call_options & st_opt_cs_nodeid) {
         int nodeid;
         crm_node_t *node;
 
         pcmk__scan_min_int(op->target, &nodeid, 0);
         node = pcmk__search_known_node_cache(nodeid, NULL, CRM_GET_PEER_ANY);
 
         /* Ensure the conversion only happens once */
         stonith__clear_call_options(op->call_options, op->id, st_opt_cs_nodeid);
 
         if (node && node->uname) {
             free(op->target);
             op->target = strdup(node->uname);
 
         } else {
             crm_warn("Could not expand nodeid '%s' into a host name", op->target);
         }
     }
 
     /* Check whether this operation duplicates another in-flight operation */
     merge_duplicates(op);
 
     if (op->state != st_duplicate) {
         /* kick history readers */
         fenced_send_notification(T_STONITH_NOTIFY_HISTORY, NULL, NULL);
     }
 
     /* safe to trim as long as that doesn't touch pending ops */
     stonith_fence_history_trim();
 
     return op;
 }
 
 /*!
  * \internal
  * \brief Create a peer fencing operation from a request, and initiate it
  *
  * \param[in] client     IPC client that made request (NULL to get from request)
  * \param[in] request    Request XML
  * \param[in] manual_ack Whether this is a manual action confirmation
  *
  * \return Newly created operation on success, otherwise NULL
  */
 remote_fencing_op_t *
 initiate_remote_stonith_op(pcmk__client_t *client, xmlNode *request,
                            gboolean manual_ack)
 {
     int query_timeout = 0;
     xmlNode *query = NULL;
     const char *client_id = NULL;
     remote_fencing_op_t *op = NULL;
     const char *relay_op_id = NULL;
     const char *operation = NULL;
 
     if (client) {
         client_id = client->id;
     } else {
         client_id = crm_element_value(request, F_STONITH_CLIENTID);
     }
 
     CRM_LOG_ASSERT(client_id != NULL);
     op = create_remote_stonith_op(client_id, request, FALSE);
     op->owner = TRUE;
     if (manual_ack) {
         return op;
     }
 
     CRM_CHECK(op->action, return NULL);
 
     if (advance_topology_level(op, true) != pcmk_rc_ok) {
         op->state = st_failed;
     }
 
     switch (op->state) {
         case st_failed:
             // advance_topology_level() exhausted levels
             pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_ERROR,
                              "All topology levels failed");
             crm_warn("Could not request peer fencing (%s) targeting %s "
                      CRM_XS " id=%.8s", op->action, op->target, op->id);
             finalize_op(op, NULL, false);
             return op;
 
         case st_duplicate:
             crm_info("Requesting peer fencing (%s) targeting %s (duplicate) "
                      CRM_XS " id=%.8s", op->action, op->target, op->id);
             return op;
 
         default:
             crm_notice("Requesting peer fencing (%s) targeting %s "
                        CRM_XS " id=%.8s state=%s base_timeout=%d",
                        op->action, op->target, op->id,
                        stonith_op_state_str(op->state), op->base_timeout);
     }
 
     query = stonith_create_op(op->client_callid, op->id, STONITH_OP_QUERY,
                               NULL, op->call_options);
 
     crm_xml_add(query, F_STONITH_REMOTE_OP_ID, op->id);
     crm_xml_add(query, F_STONITH_TARGET, op->target);
     crm_xml_add(query, F_STONITH_ACTION, op_requested_action(op));
     crm_xml_add(query, F_STONITH_ORIGIN, op->originator);
     crm_xml_add(query, F_STONITH_CLIENTID, op->client_id);
     crm_xml_add(query, F_STONITH_CLIENTNAME, op->client_name);
     crm_xml_add_int(query, F_STONITH_TIMEOUT, op->base_timeout);
 
     /* For a relayed request, add the relay information to the query so the
      * original relayed operation can be deleted
      */
     operation = crm_element_value(request, F_STONITH_OPERATION);
     if (pcmk__str_eq(operation, STONITH_OP_RELAY, pcmk__str_none)) {
         relay_op_id = crm_element_value(request, F_STONITH_REMOTE_OP_ID);
         if (relay_op_id) {
             crm_xml_add(query, F_STONITH_REMOTE_OP_ID_RELAY, relay_op_id);
         }
     }
 
     send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE);
     free_xml(query);
 
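     // Give peers a padded base timeout to reply to the query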
     query_timeout = op->base_timeout * TIMEOUT_MULTIPLY_FACTOR;
     op->query_timer = g_timeout_add((1000 * query_timeout), remote_op_query_timeout, op);
 
     return op;
 }
 
 enum find_best_peer_options {
     /*! Skip checking the target peer for capable fencing devices */
     FIND_PEER_SKIP_TARGET = 0x0001,
     /*! Only check the target peer for capable fencing devices */
     FIND_PEER_TARGET_ONLY = 0x0002,
     /*! Skip peers and devices that are not verified */
     FIND_PEER_VERIFIED_ONLY = 0x0004,
 };
 
 static peer_device_info_t *
 find_best_peer(const char *device, remote_fencing_op_t * op, enum find_best_peer_options options)
 {
     GList *iter = NULL;
     gboolean verified_devices_only = (options & FIND_PEER_VERIFIED_ONLY) ? TRUE : FALSE;
 
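     /* With topology in use, peers are searched for one specific device at a
      * time, so there is nothing to find without one
      */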
     if (!device && pcmk_is_set(op->call_options, st_opt_topology)) {
         return NULL;
     }
 
     for (iter = op->query_results; iter != NULL; iter = iter->next) {
         peer_device_info_t *peer = iter->data;
 
         crm_trace("Testing result from %s targeting %s with %d device%s: %d %x",
                   peer->host, op->target, peer->ndevices,
                   pcmk__plural_s(peer->ndevices), peer->tried, options);
         if ((options & FIND_PEER_SKIP_TARGET) && pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) {
             continue;
         }
         if ((options & FIND_PEER_TARGET_ONLY) && !pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) {
             continue;
         }
 
         if (pcmk_is_set(op->call_options, st_opt_topology)) {
 
             if (grab_peer_device(op, peer, device, verified_devices_only)) {
                 return peer;
             }
 
         } else if ((peer->tried == FALSE)
                    && count_peer_devices(op, peer, verified_devices_only)) {
 
             /* No topology: Use the current best peer */
             crm_trace("Simple fencing");
             return peer;
         }
     }
 
     return NULL;
 }
 
 static peer_device_info_t *
 stonith_choose_peer(remote_fencing_op_t * op)
 {
     const char *device = NULL;
     peer_device_info_t *peer = NULL;
     uint32_t active = fencing_active_peers();
 
     do {
         if (op->devices) {
             device = op->devices->data;
             crm_trace("Checking for someone to fence (%s) %s using %s",
                       op->action, op->target, device);
         } else {
             crm_trace("Checking for someone to fence (%s) %s",
                       op->action, op->target);
         }
 
         /* Best choice is a peer other than the target with verified access */
         peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET|FIND_PEER_VERIFIED_ONLY);
         if (peer) {
             crm_trace("Found verified peer %s for %s", peer->host, device?device:"<any>");
             return peer;
         }
 
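         /* If the query is still running and more replies may arrive, wait for
          * them rather than settle for an unverified device now
          */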
         if (op->query_timer != 0 && op->replies < QB_MIN(op->replies_expected, active)) {
             crm_trace("Waiting before looking for unverified devices to fence %s", op->target);
             return NULL;
         }
 
         /* If no other peer has verified access, next best is unverified access */
         peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET);
         if (peer) {
             crm_trace("Found best unverified peer %s", peer->host);
             return peer;
         }
 
         /* If no other peer can do it, last option is self-fencing
          * (which is never allowed for the "on" phase of a remapped reboot)
          */
         if (op->phase != st_phase_on) {
             peer = find_best_peer(device, op, FIND_PEER_TARGET_ONLY);
             if (peer) {
                 crm_trace("%s will fence itself", peer->host);
                 return peer;
             }
         }
 
         /* Try the next fencing level if there is one (unless we're in the "on"
          * phase of a remapped "reboot", because we ignore errors in that case)
          */
     } while ((op->phase != st_phase_on)
              && pcmk_is_set(op->call_options, st_opt_topology)
              && (advance_topology_level(op, false) == pcmk_rc_ok));
 
     crm_notice("Couldn't find anyone to fence (%s) %s using %s",
                op->action, op->target, (device? device : "any device"));
     return NULL;
 }
 
 static int
 get_device_timeout(const remote_fencing_op_t *op,
                    const peer_device_info_t *peer, const char *device)
 {
     device_properties_t *props;
 
     if (!peer || !device) {
         return op->base_timeout;
     }
 
     props = g_hash_table_lookup(peer->devices, device);
     if (!props) {
         return op->base_timeout;
     }
 
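     /* Use the device's action-specific timeout if set (otherwise the base
      * timeout), plus the device's maximum random delay
      */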
     return (props->custom_action_timeout[op->phase]?
            props->custom_action_timeout[op->phase] : op->base_timeout)
            + props->delay_max[op->phase];
 }
 
 struct timeout_data {
     const remote_fencing_op_t *op;
     const peer_device_info_t *peer;
     int total_timeout;
 };
 
 /*!
  * \internal
  * \brief Add timeout to a total if device has not been executed yet
  *
  * \param[in] key        GHashTable key (device ID)
  * \param[in] value      GHashTable value (device properties)
  * \param[in] user_data  Timeout data
  */
 static void
 add_device_timeout(gpointer key, gpointer value, gpointer user_data)
 {
     const char *device_id = key;
     device_properties_t *props = value;
     struct timeout_data *timeout = user_data;
 
     if (!props->executed[timeout->op->phase]
         && !props->disallowed[timeout->op->phase]) {
         timeout->total_timeout += get_device_timeout(timeout->op,
                                                      timeout->peer, device_id);
     }
 }
 
 static int
 get_peer_timeout(const remote_fencing_op_t *op, const peer_device_info_t *peer)
 {
     struct timeout_data timeout;
 
     timeout.op = op;
     timeout.peer = peer;
     timeout.total_timeout = 0;
 
     g_hash_table_foreach(peer->devices, add_device_timeout, &timeout);
 
     return (timeout.total_timeout? timeout.total_timeout : op->base_timeout);
 }
 
 static int
 get_op_total_timeout(const remote_fencing_op_t *op,
                      const peer_device_info_t *chosen_peer)
 {
     int total_timeout = 0;
     stonith_topology_t *tp = find_topology_for_host(op->target);
 
     if (pcmk_is_set(op->call_options, st_opt_topology) && tp) {
         int i;
         GList *device_list = NULL;
         GList *iter = NULL;
 
         /* Yep, this looks scary, nested loops all over the place.
          * Here is what is going on.
          * Loop1: Iterate through fencing levels.
          * Loop2: If a fencing level has devices, loop through each device
          * Loop3: For each device in a fencing level, see what peer owns it
          *        and what that peer has reported the timeout is for the device.
          */
         for (i = 0; i < ST_LEVEL_MAX; i++) {
             if (!tp->levels[i]) {
                 continue;
             }
             for (device_list = tp->levels[i]; device_list; device_list = device_list->next) {
                 for (iter = op->query_results; iter != NULL; iter = iter->next) {
                     const peer_device_info_t *peer = iter->data;
 
                     if (find_peer_device(op, peer, device_list->data)) {
                         total_timeout += get_device_timeout(op, peer,
                                                             device_list->data);
                         break;
                     }
                 }               /* End Loop3: match device with peer that owns device, find device's timeout period */
             }                   /* End Loop2: iterate through devices at a specific level */
         }                       /* End Loop1: iterate through fencing levels */
 
     } else if (chosen_peer) {
         total_timeout = get_peer_timeout(op, chosen_peer);
     } else {
         total_timeout = op->base_timeout;
     }
 
     return total_timeout ? total_timeout : op->base_timeout;
 }
 
 static void
 report_timeout_period(remote_fencing_op_t * op, int op_timeout)
 {
     GList *iter = NULL;
     xmlNode *update = NULL;
     const char *client_node = NULL;
     const char *client_id = NULL;
     const char *call_id = NULL;
 
     if (op->call_options & st_opt_sync_call) {
         /* There is no reason to report the timeout for a synchronous call,
          * because the client cannot act on the reported timeout while it is
          * blocked waiting for the response. This update matters only for
          * asynchronous calls, which report their results via a callback.
          */
         return;
     } else if (!op->request) {
         return;
     }
 
     crm_trace("Reporting timeout for %s (id=%.8s)", op->client_name, op->id);
     client_node = crm_element_value(op->request, F_STONITH_CLIENTNODE);
     call_id = crm_element_value(op->request, F_STONITH_CALLID);
     client_id = crm_element_value(op->request, F_STONITH_CLIENTID);
     if (!client_node || !call_id || !client_id) {
         return;
     }
 
     if (pcmk__str_eq(client_node, stonith_our_uname, pcmk__str_casei)) {
-        /* The client is connected to this node, send the update direclty to them */
+        // Client is connected to this node, so send update directly to them
         do_stonith_async_timeout_update(client_id, call_id, op_timeout);
         return;
     }
 
     /* The client is connected to another node, so relay this update to it */
     update = stonith_create_op(op->client_callid, op->id, STONITH_OP_TIMEOUT_UPDATE, NULL, 0);
     crm_xml_add(update, F_STONITH_REMOTE_OP_ID, op->id);
     crm_xml_add(update, F_STONITH_CLIENTID, client_id);
     crm_xml_add(update, F_STONITH_CALLID, call_id);
     crm_xml_add_int(update, F_STONITH_TIMEOUT, op_timeout);
 
     send_cluster_message(crm_get_peer(0, client_node), crm_msg_stonith_ng, update, FALSE);
 
     free_xml(update);
 
     for (iter = op->duplicates; iter != NULL; iter = iter->next) {
         remote_fencing_op_t *dup = iter->data;
 
         crm_trace("Reporting timeout for duplicate %.8s to client %s",
                   dup->id, dup->client_name);
         report_timeout_period(iter->data, op_timeout);
     }
 }
 
 /*!
  * \internal
  * \brief Advance an operation to the next device in its topology
  *
  * \param[in,out] op      Fencer operation to advance
  * \param[in]     device  ID of device that just completed
  * \param[in,out] msg     If not NULL, XML reply of last delegated fencing operation
  */
 static void
 advance_topology_device_in_level(remote_fencing_op_t *op, const char *device,
                                  xmlNode *msg)
 {
     /* Advance to the next device at this topology level, if any */
     if (op->devices) {
         op->devices = op->devices->next;
     }
 
     /* Handle automatic unfencing if an "on" action was requested */
     if ((op->phase == st_phase_requested) && pcmk__str_eq(op->action, "on", pcmk__str_casei)) {
         /* If the device we just executed was required, it's not anymore */
         remove_required_device(op, device);
 
         /* If there are no more devices at this topology level, run through any
          * remaining devices with automatic unfencing
          */
         if (op->devices == NULL) {
             op->devices = op->automatic_list;
         }
     }
 
     if ((op->devices == NULL) && (op->phase == st_phase_off)) {
         /* We're done with this level and with required devices, but we had
          * remapped "reboot" to "off", so start over with "on". If any devices
          * need to be turned back on, op->devices will be non-NULL after this.
          */
         op_phase_on(op);
     }
 
     // This function is only called if the previous device succeeded
     pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
 
     if (op->devices) {
         /* Necessary devices remain, so execute the next one */
         crm_trace("Next targeting %s on behalf of %s@%s",
                   op->target, op->client_name, op->originator);
 
         // The requested delay has been applied for the first device
         if (op->delay > 0) {
             op->delay = 0;
         }
 
         request_peer_fencing(op, NULL);
     } else {
         /* We're done with all devices and phases, so finalize operation */
         crm_trace("Marking complex fencing op targeting %s as complete",
                   op->target);
         op->state = st_done;
         finalize_op(op, msg, false);
     }
 }
 
 static gboolean
 check_watchdog_fencing_and_wait(remote_fencing_op_t * op)
 {
     if (node_does_watchdog_fencing(op->target)) {
 
         crm_notice("Waiting %lds for %s to self-fence (%s) for "
                    "client %s " CRM_XS " id=%.8s",
                    (stonith_watchdog_timeout_ms / 1000),
                    op->target, op->action, op->client_name, op->id);
         op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms,
                                          remote_op_watchdog_done, op);
         return TRUE;
     } else {
         crm_debug("Skipping fallback to watchdog-fencing as %s is "
                  "not in host-list", op->target);
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Ask a peer to execute a fencing operation
  *
  * \param[in,out] op    Fencing operation to be executed
  * \param[in,out] peer  Peer to request fencing from; if NULL, or if topology
  *                      is in use, the best available peer is chosen instead
  */
 static void
 request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer)
 {
     const char *device = NULL;
     int timeout;
 
     CRM_CHECK(op != NULL, return);
 
     crm_trace("Action %.8s targeting %s for %s is %s",
               op->id, op->target, op->client_name,
               stonith_op_state_str(op->state));
     timeout = op->base_timeout;
     if ((peer == NULL) && !pcmk_is_set(op->call_options, st_opt_topology)) {
         peer = stonith_choose_peer(op);
     }
 
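     /* On the first attempt for this operation, calculate its total timeout
      * across all eligible peers and devices, pad it by
      * TIMEOUT_MULTIPLY_FACTOR, and start the overall operation timer
      */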
     if (!op->op_timer_total) {
         int total_timeout = get_op_total_timeout(op, peer);
 
         op->total_timeout = TIMEOUT_MULTIPLY_FACTOR * total_timeout;
         op->op_timer_total = g_timeout_add(1000 * op->total_timeout, remote_op_timeout, op);
         report_timeout_period(op, op->total_timeout);
         crm_info("Total timeout set to %d for peer's fencing targeting %s for %s"
                  CRM_XS " id=%.8s",
                  total_timeout, op->target, op->client_name, op->id);
     }
 
     if (pcmk_is_set(op->call_options, st_opt_topology) && op->devices) {
         /* Ignore the caller's peer preference if topology is in use, because
          * that peer might not have access to the required device. With
          * topology, stonith_choose_peer() removes the device from further
          * consideration, so the timeout must be calculated beforehand.
          *
          * @TODO Basing the total timeout on the caller's preferred peer (above)
          *       is less than ideal.
          */
         peer = stonith_choose_peer(op);
 
         device = op->devices->data;
         timeout = get_device_timeout(op, peer, device);
     }
 
     if (peer) {
         int timeout_one = 0;
         xmlNode *remote_op = stonith_create_op(op->client_callid, op->id, STONITH_OP_FENCE, NULL, 0);
 
         crm_xml_add(remote_op, F_STONITH_REMOTE_OP_ID, op->id);
         crm_xml_add(remote_op, F_STONITH_TARGET, op->target);
         crm_xml_add(remote_op, F_STONITH_ACTION, op->action);
         crm_xml_add(remote_op, F_STONITH_ORIGIN, op->originator);
         crm_xml_add(remote_op, F_STONITH_CLIENTID, op->client_id);
         crm_xml_add(remote_op, F_STONITH_CLIENTNAME, op->client_name);
         crm_xml_add_int(remote_op, F_STONITH_TIMEOUT, timeout);
         crm_xml_add_int(remote_op, F_STONITH_CALLOPTS, op->call_options);
         crm_xml_add_int(remote_op, F_STONITH_DELAY, op->delay);
 
         if (device) {
             timeout_one = TIMEOUT_MULTIPLY_FACTOR *
                           get_device_timeout(op, peer, device);
             crm_notice("Requesting that %s perform '%s' action targeting %s "
                        "using %s " CRM_XS " for client %s (%ds)",
                        peer->host, op->action, op->target, device,
                        op->client_name, timeout_one);
             crm_xml_add(remote_op, F_STONITH_DEVICE, device);
 
         } else {
             timeout_one = TIMEOUT_MULTIPLY_FACTOR * get_peer_timeout(op, peer);
             crm_notice("Requesting that %s perform '%s' action targeting %s "
                        CRM_XS " for client %s (%ds, %lds)",
                        peer->host, op->action, op->target, op->client_name,
                        timeout_one, stonith_watchdog_timeout_ms);
         }
 
         op->state = st_exec;
         if (op->op_timer_one) {
             g_source_remove(op->op_timer_one);
         }
 
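         /* Start the regular per-device timer below unless we can fall back
          * to watchdog self-fencing instead: that requires
          * stonith-watchdog-timeout to be configured, and either the watchdog
          * device itself was chosen, or the target was chosen to execute its
          * own fencing for any action other than "on". In that case,
          * check_watchdog_fencing_and_wait() starts the watchdog timer.
          */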
         if (!(stonith_watchdog_timeout_ms > 0
               && (pcmk__str_eq(device, STONITH_WATCHDOG_ID, pcmk__str_none)
                   || (pcmk__str_eq(peer->host, op->target, pcmk__str_casei)
                       && !pcmk__str_eq(op->action, "on", pcmk__str_casei)))
               && check_watchdog_fencing_and_wait(op))) {
 
             /* Some thoughts about self-fencing cases reaching this point:
                - The check in check_watchdog_fencing_and_wait() should not
                  fail if STONITH_WATCHDOG_ID was chosen as the fencing
                  device, because its presence implies that watchdog fencing
                  is enabled.
                - If watchdog fencing is disabled, either in general or for a
                  specific target (detected in
                  check_watchdog_fencing_and_wait()), we cannot expect a
                  success reply for other kinds of self-fencing, but a timeout
                  is fine as long as the node does not come back up in the
                  meantime.
                - The delicate case is watchdog fencing enabled for a node
                  whose watchdog device is not explicitly chosen for
                  self-fencing. Local scheduler execution in sbd may detect
                  the node as unclean and lead to timely self-fencing;
                  otherwise, the choice of stonith-watchdog-timeout is at
                  least questionable.
              */
             op->op_timer_one = g_timeout_add((1000 * timeout_one), remote_op_timeout_one, op);
         }
 
         send_cluster_message(crm_get_peer(0, peer->host), crm_msg_stonith_ng, remote_op, FALSE);
         peer->tried = TRUE;
         free_xml(remote_op);
         return;
 
     } else if (op->phase == st_phase_on) {
         /* A remapped "on" cannot be executed, but the node was already
          * turned off successfully, so ignore the error and continue.
          */
         crm_warn("Ignoring %s 'on' failure (no capable peers) targeting %s "
                  "after successful 'off'", device, op->target);
         advance_topology_device_in_level(op, device, NULL);
         return;
 
     } else if (op->owner == FALSE) {
         crm_err("Fencing (%s) targeting %s for client %s is not ours to control",
                 op->action, op->target, op->client_name);
 
     } else if (op->query_timer == 0) {
         /* We've exhausted all available peers */
         crm_info("No remaining peers capable of fencing (%s) %s for client %s "
                  CRM_XS " state=%s", op->action, op->target, op->client_name,
                  stonith_op_state_str(op->state));
         CRM_CHECK(op->state < st_done, return);
         finalize_timed_out_op(op, "All nodes failed, or are unable, to "
                                   "fence target");
 
     } else if ((op->replies >= op->replies_expected)
                || (op->replies >= fencing_active_peers())) {
         /* If the operation never left the query state but we have all the
          * expected replies, then no devices are available to execute the
          * fencing operation.
          */
 
         if ((stonith_watchdog_timeout_ms > 0)
             && pcmk__str_eq(device, STONITH_WATCHDOG_ID,
                             pcmk__str_null_matches)) {
             if (check_watchdog_fencing_and_wait(op)) {
                 return;
             }
         }
 
         if (op->state == st_query) {
             crm_info("No peers (out of %d) have devices capable of fencing "
                      "(%s) %s for client %s " CRM_XS " state=%s",
                      op->replies, op->action, op->target, op->client_name,
                      stonith_op_state_str(op->state));
 
             pcmk__reset_result(&op->result);
             pcmk__set_result(&op->result, CRM_EX_ERROR,
                              PCMK_EXEC_NO_FENCE_DEVICE, NULL);
         } else {
             if (pcmk_is_set(op->call_options, st_opt_topology)) {
                 pcmk__reset_result(&op->result);
                 pcmk__set_result(&op->result, CRM_EX_ERROR,
                                  PCMK_EXEC_NO_FENCE_DEVICE, NULL);
             }
             /* ... else use existing result from previous failed attempt
              * (topology is not in use, and no devices remain to be attempted).
              * Overwriting the result with PCMK_EXEC_NO_FENCE_DEVICE would
              * prevent finalize_op() from setting the correct delegate if
              * needed.
              */
 
             crm_info("No peers (out of %d) are capable of fencing (%s) %s "
                      "for client %s " CRM_XS " state=%s",
                      op->replies, op->action, op->target, op->client_name,
                      stonith_op_state_str(op->state));
         }
 
         op->state = st_failed;
         finalize_op(op, NULL, false);
 
     } else {
         crm_info("Waiting for additional peers capable of fencing (%s) %s%s%s "
                  "for client %s " CRM_XS " id=%.8s",
                  op->action, op->target, (device? " using " : ""),
                  (device? device : ""), op->client_name, op->id);
     }
 }
 
 /*!
  * \internal
  * \brief Comparison function for sorting query results
  *
  * \param[in] a  GList item to compare
  * \param[in] b  GList item to compare
  *
  * \return Per the glib documentation, "a negative integer if the first value
  *         comes before the second, 0 if they are equal, or a positive integer
  *         if the first value comes after the second."
  */
 static gint
 sort_peers(gconstpointer a, gconstpointer b)
 {
     const peer_device_info_t *peer_a = a;
     const peer_device_info_t *peer_b = b;
 
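     /* Sort in descending order of reported device count, so peers with more
      * capable devices are tried first
      */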
     return (peer_b->ndevices - peer_a->ndevices);
 }
 
 /*!
  * \internal
  * \brief Check whether all devices in a target's fencing topology were found
  *
  * \param[in] op  Fencing operation to check
  *
  * \return TRUE if all devices at all topology levels were found in query
  *         replies, otherwise FALSE
  */
 static gboolean
 all_topology_devices_found(remote_fencing_op_t * op)
 {
     GList *device = NULL;
     GList *iter = NULL;
     device_properties_t *match = NULL;
     stonith_topology_t *tp = NULL;
     gboolean skip_target = FALSE;
     int i;
 
     tp = find_topology_for_host(op->target);
     if (!tp) {
         return FALSE;
     }
     if (pcmk__is_fencing_action(op->action)) {
         /* Don't count the devices on the target node if we are killing
          * the target node. */
         skip_target = TRUE;
     }
 
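     /* Every device at every topology level must have been reported by at
      * least one queried peer (excluding the target itself when the action
      * would kill the target)
      */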
     for (i = 0; i < ST_LEVEL_MAX; i++) {
         for (device = tp->levels[i]; device; device = device->next) {
             match = NULL;
             for (iter = op->query_results; iter && !match; iter = iter->next) {
                 peer_device_info_t *peer = iter->data;
 
                 if (skip_target && pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) {
                     continue;
                 }
                 match = find_peer_device(op, peer, device->data);
             }
             if (!match) {
                 return FALSE;
             }
         }
     }
 
     return TRUE;
 }
 
 /*!
  * \internal
  * \brief Parse action-specific device properties from XML
  *
  * \param[in]     xml     XML element containing the properties
  * \param[in]     peer    Name of peer that sent XML (for logs)
  * \param[in]     device  Device ID (for logs)
  * \param[in]     action  Action the properties relate to (for logs)
  * \param[in,out] op      Operation to update with any required devices
  * \param[in]     phase   Phase the properties relate to
  * \param[in,out] props   Device properties to update
  */
 static void
 parse_action_specific(xmlNode *xml, const char *peer, const char *device,
                       const char *action, remote_fencing_op_t *op,
                       enum st_remap_phase phase, device_properties_t *props)
 {
     props->custom_action_timeout[phase] = 0;
     crm_element_value_int(xml, F_STONITH_ACTION_TIMEOUT,
                           &props->custom_action_timeout[phase]);
     if (props->custom_action_timeout[phase]) {
         crm_trace("Peer %s with device %s returned %s action timeout %d",
                   peer, device, action, props->custom_action_timeout[phase]);
     }
 
     props->delay_max[phase] = 0;
     crm_element_value_int(xml, F_STONITH_DELAY_MAX, &props->delay_max[phase]);
     if (props->delay_max[phase]) {
         crm_trace("Peer %s with device %s returned maximum random delay %d for %s",
                   peer, device, props->delay_max[phase], action);
     }
 
     props->delay_base[phase] = 0;
     crm_element_value_int(xml, F_STONITH_DELAY_BASE, &props->delay_base[phase]);
     if (props->delay_base[phase]) {
         crm_trace("Peer %s with device %s returned base delay %d for %s",
                   peer, device, props->delay_base[phase], action);
     }
 
     /* Handle devices with automatic unfencing */
     if (pcmk__str_eq(action, "on", pcmk__str_casei)) {
         int required = 0;
 
         crm_element_value_int(xml, F_STONITH_DEVICE_REQUIRED, &required);
         if (required) {
             crm_trace("Peer %s requires device %s to execute for action %s",
                       peer, device, action);
             add_required_device(op, device);
         }
     }
 
     /* If a reboot is remapped to off+on, it's possible that a node is allowed
      * to perform one action but not another.
      */
     if (pcmk__xe_attr_is_true(xml, F_STONITH_ACTION_DISALLOWED)) {
         props->disallowed[phase] = TRUE;
         crm_trace("Peer %s is disallowed from executing %s for device %s",
                   peer, action, device);
     }
 }
 
 /*!
  * \internal
  * \brief Parse one device's properties from peer's XML query reply
  *
  * \param[in]     xml       XML node containing device properties
  * \param[in,out] op        Operation that query and reply relate to
  * \param[in,out] peer      Peer's device information
  * \param[in]     device    ID of device being parsed
  */
 static void
 add_device_properties(xmlNode *xml, remote_fencing_op_t *op,
                       peer_device_info_t *peer, const char *device)
 {
     xmlNode *child;
     int verified = 0;
     device_properties_t *props = calloc(1, sizeof(device_properties_t));
 
     /* Add a new entry to this peer's devices list */
     CRM_ASSERT(props != NULL);
     g_hash_table_insert(peer->devices, strdup(device), props);
 
     /* Peers with verified (monitored) access will be preferred */
     crm_element_value_int(xml, F_STONITH_DEVICE_VERIFIED, &verified);
     if (verified) {
         crm_trace("Peer %s has confirmed a verified device %s",
                   peer->host, device);
         props->verified = TRUE;
     }
 
     /* Parse action-specific device properties */
     parse_action_specific(xml, peer->host, device, op_requested_action(op),
                           op, st_phase_requested, props);
     for (child = pcmk__xml_first_child(xml); child != NULL;
          child = pcmk__xml_next(child)) {
         /* Replies for "reboot" operations will include the action-specific
          * values for "off" and "on" in child elements, just in case the reboot
          * winds up getting remapped.
          */
         if (pcmk__str_eq(ID(child), "off", pcmk__str_casei)) {
             parse_action_specific(child, peer->host, device, "off",
                                   op, st_phase_off, props);
         } else if (pcmk__str_eq(ID(child), "on", pcmk__str_casei)) {
             parse_action_specific(child, peer->host, device, "on",
                                   op, st_phase_on, props);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Parse a peer's XML query reply and add it to operation's results
  *
  * \param[in,out] op        Operation that query and reply relate to
  * \param[in]     host      Name of peer that sent this reply
  * \param[in]     ndevices  Number of devices expected in reply
  * \param[in]     xml       XML node containing device list
  *
  * \return Newly allocated result structure with parsed reply
  */
 static peer_device_info_t *
 add_result(remote_fencing_op_t *op, const char *host, int ndevices, xmlNode *xml)
 {
     peer_device_info_t *peer = calloc(1, sizeof(peer_device_info_t));
     xmlNode *child;
 
     // cppcheck seems not to understand the abort logic in CRM_CHECK
     // cppcheck-suppress memleak
     CRM_CHECK(peer != NULL, return NULL);
     peer->host = strdup(host);
     peer->devices = pcmk__strkey_table(free, free);
 
     /* Each child element describes one capable device available to the peer */
     for (child = pcmk__xml_first_child(xml); child != NULL;
          child = pcmk__xml_next(child)) {
         const char *device = ID(child);
 
         if (device) {
             add_device_properties(child, op, peer, device);
         }
     }
 
     peer->ndevices = g_hash_table_size(peer->devices);
     CRM_CHECK(ndevices == peer->ndevices,
               crm_err("Query claimed to have %d device%s but %d found",
                       ndevices, pcmk__plural_s(ndevices), peer->ndevices));
 
     op->query_results = g_list_insert_sorted(op->query_results, peer, sort_peers);
     return peer;
 }
 
 /*!
  * \internal
  * \brief Handle a peer's reply to our fencing query
  *
  * Parse a query result from XML and store it in the remote operation
  * table, and when enough replies have been received, issue a fencing request.
  *
  * \param[in] msg  XML reply received
  *
  * \return pcmk_ok on success, -errno on error
  *
  * \note See initiate_remote_stonith_op() for how the XML query was initially
  *       formed, and stonith_query() for how the peer formed its XML reply.
  */
 int
 process_remote_stonith_query(xmlNode * msg)
 {
     int ndevices = 0;
     gboolean host_is_target = FALSE;
     gboolean have_all_replies = FALSE;
     const char *id = NULL;
     const char *host = NULL;
     remote_fencing_op_t *op = NULL;
     peer_device_info_t *peer = NULL;
     uint32_t replies_expected;
     xmlNode *dev = get_xpath_object("//@" F_STONITH_REMOTE_OP_ID, msg, LOG_ERR);
 
     CRM_CHECK(dev != NULL, return -EPROTO);
 
     id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID);
     CRM_CHECK(id != NULL, return -EPROTO);
 
     dev = get_xpath_object("//@" F_STONITH_AVAILABLE_DEVICES, msg, LOG_ERR);
     CRM_CHECK(dev != NULL, return -EPROTO);
     crm_element_value_int(dev, F_STONITH_AVAILABLE_DEVICES, &ndevices);
 
     op = g_hash_table_lookup(stonith_remote_op_list, id);
     if (op == NULL) {
         crm_debug("Received query reply for unknown or expired operation %s",
                   id);
         return -EOPNOTSUPP;
     }
 
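     /* Count this reply, and note whether it completes the expected set while
      * the operation is still in the query phase
      */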
     replies_expected = fencing_active_peers();
     if (op->replies_expected < replies_expected) {
         replies_expected = op->replies_expected;
     }
     if ((++op->replies >= replies_expected) && (op->state == st_query)) {
         have_all_replies = TRUE;
     }
     host = crm_element_value(msg, F_ORIG);
     host_is_target = pcmk__str_eq(host, op->target, pcmk__str_casei);
 
     crm_info("Query result %d of %d from %s for %s/%s (%d device%s) %s",
              op->replies, replies_expected, host,
              op->target, op->action, ndevices, pcmk__plural_s(ndevices), id);
     if (ndevices > 0) {
         peer = add_result(op, host, ndevices, dev);
     }
 
     pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
 
     if (pcmk_is_set(op->call_options, st_opt_topology)) {
         /* If we start the fencing before all the topology results are in,
          * it is possible fencing levels will be skipped because of the missing
          * query results. */
         if (op->state == st_query && all_topology_devices_found(op)) {
             /* All the query results are in for the topology, start the fencing ops. */
             crm_trace("All topology devices found");
             request_peer_fencing(op, peer);
 
         } else if (have_all_replies) {
             crm_info("All topology query replies have arrived, continuing (%d expected/%d received)",
                      replies_expected, op->replies);
             request_peer_fencing(op, NULL);
         }
 
     } else if (op->state == st_query) {
         int nverified = count_peer_devices(op, peer, TRUE);
 
         /* We have a result for a non-topology fencing op that looks promising,
          * go ahead and start fencing before query timeout */
         if ((peer != NULL) && !host_is_target && nverified) {
             /* we have a verified device living on a peer that is not the target */
             crm_trace("Found %d verified device%s",
                       nverified, pcmk__plural_s(nverified));
             request_peer_fencing(op, peer);
 
         } else if (have_all_replies) {
             crm_info("All query replies have arrived, continuing (%d expected/%d received)",
                      replies_expected, op->replies);
             request_peer_fencing(op, NULL);
 
         } else {
             crm_trace("Waiting for more peer results before launching fencing operation");
         }
 
     } else if ((peer != NULL) && (op->state == st_done)) {
         crm_info("Discarding query result from %s (%d device%s): "
                  "Operation is %s", peer->host,
                  peer->ndevices, pcmk__plural_s(peer->ndevices),
                  stonith_op_state_str(op->state));
     }
 
     return pcmk_ok;
 }
 
 /*!
  * \internal
  * \brief Handle a peer's reply to a fencing request
  *
  * Parse a fencing reply from XML, and either finalize the operation
  * or attempt another device as appropriate.
  *
  * \param[in] msg  XML reply received
  */
 void
 fenced_process_fencing_reply(xmlNode *msg)
 {
     const char *id = NULL;
     const char *device = NULL;
     remote_fencing_op_t *op = NULL;
     xmlNode *dev = get_xpath_object("//@" F_STONITH_REMOTE_OP_ID, msg, LOG_ERR);
     pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
 
     CRM_CHECK(dev != NULL, return);
 
     id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID);
     CRM_CHECK(id != NULL, return);
 
     dev = stonith__find_xe_with_result(msg);
     CRM_CHECK(dev != NULL, return);
 
     stonith__xe_get_result(dev, &result);
 
     device = crm_element_value(dev, F_STONITH_DEVICE);
 
     if (stonith_remote_op_list) {
         op = g_hash_table_lookup(stonith_remote_op_list, id);
     }
 
     if ((op == NULL) && pcmk__result_ok(&result)) {
         /* Record successful fencing operations */
         const char *client_id = crm_element_value(dev, F_STONITH_CLIENTID);
 
         op = create_remote_stonith_op(client_id, dev, TRUE);
     }
 
     if (op == NULL) {
         /* Could be for an event that began before we started */
         /* TODO: Record the op for later querying */
         crm_info("Received peer result of unknown or expired operation %s", id);
         pcmk__reset_result(&result);
         return;
     }
 
     pcmk__reset_result(&op->result);
     op->result = result; // The operation takes ownership of the result
 
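     /* When a device list is in use, its head is the device currently being
      * executed, so a reply naming any other device is stale
      */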
     if (op->devices && device && !pcmk__str_eq(op->devices->data, device, pcmk__str_casei)) {
         crm_err("Received outdated reply for device %s (instead of %s) to "
                 "fence (%s) %s. Operation already timed out at peer level.",
                 device, (const char *) op->devices->data, op->action, op->target);
         return;
     }
 
     if (pcmk__str_eq(crm_element_value(msg, F_SUBTYPE), "broadcast", pcmk__str_casei)) {
         if (pcmk__result_ok(&op->result)) {
             op->state = st_done;
         } else {
             op->state = st_failed;
         }
         finalize_op(op, msg, false);
         return;
 
     } else if (!pcmk__str_eq(op->originator, stonith_our_uname, pcmk__str_casei)) {
         /* If this isn't a remote level broadcast, and we are not the
          * originator of the operation, we should not be receiving this msg. */
         crm_err("Received non-broadcast fencing result for operation %.8s "
                 "we do not own (device %s targeting %s)",
                 op->id, device, op->target);
         return;
     }
 
     if (pcmk_is_set(op->call_options, st_opt_topology)) {
         const char *device = NULL;
         const char *reason = op->result.exit_reason;
 
         /* We own the op, and it is complete. Broadcast the result to all
          * nodes and notify our local clients.
          */
         if (op->state == st_done) {
             finalize_op(op, msg, false);
             return;
         }
 
         device = crm_element_value(msg, F_STONITH_DEVICE);
 
         if ((op->phase == st_phase_on) && !pcmk__result_ok(&op->result)) {
             /* A remapped "on" failed, but the node was already turned off
              * successfully, so ignore the error and continue.
              */
             crm_warn("Ignoring %s 'on' failure (%s%s%s) targeting %s "
                      "after successful 'off'",
                      device, pcmk_exec_status_str(op->result.execution_status),
                      (reason == NULL)? "" : ": ",
                      (reason == NULL)? "" : reason,
                      op->target);
             pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
         } else {
             crm_notice("Action '%s' targeting %s using %s on behalf of %s@%s: "
                        "%s%s%s%s",
                        op->action, op->target, device, op->client_name,
                        op->originator,
                        pcmk_exec_status_str(op->result.execution_status),
                        (reason == NULL)? "" : " (",
                        (reason == NULL)? "" : reason,
                        (reason == NULL)? "" : ")");
         }
 
         if (pcmk__result_ok(&op->result)) {
             /* An operation completed successfully. Try another device if
              * necessary, otherwise mark the operation as done. */
             advance_topology_device_in_level(op, device, msg);
             return;
         } else {
             /* This device failed, so try another topology level. If no other
              * levels are available, mark this operation as failed and report
              * results. */
             if (advance_topology_level(op, false) != pcmk_rc_ok) {
                 op->state = st_failed;
                 finalize_op(op, msg, false);
                 return;
             }
         }
 
     } else if (pcmk__result_ok(&op->result) && (op->devices == NULL)) {
         op->state = st_done;
         finalize_op(op, msg, false);
         return;
 
     } else if ((op->result.execution_status == PCMK_EXEC_TIMEOUT)
                && (op->devices == NULL)) {
         /* If the operation timed out don't bother retrying other peers. */
         op->state = st_failed;
         finalize_op(op, msg, false);
         return;
 
     } else {
         /* Fall through and attempt the fencing action using another peer */
     }
 
     /* Retry on failure */
     crm_trace("Next for %s on behalf of %s@%s (result was: %s)",
               op->target, op->originator, op->client_name,
               pcmk_exec_status_str(op->result.execution_status));
     request_peer_fencing(op, NULL);
 }
 
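 /*!
  * \internal
  * \brief Check whether a target was successfully fenced recently enough
  *
  * \param[in] tolerance  Period (in seconds) within which an earlier
  *                       equivalent fencing operation counts
  * \param[in] target     Name of node to check
  * \param[in] action     Fencing action to check
  *
  * \return TRUE if an operation with the same target and action completed
  *         successfully within the tolerance period, otherwise FALSE
  */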
 gboolean
 stonith_check_fence_tolerance(int tolerance, const char *target, const char *action)
 {
     GHashTableIter iter;
     time_t now = time(NULL);
     remote_fencing_op_t *rop = NULL;
 
     if (tolerance <= 0 || !stonith_remote_op_list || target == NULL ||
         action == NULL) {
         return FALSE;
     }
 
     g_hash_table_iter_init(&iter, stonith_remote_op_list);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&rop)) {
         if (strcmp(rop->target, target) != 0) {
             continue;
         } else if (rop->state != st_done) {
             continue;
         /* We don't have to worry about remapped reboots here
          * because if state is done, any remapping has been undone
          */
         } else if (strcmp(rop->action, action) != 0) {
             continue;
         } else if ((rop->completed + tolerance) < now) {
             continue;
         }
 
         crm_notice("Target %s was fenced (%s) less than %ds ago by %s on behalf of %s",
                    target, action, tolerance, rop->delegate, rop->originator);
         return TRUE;
     }
     return FALSE;
 }
diff --git a/doc/sphinx/Pacemaker_Administration/agents.rst b/doc/sphinx/Pacemaker_Administration/agents.rst
index 67cda86841..e5b17e273f 100644
--- a/doc/sphinx/Pacemaker_Administration/agents.rst
+++ b/doc/sphinx/Pacemaker_Administration/agents.rst
@@ -1,404 +1,443 @@
 .. index::
    single: resource agent
 
 Resource Agents
 ---------------
 
 
 Action Completion
 #################
 
 If one resource depends on another resource via constraints, the cluster will
 interpret an expected result as sufficient to continue with dependent actions.
 This may cause timing issues if the resource agent's start action returns
 before the service is not only launched but also fully ready to perform its
 function, or if its stop action returns before the service has fully released
 all its claims on system resources. At a minimum, start or stop should not
 return before a status command would return the expected (started or stopped)
 result.
 
 
 .. index::
    single: OCF resource agent
    single: resource agent; OCF
 
 OCF Resource Agents
 ###################
 
 .. index::
    single: OCF resource agent; location
 
 Location of Custom Scripts
 __________________________
 
 OCF Resource Agents are found in ``/usr/lib/ocf/resource.d/$PROVIDER``.
 
 When creating your own agents, you are encouraged to create a new directory
 under ``/usr/lib/ocf/resource.d/`` so that they are not confused with (or
 overwritten by) the agents shipped by existing providers.
 
 So, for example, if you choose the provider name of big-corp and want a new
 resource named big-app, you would create a resource agent called
 ``/usr/lib/ocf/resource.d/big-corp/big-app`` and define a resource:
  
 .. code-block:: xml
 
    <primitive id="custom-app" class="ocf" provider="big-corp" type="big-app"/>
 
 
 .. index::
    single: OCF resource agent; action
 
 Actions
 _______
 
 All OCF resource agents are required to implement the following actions.
 
 .. table:: **Required Actions for OCF Agents**
 
    +--------------+-------------+------------------------------------------------+
    | Action       | Description | Instructions                                   |
    +==============+=============+================================================+
    | start        | Start the   | .. index::                                     |
    |              | resource    |    single: OCF resource agent; start           |
    |              |             |    single: start action                        |
    |              |             |                                                |
    |              |             | Return 0 on success and an appropriate         |
    |              |             | error code otherwise. Must not report          |
    |              |             | success until the resource is fully            |
    |              |             | active.                                        |
    +--------------+-------------+------------------------------------------------+
    | stop         | Stop the    | .. index::                                     |
    |              | resource    |    single: OCF resource agent; stop            |
    |              |             |    single: stop action                         |
    |              |             |                                                |
    |              |             | Return 0 on success and an appropriate         |
    |              |             | error code otherwise. Must not report          |
    |              |             | success until the resource is fully            |
    |              |             | stopped.                                       |
    +--------------+-------------+------------------------------------------------+
    | monitor      | Check the   | .. index::                                     |
    |              | resource's  |    single: OCF resource agent; monitor         |
    |              | state       |    single: monitor action                      |
    |              |             |                                                |
    |              |             | Exit 0 if the resource is running, 7           |
    |              |             | if it is stopped, and any other OCF            |
    |              |             | exit code if it is failed. NOTE: The           |
    |              |             | monitor script should test the state           |
    |              |             | of the resource on the local machine           |
    |              |             | only.                                          |
    +--------------+-------------+------------------------------------------------+
    | meta-data    | Describe    | .. index::                                     |
    |              | the         |    single: OCF resource agent; meta-data       |
    |              | resource    |    single: meta-data action                    |
    |              |             |                                                |
    |              |             | Provide information about this                 |
    |              |             | resource in the XML format defined by          |
    |              |             | the OCF standard. Exit with 0. NOTE:           |
    |              |             | This is *not* required to be performed         |
    |              |             | as root.                                       |
    +--------------+-------------+------------------------------------------------+
-   | validate-all | Verify the  | .. index::                                     |
-   |              | supplied    |    single: OCF resource agent; validate-all    |
-   |              | parameters  |    single: validate-all action                 |
-   |              |             |                                                |
-   |              |             | Return 0 if parameters are valid, 2 if         |
-   |              |             | not valid, and 6 if resource is not            |
-   |              |             | configured.                                    |
-   +--------------+-------------+------------------------------------------------+
 
-Additional requirements (not part of the OCF specification) are placed on
-agents that will be used for advanced concepts such as clone resources.
+OCF resource agents may optionally implement additional actions. Some are used
+only with advanced resource types such as clones.
 
 .. table:: **Optional Actions for OCF Resource Agents**
 
    +--------------+-------------+------------------------------------------------+
    | Action       | Description | Instructions                                   |
    +==============+=============+================================================+
+   | validate-all | This should | .. index::                                     |
+   |              | validate    |    single: OCF resource agent; validate-all    |
+   |              | the         |    single: validate-all action                 |
+   |              | instance    |                                                |
+   |              | parameters  | Return 0 if parameters are valid, 2 if         |
+   |              | provided.   | not valid, and 6 if resource is not            |
+   |              |             | configured.                                    |
+   +--------------+-------------+------------------------------------------------+
    | promote      | Bring the   | .. index::                                     |
    |              | local       |    single: OCF resource agent; promote         |
    |              | instance of |    single: promote action                      |
    |              | a promotable|                                                |
    |              | clone       | Return 0 on success                            |
    |              | resource to |                                                |
    |              | the promoted|                                                |
    |              | role.       |                                                |
    +--------------+-------------+------------------------------------------------+
    | demote       | Bring the   | .. index::                                     |
    |              | local       |    single: OCF resource agent; demote          |
    |              | instance of |    single: demote action                       |
    |              | a promotable|                                                |
    |              | clone       | Return 0 on success                            |
    |              | resource to |                                                |
    |              | the         |                                                |
    |              | unpromoted  |                                                |
    |              | role.       |                                                |
    +--------------+-------------+------------------------------------------------+
    | notify       | Used by the | .. index::                                     |
    |              | cluster to  |    single: OCF resource agent; notify          |
    |              | send        |    single: notify action                       |
    |              | the agent   |                                                |
    |              | pre- and    | Must not fail. Must exit with 0                |
    |              | post-       |                                                |
    |              | notification|                                                |
    |              | events      |                                                |
    |              | telling the |                                                |
    |              | resource    |                                                |
    |              | what has    |                                                |
    |              | happened and|                                                |
    |              | will happen.|                                                |
    +--------------+-------------+------------------------------------------------+
-
-One action specified in the OCF specs, ``recover``, is not currently used by
-the cluster. It is intended to be a variant of the ``start`` action that tries
-to recover a resource locally.
+   | reload       | Reload the  | .. index::                                     |
+   |              | service's   |    single: OCF resource agent; reload          |
+   |              | own         |    single: reload action                       |
+   |              | config.     |                                                |
+   |              |             | Not used by Pacemaker                          |
+   +--------------+-------------+------------------------------------------------+
+   | reload-agent | Make        | .. index::                                     |
+   |              | effective   |    single: OCF resource agent; reload-agent    |
+   |              | any changes |    single: reload-agent action                 |
+   |              | in instance |                                                |
+   |              | parameters  | This is used when the agent can handle a       |
+   |              | marked as   | change in some of its parameters more          |
+   |              | reloadable  | efficiently than stopping and starting the     |
+   |              | in the      | resource.                                      |
+   |              | agent's     |                                                |
+   |              | meta-data.  |                                                |
+   +--------------+-------------+------------------------------------------------+
+   | recover      | Restart the | .. index::                                     |
+   |              | service.    |    single: OCF resource agent; recover         |
+   |              |             |    single: recover action                      |
+   |              |             |                                                |
+   |              |             | Not used by Pacemaker                          |
+   +--------------+-------------+------------------------------------------------+
 
 .. important::
 
    If you create a new OCF resource agent, use ``ocf-tester`` to verify that
    the agent complies with the OCF standard.
 
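 For example, the following sketch runs ``ocf-tester`` against the hypothetical
 agent from earlier (``big-corp/big-app``), with a hypothetical ``ip``
 parameter; substitute your own agent and parameters:
 
 .. code-block:: none
 
    # ocf-tester -n test-app -o ip=192.0.2.10 \
        /usr/lib/ocf/resource.d/big-corp/big-app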
 
 .. index::
    single: OCF resource agent; return code
 
 How are OCF Return Codes Interpreted?
 _____________________________________
 
 The first thing the cluster does is check the return code against the
 expected result. If the result does not match the expected value, the
 operation is considered to have failed, and recovery action is initiated.
 
 There are three types of failure recovery:
 
 .. table:: **Types of recovery performed by the cluster**
 
    +-------+--------------------------------------------+--------------------------------------+
    | Type  | Description                                | Action Taken by the Cluster          |
    +=======+============================================+======================================+
    | soft  | .. index::                                 | Restart the resource or move it to a |
    |       |    single: OCF resource agent; soft error  | new location                         |
    |       |                                            |                                      |
    |       | A transient error occurred                 |                                      |
    +-------+--------------------------------------------+--------------------------------------+
    | hard  | .. index::                                 | Move the resource elsewhere and      |
    |       |    single: OCF resource agent; hard error  | prevent it from being retried on the |
    |       |                                            | current node                         |
    |       | A non-transient error that                 |                                      |
    |       | may be specific to the                     |                                      |
    |       | current node                               |                                      |
    +-------+--------------------------------------------+--------------------------------------+
    | fatal | .. index::                                 | Stop the resource and prevent it     |
    |       |    single: OCF resource agent; fatal error | from being started on any cluster    |
    |       |                                            | node                                 |
    |       | A non-transient error that                 |                                      |
    |       | will be common to all                      |                                      |
    |       | cluster nodes (e.g. a bad                  |                                      |
    |       | configuration was specified)               |                                      |
    +-------+--------------------------------------------+--------------------------------------+
 
 .. _ocf_return_codes:
 
 OCF Return Codes
 ________________
 
 The following table outlines the different OCF return codes and the type of
 recovery the cluster will initiate when a failure code is received. Although
 counterintuitive, even actions that return 0 (``OCF_SUCCESS``) can be
 considered to have failed, if 0 was not the expected return value.
 
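 For illustration (again using the hypothetical ``big-corp/big-app`` agent),
 you can observe an agent's return code by invoking it directly with an action
 name, passing its parameters as ``OCF_RESKEY_*`` environment variables:
 
 .. code-block:: none
 
    # OCF_RESKEY_ip=192.0.2.10 \
        /usr/lib/ocf/resource.d/big-corp/big-app monitor ; echo "result: $?"
 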
 .. table:: **OCF Exit Codes and their Recovery Types**
 
    +-------+-----------------------+---------------------------------------------------+----------+
    | Exit  | OCF Alias             | Description                                       | Recovery |
    | Code  |                       |                                                   |          |
    +=======+=======================+===================================================+==========+
    | 0     | OCF_SUCCESS           | .. index::                                        | soft     |
    |       |                       |    single: OCF_SUCCESS                            |          |
    |       |                       |    single: OCF return code; OCF_SUCCESS           |          |
    |       |                       |    pair: OCF return code; 0                       |          |
    |       |                       |                                                   |          |
    |       |                       | Success. The command completed successfully.      |          |
    |       |                       | This is the expected result for all start,        |          |
    |       |                       | stop, promote and demote commands.                |          |
    +-------+-----------------------+---------------------------------------------------+----------+
    | 1     | OCF_ERR_GENERIC       | .. index::                                        | soft     |
    |       |                       |    single: OCF_ERR_GENERIC                        |          |
    |       |                       |    single: OCF return code; OCF_ERR_GENERIC       |          |
    |       |                       |    pair: OCF return code; 1                       |          |
    |       |                       |                                                   |          |
    |       |                       | Generic "there was a problem" error code.         |          |
    +-------+-----------------------+---------------------------------------------------+----------+
    | 2     | OCF_ERR_ARGS          | .. index::                                        | hard     |
    |       |                       |     single: OCF_ERR_ARGS                          |          |
    |       |                       |     single: OCF return code; OCF_ERR_ARGS         |          |
    |       |                       |     pair: OCF return code; 2                      |          |
    |       |                       |                                                   |          |
-   |       |                       | The resource's configuration is not valid on      |          |
-   |       |                       | this machine. E.g. it refers to a location        |          |
-   |       |                       | not found on the node.                            |          |
+   |       |                       | The resource's parameter values are not valid on  |          |
+   |       |                       | this machine (for example, a value refers to a    |          |
+   |       |                       | file not found on the local host).                |          |
    +-------+-----------------------+---------------------------------------------------+----------+
    | 3     | OCF_ERR_UNIMPLEMENTED | .. index::                                        | hard     |
    |       |                       |    single: OCF_ERR_UNIMPLEMENTED                  |          |
    |       |                       |    single: OCF return code; OCF_ERR_UNIMPLEMENTED |          |
    |       |                       |    pair: OCF return code; 3                       |          |
    |       |                       |                                                   |          |
    |       |                       | The requested action is not implemented.          |          |
    +-------+-----------------------+---------------------------------------------------+----------+
    | 4     | OCF_ERR_PERM          | .. index::                                        | hard     |
    |       |                       |    single: OCF_ERR_PERM                           |          |
    |       |                       |    single: OCF return code; OCF_ERR_PERM          |          |
    |       |                       |    pair: OCF return code; 4                       |          |
    |       |                       |                                                   |          |
    |       |                       | The resource agent does not have                  |          |
    |       |                       | sufficient privileges to complete the task.       |          |
    +-------+-----------------------+---------------------------------------------------+----------+
    | 5     | OCF_ERR_INSTALLED     | .. index::                                        | hard     |
    |       |                       |    single: OCF_ERR_INSTALLED                      |          |
    |       |                       |    single: OCF return code; OCF_ERR_INSTALLED     |          |
    |       |                       |    pair: OCF return code; 5                       |          |
    |       |                       |                                                   |          |
    |       |                       | The tools required by the resource are            |          |
    |       |                       | not installed on this machine.                    |          |
    +-------+-----------------------+---------------------------------------------------+----------+
    | 6     | OCF_ERR_CONFIGURED    | .. index::                                        | fatal    |
    |       |                       |    single: OCF_ERR_CONFIGURED                     |          |
    |       |                       |    single: OCF return code; OCF_ERR_CONFIGURED    |          |
    |       |                       |    pair: OCF return code; 6                       |          |
    |       |                       |                                                   |          |
-   |       |                       | The resource's configuration is invalid.          |          |
-   |       |                       | E.g. required parameters are missing.             |          |
+   |       |                       | The resource's parameter values are inherently    |          |
+   |       |                       | invalid (for example, a required parameter was    |          |
+   |       |                       | not given).                                       |          |
    +-------+-----------------------+---------------------------------------------------+----------+
    | 7     | OCF_NOT_RUNNING       | .. index::                                        | N/A      |
    |       |                       |    single: OCF_NOT_RUNNING                        |          |
    |       |                       |    single: OCF return code; OCF_NOT_RUNNING       |          |
    |       |                       |    pair: OCF return code; 7                       |          |
    |       |                       |                                                   |          |
-   |       |                       | The resource is safely stopped. The cluster       |          |
-   |       |                       | will not attempt to stop a resource that          |          |
-   |       |                       | returns this for any action.                      |          |
+   |       |                       | The resource is safely stopped. This should only  |          |
+   |       |                       | be returned by monitor actions, not stop actions. |          |
    +-------+-----------------------+---------------------------------------------------+----------+
    | 8     | OCF_RUNNING_PROMOTED  | .. index::                                        | soft     |
    |       |                       |    single: OCF_RUNNING_PROMOTED                   |          |
    |       |                       |    single: OCF return code; OCF_RUNNING_PROMOTED  |          |
    |       |                       |    pair: OCF return code; 8                       |          |
    |       |                       |                                                   |          |
    |       |                       | The resource is running in the promoted role.     |          |
    +-------+-----------------------+---------------------------------------------------+----------+
    | 9     | OCF_FAILED_PROMOTED   | .. index::                                        | soft     |
    |       |                       |    single: OCF_FAILED_PROMOTED                    |          |
    |       |                       |    single: OCF return code; OCF_FAILED_PROMOTED   |          |
    |       |                       |    pair: OCF return code; 9                       |          |
    |       |                       |                                                   |          |
    |       |                       | The resource is (or might be) in the promoted     |          |
    |       |                       | role but has failed. The resource will be         |          |
    |       |                       | demoted, stopped and then started (and possibly   |          |
    |       |                       | promoted) again.                                  |          |
    +-------+-----------------------+---------------------------------------------------+----------+
+   | 190   | OCF_DEGRADED          | .. index::                                        | none     |
+   |       |                       |    single: OCF_DEGRADED                           |          |
+   |       |                       |    single: OCF return code; OCF_DEGRADED          |          |
+   |       |                       |    pair: OCF return code; 190                     |          |
+   |       |                       |                                                   |          |
+   |       |                       | The resource is properly active, but in such a    |          |
+   |       |                       | condition that future failures are more likely.   |          |
+   +-------+-----------------------+---------------------------------------------------+----------+
+   | 191   | OCF_DEGRADED_PROMOTED | .. index::                                        | none     |
+   |       |                       |    single: OCF_DEGRADED_PROMOTED                  |          |
+   |       |                       |    single: OCF return code; OCF_DEGRADED_PROMOTED |          |
+   |       |                       |    pair: OCF return code; 191                     |          |
+   |       |                       |                                                   |          |
+   |       |                       | The resource is properly active in the promoted   |          |
+   |       |                       | role, but in such a condition that future         |          |
+   |       |                       | failures are more likely.                         |          |
+   +-------+-----------------------+---------------------------------------------------+----------+
    | other | *none*                | Custom error code.                                | soft     |
    +-------+-----------------------+---------------------------------------------------+----------+
 
 Exceptions to the recovery handling described above:
 
 * Probes (non-recurring monitor actions) that find a resource active
   (or in the promoted role) will not result in recovery action unless it is
   also found active elsewhere.
 * The recovery action taken when a resource is found active more than
   once is determined by the resource's ``multiple-active`` property.
 * Recurring actions that return ``OCF_ERR_UNIMPLEMENTED``
   do not cause any type of recovery.
+* Actions that return one of the "degraded" codes will be treated the same as
+  if they had returned success, but status output will indicate that the
+  resource is degraded.
 
 
 .. index::
    single: resource agent; LSB
    single: LSB resource agent
    single: init script
 
 LSB Resource Agents (Init Scripts)
 ##################################
 
 LSB Compliance
 ______________
 
 The relevant part of the
 `LSB specifications <http://refspecs.linuxfoundation.org/lsb.shtml>`_
 includes a description of all the return codes listed here.
     
 Assuming `some_service` is configured correctly and currently
 inactive, the following sequence will help you determine if it is
 LSB-compatible:
 
 #. Start (stopped):
  
    .. code-block:: none
 
       # /etc/init.d/some_service start ; echo "result: $?"
 
    * Did the service start?
    * Did the echo command print ``result: 0`` (in addition to the init script's
      usual output)?
 
 #. Status (running):
  
    .. code-block:: none
 
       # /etc/init.d/some_service status ; echo "result: $?"
 
    * Did the script accept the command?
    * Did the script indicate the service was running?
    * Did the echo command print ``result: 0`` (in addition to the init script's
      usual output)?
 
 #. Start (running):
  
    .. code-block:: none
 
       # /etc/init.d/some_service start ; echo "result: $?"
 
    * Is the service still running?
    * Did the echo command print ``result: 0`` (in addition to the init
     script's usual output)?
 
 #. Stop (running):
  
    .. code-block:: none
 
       # /etc/init.d/some_service stop ; echo "result: $?"
 
    * Was the service stopped?
    * Did the echo command print ``result: 0`` (in addition to the init
      script's usual output)?
 
 #. Status (stopped):
  
    .. code-block:: none
 
       # /etc/init.d/some_service status ; echo "result: $?"
 
    * Did the script accept the command?
    * Did the script indicate the service was not running?
    * Did the echo command print ``result: 3`` (in addition to the init
      script's usual output)?
 
 #. Stop (stopped):
  
    .. code-block:: none
 
       # /etc/init.d/some_service stop ; echo "result: $?"
 
    * Is the service still stopped?
    * Did the echo command print ``result: 0`` (in addition to the init
      script's usual output)?
 
 #. Status (failed):
 
    This step is not readily testable and relies on manual inspection of the script.
 
    The script can use one of the error codes (other than 3) listed in the
    LSB spec to indicate that it is active but failed. This tells the
    cluster that before moving the resource to another node, it needs to
    stop it on the existing one first.
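
    For illustration, a ``status`` implementation that distinguishes "failed"
    from "stopped" might look like the following sketch (the service name and
    pid-file path are placeholders):

    .. code-block:: none

       status() {
           if pidof some_service >/dev/null; then
               return 0   # running
           elif [ -e /var/run/some_service.pid ]; then
               return 1   # program is dead, but pid file exists
           fi
           return 3       # program is not running
       }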
 
 If the answer to any of the above questions is no, then the script is not
 LSB-compliant. Your options are then to either fix the script or write an OCF
 agent based on the existing script.
diff --git a/doc/sphinx/Pacemaker_Administration/cluster.rst b/doc/sphinx/Pacemaker_Administration/cluster.rst
index 069121f042..3713733418 100644
--- a/doc/sphinx/Pacemaker_Administration/cluster.rst
+++ b/doc/sphinx/Pacemaker_Administration/cluster.rst
@@ -1,71 +1,21 @@
 .. index::
    single: cluster layer
 
 The Cluster Layer
 -----------------
 
-Pacemaker and the Cluster Layer
-###############################
-
 Pacemaker utilizes an underlying cluster layer for two purposes:
 
 * obtaining quorum
 * messaging between nodes
 
-Currently, only Corosync 2 and later is supported for this layer.
-
 .. index::
    single: cluster layer; Corosync
    single: Corosync
 
-Managing Nodes in a Corosync-Based Cluster
-##########################################
-
-.. index::
-   pair: Corosync; add cluster node
-
-Adding a New Corosync Node
-__________________________
-
-To add a new node:
-
-#. Install Corosync and Pacemaker on the new host.
-#. Copy ``/etc/corosync/corosync.conf`` and ``/etc/corosync/authkey`` (if it
-   exists) from an existing node. You may need to modify the ``mcastaddr``
-   option to match the new node's IP address.
-#. Start the cluster software on the new host. If a log message containing
-   "Invalid digest" appears from Corosync, the keys are not consistent between
-   the machines.
-
-.. index::
-   pair: Corosync; remove cluster node
-
-Removing a Corosync Node
-________________________
-
-Because the messaging and membership layers are the authoritative
-source for cluster nodes, deleting them from the CIB is not a complete
-solution.  First, one must arrange for corosync to forget about the
-node (**pcmk-1** in the example below).
-
-#. Stop the cluster on the host to be removed. How to do this will vary with
-   your operating system and installed versions of cluster software, for example,
-   ``pcs cluster stop`` if you are using pcs for cluster management.
-#. From one of the remaining active cluster nodes, tell Pacemaker to forget
-   about the removed host, which will also delete the node from the CIB:
-
-   .. code-block:: none
-
-      # crm_node -R pcmk-1
-
-.. index::
-   pair: Corosync; replace cluster node
-
-Replacing a Corosync Node
-_________________________
-
-To replace an existing cluster node:
+Currently, only Corosync 2 and later is supported for this layer.
 
-#. Make sure the old node is completely stopped.
-#. Give the new machine the same hostname and IP address as the old one.
-#. Follow the procedure above for adding a node.
+This document assumes you have already configured the cluster nodes in
+Corosync. High-level cluster management tools are available that can configure
+Corosync for you. If you want the lower-level details, see the
+`Corosync documentation <https://corosync.github.io/corosync/>`_.
diff --git a/doc/sphinx/Pacemaker_Administration/installing.rst b/doc/sphinx/Pacemaker_Administration/installing.rst
index 179f4fe665..44a3f5f119 100644
--- a/doc/sphinx/Pacemaker_Administration/installing.rst
+++ b/doc/sphinx/Pacemaker_Administration/installing.rst
@@ -1,112 +1,9 @@
 Installing Cluster Software
 ---------------------------
 
 .. index:: installation
 
-Installing the Software
-#######################
-
 Most major Linux distributions have Pacemaker packages in their standard
 package repositories, or the software can be built from source code.
 See the `Install wiki page <https://wiki.clusterlabs.org/wiki/Install>`_
 for details.
-
-Enabling Pacemaker
-##################
-
-.. index::
-   pair: configuration; Corosync
-
-Enabling Pacemaker For Corosync version 2 and greater
-_____________________________________________________
-
-High-level cluster management tools are available that can configure
-corosync for you. This document focuses on the lower-level details
-if you want to configure corosync yourself.
-
-Corosync configuration is normally located in
-``/etc/corosync/corosync.conf``.
-
-.. topic:: Corosync configuration file for two nodes **myhost1** and **myhost2**
-
-   .. code-block:: none
-
-      totem {
-        version: 2
-        secauth: off
-        cluster_name: mycluster
-        transport: udpu
-      }
-
-      nodelist {
-        node {
-              ring0_addr: myhost1
-              nodeid: 1
-             }
-        node {
-              ring0_addr: myhost2
-              nodeid: 2
-             }
-      }
-
-      quorum {
-        provider: corosync_votequorum
-        two_node: 1
-      }
-
-      logging {
-        to_syslog: yes
-      }
-
-.. topic:: Corosync configuration file for three nodes **myhost1**, **myhost2** and **myhost3**
-
-   .. code-block:: none
-
-      totem {
-        version: 2
-        secauth: off
-        cluster_name: mycluster
-        transport: udpu
-      }
-
-      nodelist {
-        node {
-              ring0_addr: myhost1
-              nodeid: 1
-        }
-        node {
-              ring0_addr: myhost2
-              nodeid: 2
-        }
-        node {
-              ring0_addr: myhost3
-              nodeid: 3
-        }
-      }
-
-      quorum {
-        provider: corosync_votequorum
-      }
-
-      logging {
-        to_syslog: yes
-      }
-
-In the above examples, the ``totem`` section defines what protocol version and
-options (including encryption) to use, [#]_
-and gives the cluster a unique name (``mycluster`` in these examples).
-
-The ``node`` section lists the nodes in this cluster.
-
-The ``quorum`` section defines how the cluster uses quorum. The important thing
-is that two-node clusters must be handled specially, so ``two_node: 1`` must be
-defined for two-node clusters (it will be ignored for clusters of any other
-size).
-
-The ``logging`` section should be self-explanatory.
-
-.. rubric:: Footnotes
-
-.. [#] Please consult the Corosync website (http://www.corosync.org/) and
-       documentation for details on enabling encryption and peer authentication
-       for the cluster.
diff --git a/doc/sphinx/Pacemaker_Development/helpers.rst b/doc/sphinx/Pacemaker_Development/helpers.rst
index 1c4cc74fc5..74b8166472 100644
--- a/doc/sphinx/Pacemaker_Development/helpers.rst
+++ b/doc/sphinx/Pacemaker_Development/helpers.rst
@@ -1,452 +1,452 @@
 C Development Helpers
 ---------------------
 
 .. index::
    single: unit testing
 
 Refactoring
 ###########
 
 Pacemaker uses an optional tool called `coccinelle <https://coccinelle.gitlabpages.inria.fr/website/>`_
 to do automatic refactoring.  coccinelle is a very complicated tool that can be
 difficult to understand, and the existing documentation makes it pretty tough
 to get started.  Much of the documentation is either aimed at kernel developers
 or takes the form of grammars.
 
 However, it can apply very complex transformations across an entire source tree.
 This is useful for tasks like code refactoring, changing APIs (number or type of
 arguments, etc.), catching functions that should not be called, and changing
 existing patterns.
 
 coccinelle is driven by input scripts called `semantic patches <https://coccinelle.gitlabpages.inria.fr/website/docs/index.html>`_
 written in its own language.  These scripts bear a passing resemblance to source
 code patches and tell coccinelle how to match and modify a piece of source
 code.  They are stored in ``devel/coccinelle`` and each script either contains
 a single source transformation or several related transformations.  In general,
 we try to keep these as simple as possible.
 
 In Pacemaker development, we use a couple of targets in ``devel/Makefile.am`` to
 control coccinelle.  The ``cocci`` target tries to apply each script to every
 Pacemaker source file, printing out any changes it would make to the console.
 The ``cocci-inplace`` target does the same but also makes those changes to the
 source files.  A variety of warnings might also be printed.  If you aren't working
 on a new script, these can usually be ignored.
 
 If you are working on a new coccinelle script, it can be useful (and faster) to
 skip everything else and only run the new script.  The ``COCCI_FILES`` variable
 can be used for this:
 
 .. code-block:: none
 
    $ make -C devel COCCI_FILES=coccinelle/new-file.cocci cocci
 
 This variable is also used for preventing some coccinelle scripts in the Pacemaker
 source tree from running.  Some scripts are disabled because they are not currently
 fully working or because they are there as templates.  When adding a new script,
 remember to add it to this variable if it should always be run.
 
 One complication when writing coccinelle scripts is that certain Pacemaker source
 files may not use private functions (those whose name starts with ``pcmk__``).
 Handling this requires work in both the Makefile and in the coccinelle scripts.
 
 The Makefile deals with this by maintaining two lists of source files: those that
 may use private functions and those that may not.  For those that may, a special
 argument (``-D internal``) is added to the coccinelle command line.  This creates
 a virtual dependency named ``internal``.
 
 In the coccinelle scripts, those transformations that modify source code to use
 a private function also have a dependency on ``internal``.  If that dependency
 was given on the command line, the transformation will be run.  Otherwise, it will
 be skipped.
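
 A semantic patch using this dependency might look like the following sketch
 (the transformation itself is illustrative, not one of the actual scripts in
 ``devel/coccinelle``):

 .. code-block:: none

    virtual internal

    @ depends on internal @
    expression s1, s2;
    @@
    - safe_str_eq(s1, s2)
    + pcmk__str_eq(s1, s2, pcmk__str_casei)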
 
 This means that not all instances of an older style of code will be changed after
 running a given transformation.  Some developer intervention is still necessary
 to know whether a source code block should have been changed or not.
 
 Probably the easiest way to learn how to use coccinelle is by following other
 people's scripts.  In addition to the ones in the Pacemaker source directory,
 there are several others on the `coccinelle website <https://coccinelle.gitlabpages.inria.fr/website/rules/>`_.
 
 Sanitizers
 ##########
 
 gcc supports a variety of run-time checks called sanitizers.  These can be used to
 catch programming errors with memory, race conditions, various undefined behavior
 conditions, and more.  Because these are run-time checks, they should only be used
 during development and not in compiled packages or production code.
 
 Certain sanitizers cannot be combined with others because their run-time checks
 interfere with each other.  Instead of trying to figure out which combinations
 work, it is simplest to enable just one at a time.
 
 Each supported sanitizer requires an installed library.  In addition to just
 enabling a sanitizer, its use can be configured with environment variables.
 For example:
 
 .. code-block:: none
 
    $ ASAN_OPTIONS=verbosity=1:replace_str=true crm_mon -1R
 
 Pacemaker supports the following subset of gcc's sanitizers:
 
 +--------------------+-------------------------+----------+----------------------+
 | Sanitizer          | Configure Option        | Library  | Environment Variable |
 +====================+=========================+==========+======================+
 | Address            | --with-sanitizers=asan  | libasan  | ASAN_OPTIONS         |
 +--------------------+-------------------------+----------+----------------------+
 | Threads            | --with-sanitizers=tsan  | libtsan  | TSAN_OPTIONS         |
 +--------------------+-------------------------+----------+----------------------+
 | Undefined behavior | --with-sanitizers=ubsan | libubsan | UBSAN_OPTIONS        |
 +--------------------+-------------------------+----------+----------------------+
 
 The undefined behavior sanitizer further supports suboptions that need to be
 given as CFLAGS when configuring pacemaker:
 
 .. code-block:: none
 
    $ CFLAGS=-fsanitize=integer-divide-by-zero ./configure --with-sanitizers=ubsan
 
 For more information, see the `gcc documentation <https://gcc.gnu.org/onlinedocs/gcc/Instrumentation-Options.html>`_
 which also provides links to more information on each sanitizer.
 
 Unit Testing
 ############
 
 Where possible, changes to the C side of Pacemaker should be accompanied by unit
 tests.  Much of Pacemaker cannot effectively be unit tested (and there are other
 testing systems used for those parts), but the ``lib`` subdirectory is pretty easy
 to write tests for.
 
 Pacemaker uses the `cmocka unit testing framework <https://cmocka.org/>`_ which looks
 a lot like other unit testing frameworks for C and should be fairly familiar.  In
 addition to regular unit tests, cmocka also gives us the ability to use
 `mock functions <https://en.wikipedia.org/wiki/Mock_object>`_ for unit testing
 functions that would otherwise be difficult to test.
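
 As a sketch of what mocking can look like (assuming the test is linked with
 ``-Wl,--wrap=getenv`` and ``-lcmocka``; this illustrates the general cmocka
 technique rather than Pacemaker's actual mocking setup):

 .. code-block:: c

    #include <stdarg.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <setjmp.h>
    #include <stdlib.h>
    #include <string.h>
    #include <cmocka.h>

    /* With -Wl,--wrap=getenv, calls to getenv() are redirected here */
    char *
    __wrap_getenv(const char *name)
    {
        (void) name;
        /* Hand back whatever the test queued up with will_return() */
        return mock_ptr_type(char *);
    }

    /* Hypothetical function under test */
    static bool
    debug_enabled(void)
    {
        const char *value = getenv("PCMK_debug");

        return (value != NULL) && (strcmp(value, "1") == 0);
    }

    static void
    env_set(void **state)
    {
        will_return(__wrap_getenv, "1");
        assert_true(debug_enabled());
    }

    static void
    env_unset(void **state)
    {
        will_return(__wrap_getenv, NULL);
        assert_false(debug_enabled());
    }

    int
    main(int argc, char **argv)
    {
        const struct CMUnitTest tests[] = {
            cmocka_unit_test(env_set),
            cmocka_unit_test(env_unset),
        };

        cmocka_set_message_output(CM_OUTPUT_TAP);
        return cmocka_run_group_tests(tests, NULL, NULL);
    }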
 
 Organization
 ____________
 
 Pay close attention to the organization and naming of test cases to ensure the
 unit tests continue to work as they should.
 
 Tests are spread throughout the source tree, alongside the source code they test.
 For instance, all the tests for the source code in ``lib/common/`` are in the
 ``lib/common/tests`` directory.  If there is no ``tests`` subdirectory, there are no
 tests for that library yet.
 
 Under that directory, there is a ``Makefile.am`` and additional subdirectories.  Each
 subdirectory contains the tests for a single library source file.  For instance,
 all the tests for ``lib/common/strings.c`` are in the ``lib/common/tests/strings``
 directory.  Note that the test subdirectory does not have a ``.c`` suffix.  If there
 is no test subdirectory, there are no tests for that file yet.
 
 Finally, under that directory, there is a ``Makefile.am`` and then various source
 files.  Each of these source files tests the single function that it is named
 after.  For instance, ``lib/common/tests/strings/pcmk__btoa_test.c`` tests the
-``pcmk__btoa_test()`` function in ``lib/common/strings.c``.  If there is no test
+``pcmk__btoa()`` function in ``lib/common/strings.c``.  If there is no test
 source file, there are no tests for that function yet.
 
 The ``_test`` suffix on the test source file is important.  All tests have this
 suffix, which means all the compiled test cases will also end with this suffix.
 That lets us ignore all the compiled tests with a single line in ``.gitignore``:
 
 .. code-block:: none
 
    /lib/*/tests/*/*_test
 
 Adding a test
 _____________
 
 Testing a new function in an already testable source file
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Follow these steps if you want to test a function in a source file where there
 are already other tested functions.  For the purposes of this example, we will
 add a test for the ``pcmk__scan_port()`` function in ``lib/common/strings.c``.  As
 you can see, there are already tests for other functions in this same file in
 the ``lib/common/tests/strings`` directory.
 
 * cd into ``lib/common/tests/strings``
 * Add the new file to the ``check_PROGRAMS`` variable in ``Makefile.am``,
   making it something like this:
 
   .. code-block:: none
 
       check_PROGRAMS = \
              pcmk__add_word_test             \
              pcmk__btoa_test                 \
              pcmk__scan_port_test
 
 * Create a new ``pcmk__scan_port_test.c`` file, copying the copyright and include
   boilerplate from another file in the same directory.
 * Continue with the steps in `Writing the test`_.
 
 Testing a function in a source file without tests
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Follow these steps if you want to test a function in a source file where there
 are not already other tested functions, but there are tests for other files in
 the same library.  For the purposes of this example, we will add a test for the
 ``pcmk_acl_required()`` function in ``lib/common/acls.c``.  At the time of this
 documentation being written, no tests existed for that source file, so there
 is no ``lib/common/tests/acls`` directory.
 
 * Add to ``AC_CONFIG_FILES`` in the top-level ``configure.ac`` file so the build
   process knows to use the directory we're about to create.  That variable would
   now look something like:
 
   .. code-block:: none
 
      dnl Other files we output
      AC_CONFIG_FILES(Makefile                                            \
                      ...
                      lib/common/tests/Makefile                           \
                      lib/common/tests/acls/Makefile                      \
                      lib/common/tests/agents/Makefile                    \
                      ...
      )
 
 * cd into ``lib/common/tests``
 * Add to the ``SUBDIRS`` variable in ``Makefile.am``, making it something like:
 
   .. code-block:: none
 
      SUBDIRS = agents acls cmdline flags operations strings utils xpath results
 
 * Create a new ``acls`` directory, copying the ``Makefile.am`` from some other
   directory.
 * cd into ``acls``
 * Get rid of any existing values for ``check_PROGRAMS`` and set it to
   ``pcmk_acl_required_test`` like so:
 
   .. code-block:: none
 
      check_PROGRAMS = pcmk_acl_required_test
 
 * Follow the steps in `Testing a new function in an already testable source file`_
   to create the new ``pcmk_acl_required_test.c`` file.
 
 Testing a function in a library without tests
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Adding a test case for a function in a library that doesn't have any test cases
 to begin with is only slightly more complicated.  In general, the steps are the
 same as for the previous section, except with an additional layer of directory
 creation.
 
 For the purposes of this example, we will add a test case for the
 ``lrmd_send_resource_alert()`` function in ``lib/lrmd/lrmd_alerts.c``.  Note that this
 may not be a very good function or even library to write actual unit tests for.
 
 * Add to ``AC_CONFIG_FILES`` in the top-level ``configure.ac`` file so the build
   process knows to use the directory we're about to create.  That variable would
   now look something like:
 
   .. code-block:: none
 
      dnl Other files we output
      AC_CONFIG_FILES(Makefile                                            \
                      ...
                      lib/lrmd/Makefile                                   \
                      lib/lrmd/tests/Makefile                             \
                      lib/services/Makefile                               \
                      ...
      )
 
 * cd into ``lib/lrmd``
 * Create a ``SUBDIRS`` variable in ``Makefile.am`` if it doesn't already exist.
   Most libraries will not already have this variable.
 
   .. code-block:: none
 
      SUBDIRS = tests
 
 * Create a new ``tests`` directory and add a ``Makefile.am`` with the following
   contents:
 
   .. code-block:: none
 
      SUBDIRS = lrmd_alerts
 
 * Follow the steps in `Testing a function in a source file without tests`_ to create
   the rest of the new directory structure.
 
 * Follow the steps in `Testing a new function in an already testable source file`_
   to create the new ``lrmd_send_resource_alert_test.c`` file.
 
 Adding to an existing test case
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 If all you need to do is add additional test cases to an existing file, none of
 the above work is necessary.  All you need to do is find the test source file
 whose name matches your function, add the new test cases to it, and then
 follow the instructions in `Writing the test`_.
 
 Writing the test
 ________________
 
 A test case file contains a fair amount of boilerplate.  For this reason, it's
 usually easiest to just copy an existing file and adapt it to your needs.  However,
 here's the basic structure:
 
 .. code-block:: c
 
    /*
     * Copyright 2021 the Pacemaker project contributors
     *
     * The version control history for this file may have further details.
     *
     * This source code is licensed under the GNU Lesser General Public License
     * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
     */
 
    #include <crm_internal.h>
 
    #include <stdarg.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <setjmp.h>
    #include <cmocka.h>
 
    /* Put your test-specific includes here */
 
    /* Put your test functions here */
 
    int
    main(int argc, char **argv)
    {
        /* Register your test functions here */
 
        cmocka_set_message_output(CM_OUTPUT_TAP);
        return cmocka_run_group_tests(tests, NULL, NULL);
    }
 
 Each test-specific function should test one aspect of the library function,
 though it can include many assertions if there are many ways of testing that
 one aspect.  For instance, there might be multiple ways of testing regular
 expression matching:
 
 .. code-block:: c
 
    static void
    regex(void **state) {
        const char *s1 = "abcd";
        const char *s2 = "ABCD";
 
        assert_int_equal(pcmk__strcmp(NULL, "a..d", pcmk__str_regex), 1);
        assert_int_equal(pcmk__strcmp(s1, NULL, pcmk__str_regex), 1);
        assert_int_equal(pcmk__strcmp(s1, "a..d", pcmk__str_regex), 0);
    }
 
 Each test-specific function must also be registered or it will not be called.
 This is done with ``cmocka_unit_test()`` in the ``main`` function:
 
 .. code-block:: c
 
    const struct CMUnitTest tests[] = {
        cmocka_unit_test(regex),
    };
 
 Running
 _______
 
 If you had to create any new files or directories, you will first need to run
 ``./configure`` from the top level of the source directory.  This will regenerate
 the Makefiles throughout the tree.  If you skip this step, your new tests will
 not be built, and you'll be left wondering why the output doesn't match what
 you expected.
 
 To run the tests, simply run ``make check`` after building the source with
 ``make``.  The test cases in each directory will be built and then run.
 This should not take long.  If all the tests succeed, you will be back at the
 prompt.  Scrolling back through the history, you should see lines like the
 following:
 
 .. code-block:: none
 
     PASS: pcmk__strcmp_test 1 - same_pointer
     PASS: pcmk__strcmp_test 2 - one_is_null
     PASS: pcmk__strcmp_test 3 - case_matters
     PASS: pcmk__strcmp_test 4 - case_insensitive
     PASS: pcmk__strcmp_test 5 - regex
     ============================================================================
     Testsuite summary for pacemaker 2.1.0
     ============================================================================
     # TOTAL: 33
     # PASS:  33
     # SKIP:  0
     # XFAIL: 0
     # FAIL:  0
     # XPASS: 0
     # ERROR: 0
     ============================================================================
     make[7]: Leaving directory '/home/clumens/src/pacemaker/lib/common/tests/strings'
 
 The testing process will quit on the first failed test, and you will see lines
 like these:
 
 .. code-block:: none
 
    PASS: pcmk__scan_double_test 3 - trailing_chars
    FAIL: pcmk__scan_double_test 4 - typical_case
    PASS: pcmk__scan_double_test 5 - double_overflow
    PASS: pcmk__scan_double_test 6 - double_underflow
    ERROR: pcmk__scan_double_test - exited with status 1
    PASS: pcmk__starts_with_test 1 - bad_input
    ============================================================================
    Testsuite summary for pacemaker 2.1.0
    ============================================================================
    # TOTAL: 56
    # PASS:  54
    # SKIP:  0
    # XFAIL: 0
    # FAIL:  1
    # XPASS: 0
    # ERROR: 1
    ============================================================================
    See lib/common/tests/strings/test-suite.log
    Please report to users@clusterlabs.org
    ============================================================================
    make[7]: *** [Makefile:1218: test-suite.log] Error 1
    make[7]: Leaving directory '/home/clumens/src/pacemaker/lib/common/tests/strings'
 
 The failure is in ``lib/common/tests/strings/test-suite.log``:
 
 .. code-block:: none
 
    ERROR: pcmk__scan_double_test
    =============================
 
    1..6
    ok 1 - empty_input_string
    PASS: pcmk__scan_double_test 1 - empty_input_string
    ok 2 - bad_input_string
    PASS: pcmk__scan_double_test 2 - bad_input_string
    ok 3 - trailing_chars
    PASS: pcmk__scan_double_test 3 - trailing_chars
    not ok 4 - typical_case
    FAIL: pcmk__scan_double_test 4 - typical_case
    # 0.000000 != 3.000000
    # pcmk__scan_double_test.c:80: error: Failure!
    ok 5 - double_overflow
    PASS: pcmk__scan_double_test 5 - double_overflow
    ok 6 - double_underflow
    PASS: pcmk__scan_double_test 6 - double_underflow
    # not ok - tests
    ERROR: pcmk__scan_double_test - exited with status 1
 
 At this point, you need to determine whether your test case is incorrect or
 whether the code being tested is incorrect.  Fix whichever is wrong and continue.
 
 
 Debugging
 #########
 
 gdb
 ___
 
 If you use ``gdb`` for debugging, some helper functions are defined in
 ``devel/gdbhelpers``, which can be given to ``gdb`` using the ``-x`` option.
 
 From within the debugger, you can then invoke the ``pcmk`` command that
 will describe the helper functions available.
diff --git a/doc/sphinx/Pacemaker_Explained/acls.rst b/doc/sphinx/Pacemaker_Explained/acls.rst
index b9a622a52e..c3cfb6cb70 100644
--- a/doc/sphinx/Pacemaker_Explained/acls.rst
+++ b/doc/sphinx/Pacemaker_Explained/acls.rst
@@ -1,427 +1,432 @@
 .. index::
    single: Access Control List (ACL)
 
 .. _acl:
 
 Access Control Lists (ACLs)
 ---------------------------
 
 By default, the ``root`` user or any user in the ``haclient`` group can modify
 Pacemaker's CIB without restriction. Pacemaker offers *access control lists
 (ACLs)* to provide more fine-grained authorization.
    
 .. important::
 
    Being able to modify the CIB's resource section allows a user to run any
    executable file as root, by configuring it as an LSB resource with a full
    path.
 
 ACL Prerequisites
 #################
    
 In order to use ACLs:
 
 * The ``enable-acl`` :ref:`cluster option <cluster_options>` must be set to
   true (see the example command below this list).
 
 * Desired users must have user accounts in the ``haclient`` group on all
   cluster nodes in the cluster.
 
 * If your CIB was created before Pacemaker 1.1.12, it might need to be updated
   to the current schema (using ``cibadmin --upgrade`` or a higher-level tool
   equivalent) in order to use the syntax documented here.
 
 * Prior to the 2.1.0 release, the Pacemaker software had to have been built
   with ACL support. If you are using an older release, your installation
   supports ACLs only if the output of the command ``pacemakerd --features``
   contains ``acls``. In newer versions, ACLs are always enabled.
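
 For example, ``enable-acl`` can be set with ``crm_attribute`` (higher-level
 tools may provide their own equivalents):

 .. code-block:: none

    # crm_attribute --type crm_config --name enable-acl --update true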
    
 
 .. index::
    single: Access Control List (ACL); acls
    pair: acls; XML element
 
 ACL Configuration
 #################
 
 ACLs are specified within an ``acls`` element of the CIB. The ``acls`` element
 may contain any number of ``acl_role``, ``acl_target``, and ``acl_group``
 elements.
    
 
 .. index::
    single: Access Control List (ACL); acl_role
    pair: acl_role; XML element
 
 ACL Roles
 #########
 
 An ACL *role* is a collection of permissions allowing or denying access to
 particular portions of the CIB. A role is configured with an ``acl_role``
 element in the CIB ``acls`` section.
    
 .. table:: **Properties of an acl_role element**
+   :widths: 1 3
 
    +------------------+-----------------------------------------------------------+
    | Attribute        | Description                                               |
    +==================+===========================================================+
    | id               | .. index::                                                |
    |                  |    single: acl_role; id (attribute)                       |
    |                  |    single: id; acl_role attribute                         |
    |                  |    single: attribute; id (acl_role)                       |
    |                  |                                                           |
    |                  | A unique name for the role *(required)*                   |
    +------------------+-----------------------------------------------------------+
    | description      | .. index::                                                |
    |                  |    single: acl_role; description (attribute)              |
    |                  |    single: description; acl_role attribute                |
    |                  |    single: attribute; description (acl_role)              |
    |                  |                                                           |
    |                  | Arbitrary text (not used by Pacemaker)                    |
    +------------------+-----------------------------------------------------------+
 
 An ``acl_role`` element may contain any number of ``acl_permission`` elements.
    
 .. index::
    single: Access Control List (ACL); acl_permission
    pair: acl_permission; XML element
 
 .. table:: **Properties of an acl_permission element**
+   :widths: 1 3
 
    +------------------+-----------------------------------------------------------+
    | Attribute        | Description                                               |
    +==================+===========================================================+
    | id               | .. index::                                                |
    |                  |    single: acl_permission; id (attribute)                 |
    |                  |    single: id; acl_permission attribute                   |
    |                  |    single: attribute; id (acl_permission)                 |
    |                  |                                                           |
    |                  | A unique name for the permission *(required)*             |
    +------------------+-----------------------------------------------------------+
    | description      | .. index::                                                |
    |                  |    single: acl_permission; description (attribute)        |
    |                  |    single: description; acl_permission attribute          |
    |                  |    single: attribute; description (acl_permission)        |
    |                  |                                                           |
    |                  | Arbitrary text (not used by Pacemaker)                    |
    +------------------+-----------------------------------------------------------+
    | kind             | .. index::                                                |
    |                  |    single: acl_permission; kind (attribute)               |
    |                  |    single: kind; acl_permission attribute                 |
    |                  |    single: attribute; kind (acl_permission)               |
    |                  |                                                           |
    |                  | The access being granted. Allowed values are ``read``,    |
    |                  | ``write``, and ``deny``. A value of ``write`` grants both |
    |                  | read and write access.                                    |
    +------------------+-----------------------------------------------------------+
    | object-type      | .. index::                                                |
    |                  |    single: acl_permission; object-type (attribute)        |
    |                  |    single: object-type; acl_permission attribute          |
    |                  |    single: attribute; object-type (acl_permission)        |
    |                  |                                                           |
    |                  | The name of an XML element in the CIB to which the        |
    |                  | permission applies. (Exactly one of ``object-type``,      |
    |                  | ``xpath``, and ``reference`` must be specified for a      |
    |                  | permission.)                                              |
    +------------------+-----------------------------------------------------------+
    | attribute        | .. index::                                                |
    |                  |    single: acl_permission; attribute (attribute)          |
    |                  |    single: attribute; acl_permission attribute            |
    |                  |    single: attribute; attribute (acl_permission)          |
    |                  |                                                           |
    |                  | If specified, the permission applies only to              |
    |                  | ``object-type`` elements that have this attribute set (to |
    |                  | any value). If not specified, the permission applies to   |
    |                  | all ``object-type`` elements. May only be used with       |
    |                  | ``object-type``.                                          |
    +------------------+-----------------------------------------------------------+
    | reference        | .. index::                                                |
    |                  |    single: acl_permission; reference (attribute)          |
    |                  |    single: reference; acl_permission attribute            |
    |                  |    single: attribute; reference (acl_permission)          |
    |                  |                                                           |
    |                  | The ID of an XML element in the CIB to which the          |
    |                  | permission applies. (Exactly one of ``object-type``,      |
    |                  | ``xpath``, and ``reference`` must be specified for a      |
    |                  | permission.)                                              |
    +------------------+-----------------------------------------------------------+
    | xpath            | .. index::                                                |
    |                  |    single: acl_permission; xpath (attribute)              |
    |                  |    single: xpath; acl_permission attribute                |
    |                  |    single: attribute; xpath (acl_permission)              |
    |                  |                                                           |
    |                  | An `XPath <https://www.w3.org/TR/xpath-10/>`_             |
    |                  | specification selecting an XML element in the CIB to      |
    |                  | which the permission applies. Attributes may be specified |
    |                  | in the XPath to select particular elements, but the       |
    |                  | permissions apply to the entire element. (Exactly one of  |
    |                  | ``object-type``, ``xpath``, and ``reference`` must be     |
    |                  | specified for a permission.)                              |
    +------------------+-----------------------------------------------------------+
 
 .. important::
 
    * Permissions are applied to the selected XML element's entire XML subtree
      (all elements enclosed within it).
    
    * Write permission grants the ability to create, modify, or remove the
      element and its subtree, and also the ability to create any "scaffolding"
      elements (enclosing elements that do not have attributes other than an
      ID).
    
    * Permissions for more specific matches (more deeply nested elements) take
      precedence over more general ones.
    
    * If multiple permissions are configured for the same match (for example, in
      different roles applied to the same user), any ``deny`` permission takes
      precedence, then ``write``, then lastly ``read``.
    
 
 ACL Targets and Groups
 ######################
    
 ACL targets correspond to user accounts on the system.
 
 .. index::
    single: Access Control List (ACL); acl_target
    pair: acl_target; XML element
 
 .. table:: **Properties of an acl_target element**
+   :widths: 1 3
 
    +------------------+-----------------------------------------------------------+
    | Attribute        | Description                                               |
    +==================+===========================================================+
    | id               | .. index::                                                |
    |                  |    single: acl_target; id (attribute)                     |
    |                  |    single: id; acl_target attribute                       |
    |                  |    single: attribute; id (acl_target)                     |
    |                  |                                                           |
    |                  | The name of a user on the system *(required)*             |
    +------------------+-----------------------------------------------------------+
 
 ACL groups may be specified, but are not currently used by Pacemaker. This is
 expected to change in a future version.
    
 .. index::
    single: Access Control List (ACL); acl_group
    pair: acl_group; XML element
 
 .. table:: **Properties of an acl_group element**
+   :widths: 1 3
 
    +------------------+-----------------------------------------------------------+
    | Attribute        | Description                                               |
    +==================+===========================================================+
    | id               | .. index::                                                |
    |                  |    single: acl_group; id (attribute)                      |
    |                  |    single: id; acl_group attribute                        |
    |                  |    single: attribute; id (acl_group)                      |
    |                  |                                                           |
    |                  | The name of a group on the system *(required)*            |
    +------------------+-----------------------------------------------------------+
 
 Each ``acl_target`` and ``acl_group`` element may contain any number of ``role``
 elements.
    
 .. index::
    single: Access Control List (ACL); role
    pair: role; XML element
 
 .. table:: **Properties of a role element**
+   :widths: 1 3
 
    +------------------+-----------------------------------------------------------+
    | Attribute        | Description                                               |
    +==================+===========================================================+
    | id               | .. index::                                                |
    |                  |    single: role; id (attribute)                           |
    |                  |    single: id; role attribute                             |
    |                  |    single: attribute; id (role)                           |
    |                  |                                                           |
    |                  | The ``id`` of an ``acl_role`` element that specifies      |
    |                  | permissions granted to the enclosing target or group.     |
    +------------------+-----------------------------------------------------------+
 
 .. important::
 
    The ``root`` and ``hacluster`` user accounts always have full access to the
    CIB, regardless of ACLs. For all other user accounts, when ``enable-acl`` is
    true, permission to all parts of the CIB is denied by default (permissions
    must be explicitly granted).
    
 ACL Examples
 ############
    
 .. code-block:: xml
 
    <acls>
    
       <acl_role id="read_all">
           <acl_permission id="read_all-cib" kind="read" xpath="/cib" />
       </acl_role>
    
       <acl_role id="operator">
    
           <acl_permission id="operator-maintenance-mode" kind="write"
               xpath="//crm_config//nvpair[@name='maintenance-mode']" />
    
           <acl_permission id="operator-maintenance-attr" kind="write"
               xpath="//nvpair[@name='maintenance']" />
    
           <acl_permission id="operator-target-role" kind="write"
               xpath="//resources//meta_attributes/nvpair[@name='target-role']" />
    
           <acl_permission id="operator-is-managed" kind="write"
               xpath="//resources//nvpair[@name='is-managed']" />
    
           <acl_permission id="operator-rsc_location" kind="write"
               object-type="rsc_location" />
    
       </acl_role>
    
       <acl_role id="administrator">
           <acl_permission id="administrator-cib" kind="write" xpath="/cib" />
       </acl_role>
    
       <acl_role id="minimal">
    
           <acl_permission id="minimal-standby" kind="read"
               description="allow reading standby node attribute (permanent or transient)"
               xpath="//instance_attributes/nvpair[@name='standby']"/>
    
           <acl_permission id="minimal-maintenance" kind="read"
               description="allow reading maintenance node attribute (permanent or transient)"
               xpath="//nvpair[@name='maintenance']"/>
    
           <acl_permission id="minimal-target-role" kind="read"
               description="allow reading resource target roles"
               xpath="//resources//meta_attributes/nvpair[@name='target-role']"/>
    
           <acl_permission id="minimal-is-managed" kind="read"
               description="allow reading resource managed status"
               xpath="//resources//meta_attributes/nvpair[@name='is-managed']"/>
    
           <acl_permission id="minimal-deny-instance-attributes" kind="deny"
               xpath="//instance_attributes"/>
    
           <acl_permission id="minimal-deny-meta-attributes" kind="deny"
               xpath="//meta_attributes"/>
    
           <acl_permission id="minimal-deny-operations" kind="deny"
               xpath="//operations"/>
    
           <acl_permission id="minimal-deny-utilization" kind="deny"
               xpath="//utilization"/>
    
           <acl_permission id="minimal-nodes" kind="read"
               description="allow reading node names/IDs (attributes are denied separately)"
               xpath="/cib/configuration/nodes"/>
    
           <acl_permission id="minimal-resources" kind="read"
               description="allow reading resource names/agents (parameters are denied separately)"
               xpath="/cib/configuration/resources"/>
    
           <acl_permission id="minimal-deny-constraints" kind="deny"
               xpath="/cib/configuration/constraints"/>
    
           <acl_permission id="minimal-deny-topology" kind="deny"
               xpath="/cib/configuration/fencing-topology"/>
    
           <acl_permission id="minimal-deny-op_defaults" kind="deny"
               xpath="/cib/configuration/op_defaults"/>
    
           <acl_permission id="minimal-deny-rsc_defaults" kind="deny"
               xpath="/cib/configuration/rsc_defaults"/>
    
           <acl_permission id="minimal-deny-alerts" kind="deny"
               xpath="/cib/configuration/alerts"/>
    
           <acl_permission id="minimal-deny-acls" kind="deny"
               xpath="/cib/configuration/acls"/>
    
           <acl_permission id="minimal-cib" kind="read"
               description="allow reading cib element and crm_config/status sections"
               xpath="/cib"/>
    
       </acl_role>
    
       <acl_target id="alice">
          <role id="minimal"/>
       </acl_target>
    
       <acl_target id="bob">
          <role id="read_all"/>
       </acl_target>
    
       <acl_target id="carol">
          <role id="read_all"/>
          <role id="operator"/>
       </acl_target>
    
       <acl_target id="dave">
          <role id="administrator"/>
       </acl_target>
    
    </acls>
 
 In the above example, the user ``alice`` has the minimal permissions necessary
 to run basic Pacemaker CLI tools, including using ``crm_mon`` to view the
 cluster status, without being able to modify anything. The user ``bob`` can
 view the entire configuration and status of the cluster, but not make any
 changes. The user ``carol`` can read everything, and change selected cluster
 properties as well as resource roles and location constraints. Finally,
 ``dave`` has full read and write access to the entire CIB.
 
 Looking at the ``minimal`` role in more depth, it is designed to allow read
 access to the ``cib`` tag itself, while denying access to particular portions
 of its subtree (which is the entire CIB).
 
 This is because the DC node is indicated in the ``cib`` tag, so ``crm_mon``
 will not be able to report the DC otherwise. However, this does change the
 security model to allow by default, since any portions of the CIB not
 explicitly denied will be readable. The ``cib`` read access could be removed
 and replaced with read access to just the ``crm_config`` and ``status``
 sections, for a safer approach at the cost of not seeing the DC in status
 output.
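
 That alternative might look something like the following sketch, replacing
 the ``minimal-cib`` permission shown above:

 .. code-block:: xml

    <acl_permission id="minimal-crm_config" kind="read"
        description="allow reading cluster properties"
        xpath="/cib/configuration/crm_config"/>

    <acl_permission id="minimal-status" kind="read"
        description="allow reading cluster status"
        xpath="/cib/status"/>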
 
 For a simpler configuration, the ``minimal`` role allows read access to the
 entire ``crm_config`` section, which contains cluster properties. It would be
 possible to allow read access to specific properties instead (such as
 ``stonith-enabled``, ``dc-uuid``, ``have-quorum``, and ``cluster-name``) to
 restrict access further while still allowing status output, but cluster
 properties are unlikely to be considered sensitive.
 
 
 ACL Limitations
 ###############
 
 Actions performed via IPC rather than the CIB
 _____________________________________________
 
 ACLs apply *only* to the CIB.
 
 That means ACLs apply to command-line tools that operate by reading or writing
 the CIB, such as ``crm_attribute`` when managing permanent node attributes,
 ``crm_mon``, and ``cibadmin``.
 
 However, command-line tools that communicate directly with Pacemaker daemons
 via IPC are not affected by ACLs. For example, users in the ``haclient`` group
 may still do the following, regardless of ACLs:
 
 * Query transient node attribute values using ``crm_attribute`` and
   ``attrd_updater``.
 
 * Query basic node information using ``crm_node``.
 
 * Erase resource operation history using ``crm_resource``.
 
 * Query fencing configuration information, and execute fencing against nodes,
   using ``stonith_admin``.
 
 ACLs and Pacemaker Remote
 _________________________
 
 ACLs apply to commands run on Pacemaker Remote nodes using the Pacemaker Remote
 node's name as the ACL user name.
 
 The idea is that Pacemaker Remote nodes (especially virtual machines and
 containers) are likely to be purpose-built and have different user accounts
 from full cluster nodes.
diff --git a/doc/sphinx/Pacemaker_Explained/advanced-options.rst b/doc/sphinx/Pacemaker_Explained/advanced-options.rst
index 52bf427099..dd10f431a5 100644
--- a/doc/sphinx/Pacemaker_Explained/advanced-options.rst
+++ b/doc/sphinx/Pacemaker_Explained/advanced-options.rst
@@ -1,585 +1,586 @@
 Advanced Configuration
 ----------------------
 
 .. index::
    single: start-delay; operation attribute
    single: interval-origin; operation attribute
    single: interval; interval-origin
    single: operation; interval-origin
    single: operation; start-delay
 
 Specifying When Recurring Actions are Performed
 ###############################################
 
 By default, recurring actions are scheduled relative to when the resource
 started. In some cases, you might prefer that a recurring action start relative
 to a specific date and time. For example, you might schedule an in-depth
 monitor to run once every 24 hours, and want it to run outside business hours.
 
 To do this, set the operation's ``interval-origin``. The cluster uses this point
 to calculate the correct ``start-delay`` such that the operation will occur
 at ``interval-origin`` plus a multiple of the operation interval.
 
 For example, if the recurring operation's interval is 24h, its
 ``interval-origin`` is set to 02:00, and it is currently 14:32, then the
 cluster would initiate the operation after 11 hours and 28 minutes (that is,
 at 02:00 the next day).
 
 The value specified for ``interval`` and ``interval-origin`` can be any
 date/time conforming to the
 `ISO8601 standard <https://en.wikipedia.org/wiki/ISO_8601>`_. By way of
 example, to specify an operation that would run on the first Monday of
 2021 and every Monday after that, you would add:
 
 .. topic:: Example recurring action that runs relative to base date/time
 
    .. code-block:: xml
 
       <op id="intensive-monitor" name="monitor" interval="P7D" interval-origin="2021-W01-1"/>
 
 .. index::
    single: resource; failure recovery
    single: operation; failure recovery
 
 .. _failure-handling:
 
 Handling Resource Failure
 #########################
 
 By default, Pacemaker will attempt to recover failed resources by restarting
 them. However, failure recovery is highly configurable.
 
 .. index::
    single: resource; failure count
    single: operation; failure count
 
 Failure Counts
 ______________
 
 Pacemaker tracks resource failures for each combination of node, resource, and
 operation (start, stop, monitor, etc.).
 
 You can query the fail count for a particular node, resource, and/or operation
 using the ``crm_failcount`` command. For example, to see how many times the
 10-second monitor for ``myrsc`` has failed on ``node1``, run:
 
 .. code-block:: none
 
    # crm_failcount --query -r myrsc -N node1 -n monitor -I 10s
 
 If you omit the node, ``crm_failcount`` will use the local node. If you omit
 the operation and interval, ``crm_failcount`` will display the sum of the fail
 counts for all operations on the resource.
 
 You can use ``crm_resource --cleanup`` or ``crm_failcount --delete`` to clear
 fail counts. For example, to clear the above monitor failures, run:
 
 .. code-block:: none
 
    # crm_resource --cleanup -r myrsc -N node1 -n monitor -I 10s
 
 If you omit the resource, ``crm_resource --cleanup`` will clear failures for
 all resources. If you omit the node, it will clear failures on all nodes. If
 you omit the operation and interval, it will clear the failures for all
 operations on the resource.
 
 .. note::
 
    Even when cleaning up only a single operation, all failed operations will
    disappear from the status display. This allows us to trigger a re-check of
    the resource's current status.
 
 Higher-level tools may provide other commands for querying and clearing
 fail counts.
 
 The ``crm_mon`` tool shows the current cluster status, including any failed
 operations. To see the current fail counts for any failed resources, call
 ``crm_mon`` with the ``--failcounts`` option. This shows the fail counts per
 resource (that is, the sum of any operation fail counts for the resource).
 
 .. index::
    single: migration-threshold; resource meta-attribute
    single: resource; migration-threshold
 
 Failure Response
 ________________
 
 Normally, if a running resource fails, Pacemaker will try to stop it and start
 it again. Pacemaker will choose the best location to start it each time, which
 may be the same node that it failed on.
 
 However, if a resource fails repeatedly, it is possible that there is an
 underlying problem on that node, and you might want to try a different node
 in such a case. Pacemaker allows you to set your preference via the
 ``migration-threshold`` resource meta-attribute. [#]_
 
 If you define ``migration-threshold`` to *N* for a resource, it will be banned
 from the original node after *N* failures there.
 
 .. note::
 
    The ``migration-threshold`` is per *resource*, even though fail counts are
    tracked per *operation*. The operation fail counts are added together
    to compare against the ``migration-threshold``.
 
 By default, fail counts remain until manually cleared by an administrator
 using ``crm_resource --cleanup`` or ``crm_failcount --delete`` (hopefully after
 first fixing the failure's cause). It is possible to have fail counts expire
 automatically by setting the ``failure-timeout`` resource meta-attribute.
 
 .. important::
 
    A successful operation does not clear past failures. If a recurring monitor
    operation fails once, succeeds many times, then fails again days later, its
    fail count is 2. Fail counts are cleared only by manual intervention or
    failure timeout.
 
 For example, setting ``migration-threshold`` to 2 and ``failure-timeout`` to
 ``60s`` would cause the resource to move to a new node after 2 failures, and
 allow it to move back (depending on stickiness and constraint scores) after one
 minute.
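
 In the CIB, that combination might be configured with resource
 meta-attributes along these lines (a sketch using a hypothetical resource
 named ``myrsc``):

 .. code-block:: xml

    <primitive id="myrsc" class="ocf" provider="heartbeat" type="IPaddr2">
      <meta_attributes id="myrsc-meta_attributes">
        <nvpair id="myrsc-migration-threshold"
            name="migration-threshold" value="2"/>
        <nvpair id="myrsc-failure-timeout"
            name="failure-timeout" value="60s"/>
      </meta_attributes>
    </primitive>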
 
 .. note::
 
    ``failure-timeout`` is measured since the most recent failure. That is, older
    failures do not individually time out and lower the fail count. Instead, all
    failures are timed out simultaneously (and the fail count is reset to 0) if
    there is no new failure for the timeout period.
 
 There are two exceptions to the migration threshold: when a resource either
 fails to start or fails to stop.
 
 If the cluster property ``start-failure-is-fatal`` is set to ``true`` (which is
 the default), start failures cause the fail count to be set to ``INFINITY`` and
 thus always cause the resource to move immediately.
 
 Stop failures are slightly different and crucial.  If a resource fails to stop
 and fencing is enabled, then the cluster will fence the node in order to be
 able to start the resource elsewhere.  If fencing is disabled, then the cluster
 has no way to continue and will not try to start the resource elsewhere, but
 will try to stop it again after any failure timeout or clearing.
 
 .. index::
    single: resource; move
 
 Moving Resources
 ################
 
 Moving Resources Manually
 _________________________
 
 There are primarily two occasions when you would want to move a resource from
 its current location: when the whole node is under maintenance, and when a
 single resource needs to be moved.
 
 .. index::
    single: standby mode
    single: node; standby mode
 
 Standby Mode
 ~~~~~~~~~~~~
 
 Since everything eventually comes down to a score, you could create constraints
 for every resource to prevent them from running on one node. While Pacemaker
 configuration can seem convoluted at times, not even we would require this of
 administrators.
 
 Instead, you can set a special node attribute which tells the cluster "don't
 let anything run here". There is even a helpful tool to help query and set it,
 called ``crm_standby``. To check the standby status of the current machine,
 run:
 
 .. code-block:: none
 
    # crm_standby -G
 
 A value of ``on`` indicates that the node is *not* able to host any resources,
 while a value of ``off`` says that it *can*.
 
 You can also check the status of other nodes in the cluster by specifying the
 ``--node`` option:
 
 .. code-block:: none
 
    # crm_standby -G --node sles-2
 
 To change the current node's standby status, use ``-v`` instead of ``-G``:
 
 .. code-block:: none
 
    # crm_standby -v on
 
 Again, you can change another host's value by supplying a hostname with
 ``--node``.
 
 A cluster node in standby mode will not run resources, but still contributes to
 quorum, and may fence or be fenced by other nodes.
 
 Moving One Resource
 ~~~~~~~~~~~~~~~~~~~
 
 When only one resource is required to move, we could do this by creating
 location constraints.  However, once again we provide a user-friendly shortcut
 as part of the ``crm_resource`` command, which creates and modifies the extra
 constraints for you.  If ``Email`` were running on ``sles-1`` and you wanted it
 moved to a specific location, the command would look something like:
 
 .. code-block:: none
 
    # crm_resource -M -r Email -H sles-2
 
 Behind the scenes, the tool will create the following location constraint:
 
 .. code-block:: xml
 
    <rsc_location id="cli-prefer-Email" rsc="Email" node="sles-2" score="INFINITY"/>
 
 It is important to note that subsequent invocations of ``crm_resource -M`` are
 not cumulative. So, if you ran these commands:
 
 .. code-block:: none
 
    # crm_resource -M -r Email -H sles-2
    # crm_resource -M -r Email -H sles-3
 
 then it is as if you had never performed the first command.
 
 To allow the resource to move back again, use:
 
 .. code-block:: none
 
    # crm_resource -U -r Email
 
 Note the use of the word *allow*.  The resource *can* move back to its original
 location, but depending on ``resource-stickiness``, location constraints, and
 so forth, it might stay where it is.
 
 To be absolutely certain that it moves back to ``sles-1``, move it there before
 issuing the call to ``crm_resource -U``:
 
 .. code-block:: none
 
    # crm_resource -M -r Email -H sles-1
    # crm_resource -U -r Email
 
 Alternatively, if you only care that the resource should be moved from its
 current location, try:
 
 .. code-block:: none
 
    # crm_resource -B -r Email
 
 which will instead create a negative constraint, like:
 
 .. code-block:: xml
 
    <rsc_location id="cli-ban-Email-on-sles-1" rsc="Email" node="sles-1" score="-INFINITY"/>
 
 This will achieve the desired effect, but will also have long-term
 consequences. As the tool will warn you, the creation of a ``-INFINITY``
 constraint will prevent the resource from running on that node until
 ``crm_resource -U`` is used. This includes the situation where every other
 cluster node is no longer available!
 
 In some cases, such as when ``resource-stickiness`` is set to ``INFINITY``, it
 is possible that you will end up with the problem described in
 :ref:`node-score-equal`. The tool can detect some of these cases and deals with
 them by creating both positive and negative constraints. For example:
 
 .. code-block:: xml
 
    <rsc_location id="cli-ban-Email-on-sles-1" rsc="Email" node="sles-1" score="-INFINITY"/>
    <rsc_location id="cli-prefer-Email" rsc="Email" node="sles-2" score="INFINITY"/>
 
 which has the same long-term consequences as discussed earlier.
 
 Moving Resources Due to Connectivity Changes
 ____________________________________________
 
 You can configure the cluster to move resources when external connectivity is
 lost in two steps.
 
 .. index::
    single: ocf:pacemaker:ping resource
    single: ping resource
 
 Tell Pacemaker to Monitor Connectivity
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 First, add an ``ocf:pacemaker:ping`` resource to the cluster. The ``ping``
 resource uses the system utility of the same name to test whether machines in
 a list (specified by DNS host name or IP address) are reachable, and uses the
 results to maintain a node attribute.
 
 The node attribute is called ``pingd`` by default, but is customizable in order
 to allow multiple ping groups to be defined.
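 
 One way to inspect the attribute's current value on the local node (assuming
 the default attribute name) is:
 
 .. code-block:: none
 
    # attrd_updater --query --name pingd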
 
 Normally, the ping resource should run on all cluster nodes, which means that
 you'll need to create a clone. A template for this can be found below, along
 with a description of the most interesting parameters.
 
 .. table:: **Commonly Used ocf:pacemaker:ping Resource Parameters**
+   :widths: 1 4
 
    +--------------------+--------------------------------------------------------------+
    | Resource Parameter | Description                                                  |
    +====================+==============================================================+
    | dampen             | .. index::                                                   |
    |                    |    single: ocf:pacemaker:ping resource; dampen parameter     |
    |                    |    single: dampen; ocf:pacemaker:ping resource parameter     |
    |                    |                                                              |
    |                    | The time to wait (dampening) for further changes to occur.   |
    |                    | Use this to prevent a resource from bouncing around the      |
    |                    | cluster when cluster nodes notice the loss of connectivity   |
    |                    | at slightly different times.                                 |
    +--------------------+--------------------------------------------------------------+
    | multiplier         | .. index::                                                   |
    |                    |    single: ocf:pacemaker:ping resource; multiplier parameter |
    |                    |    single: multiplier; ocf:pacemaker:ping resource parameter |
    |                    |                                                              |
    |                    | The number of connected ping nodes gets multiplied by this   |
    |                    | value to get a score. Useful when there are multiple ping    |
    |                    | nodes configured.                                            |
    +--------------------+--------------------------------------------------------------+
    | host_list          | .. index::                                                   |
    |                    |    single: ocf:pacemaker:ping resource; host_list parameter  |
    |                    |    single: host_list; ocf:pacemaker:ping resource parameter  |
    |                    |                                                              |
    |                    | The machines to contact in order to determine the current    |
    |                    | connectivity status. Allowed values include resolvable DNS   |
    |                    | host names, IPv4 addresses, and IPv6 addresses.              |
    +--------------------+--------------------------------------------------------------+
 
 .. topic:: Example ping resource that checks node connectivity once every minute
 
    .. code-block:: xml
 
       <clone id="Connected">
          <primitive id="ping" class="ocf" provider="pacemaker" type="ping">
           <instance_attributes id="ping-attrs">
             <nvpair id="ping-dampen"     name="dampen" value="5s"/>
             <nvpair id="ping-multiplier" name="multiplier" value="1000"/>
             <nvpair id="ping-hosts"      name="host_list" value="my.gateway.com www.bigcorp.com"/>
           </instance_attributes>
           <operations>
             <op id="ping-monitor-60s" interval="60s" name="monitor"/>
           </operations>
          </primitive>
       </clone>
 
 .. important::
 
    You're only half done. The next section deals with telling Pacemaker how to
    deal with the connectivity status that ``ocf:pacemaker:ping`` is recording.
 
 Tell Pacemaker How to Interpret the Connectivity Data
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 .. important::
 
    Before attempting the following, make sure you understand
    :ref:`rules`.
 
 There are a number of ways to use the connectivity data.
 
 The most common setup is for people to have a single ping target (for example,
 the service network's default gateway), to prevent the cluster from running a
 resource on any unconnected node.
 
 .. topic:: Don't run a resource on unconnected nodes
 
    .. code-block:: xml
 
       <rsc_location id="WebServer-no-connectivity" rsc="Webserver">
          <rule id="ping-exclude-rule" score="-INFINITY" >
             <expression id="ping-exclude" attribute="pingd" operation="not_defined"/>
          </rule>
       </rsc_location>
 
 A more complex setup is to have a number of ping targets configured. You can
 require the cluster to only run resources on nodes that can connect to all (or
 a minimum subset) of them.
 
 .. topic:: Run only on nodes connected to three or more ping targets
 
    .. code-block:: xml
 
       <primitive id="ping" provider="pacemaker" class="ocf" type="ping">
       ... <!-- omitting some configuration to highlight important parts -->
          <nvpair id="ping-multiplier" name="multiplier" value="1000"/>
       ...
       </primitive>
       ...
       <rsc_location id="WebServer-connectivity" rsc="Webserver">
          <rule id="ping-prefer-rule" score="-INFINITY" >
             <expression id="ping-prefer" attribute="pingd" operation="lt" value="3000"/>
          </rule>
       </rsc_location>
 
 Alternatively, you can tell the cluster only to *prefer* nodes with the best
 connectivity, by using ``score-attribute`` in the rule. Just be sure to set
 ``multiplier`` to a value higher than that of ``resource-stickiness`` (and
 don't set either of them to ``INFINITY``).
 
 .. topic:: Prefer node with most connected ping nodes
 
    .. code-block:: xml
 
       <rsc_location id="WebServer-connectivity" rsc="Webserver">
          <rule id="ping-prefer-rule" score-attribute="pingd" >
             <expression id="ping-prefer" attribute="pingd" operation="defined"/>
          </rule>
       </rsc_location>
 
 It is perhaps easier to think of this in terms of the simple constraints that
 the cluster translates it into. For example, if ``sles-1`` is connected to all
 five ping nodes but ``sles-2`` is only connected to two, then it would be as if
 you instead had the following constraints in your configuration:
 
 .. topic:: How the cluster translates the above location constraint
 
    .. code-block:: xml
 
       <rsc_location id="ping-1" rsc="Webserver" node="sles-1" score="5000"/>
       <rsc_location id="ping-2" rsc="Webserver" node="sles-2" score="2000"/>
 
 The advantage is that you don't have to manually update any constraints
 whenever your network connectivity changes.
 
 You can also combine the concepts above into something even more complex. The
 example below shows how you can prefer the node with the most connected ping
 nodes provided they have connectivity to at least three (again assuming that
 ``multiplier`` is set to 1000).
 
 .. topic:: More complex example of choosing location based on connectivity
 
    .. code-block:: xml
 
       <rsc_location id="WebServer-connectivity" rsc="Webserver">
          <rule id="ping-exclude-rule" score="-INFINITY" >
             <expression id="ping-exclude" attribute="pingd" operation="lt" value="3000"/>
          </rule>
          <rule id="ping-prefer-rule" score-attribute="pingd" >
             <expression id="ping-prefer" attribute="pingd" operation="defined"/>
          </rule>
       </rsc_location>
 
 
 .. _live-migration:
 
 Migrating Resources
 ___________________
 
 Normally, when the cluster needs to move a resource, it fully restarts the
 resource (that is, it stops the resource on the current node and starts it on
 the new node).
 
 However, some types of resources, such as many virtual machines, are able to
 move to another location without loss of state (often referred to as live
 migration or hot migration). In Pacemaker, this is called resource migration.
 Pacemaker can be configured to migrate a resource when moving it, rather than
 restarting it.
 
 Not all resources are able to migrate; see the
 :ref:`migration checklist <migration_checklist>` below. Even those that can
 won't do so in all situations. Conceptually, there are two requirements from
 which the other prerequisites follow:
 
 * The resource must be active and healthy at the old location; and
 * everything required for the resource to run must be available on both the old
   and new locations.
 
 The cluster is able to accommodate both *push* and *pull* migration models by
 requiring the resource agent to support two special actions: ``migrate_to``
 (performed on the current location) and ``migrate_from`` (performed on the
 destination).
 
 In push migration, the process on the current location transfers the resource
 to the new location where it is later activated. In this scenario, most of the
 work would be done in the ``migrate_to`` action and, if anything, the
 activation would occur during ``migrate_from``.
 
 Conversely, in pull migration, the ``migrate_to`` action is practically empty
 and ``migrate_from`` does most of the work, extracting the relevant resource
 state from the old location and activating it.
 
 There is no wrong or right way for a resource agent to implement migration, as
 long as it works.
 
 .. _migration_checklist:
 
 .. topic:: Migration Checklist
 
    * The resource may not be a clone.
    * The resource agent standard must be OCF.
    * The resource must not be in a failed or degraded state.
    * The resource agent must support ``migrate_to`` and ``migrate_from``
      actions, and advertise them in its meta-data.
    * The resource must have the ``allow-migrate`` meta-attribute set to
      ``true`` (which is not the default).
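 
 For example, a virtual machine resource might opt in to migration along the
 lines of this sketch (the ids and agent choice are hypothetical):
 
 .. code-block:: xml
 
    <primitive id="vm1" class="ocf" provider="heartbeat" type="VirtualDomain">
      <meta_attributes id="vm1-meta">
        <nvpair id="vm1-allow-migrate" name="allow-migrate" value="true"/>
      </meta_attributes>
    </primitive>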
 
 If an otherwise migratable resource depends on another resource via an ordering
 constraint, there are special situations in which it will be restarted rather
 than migrated.
 
 For example, if the resource depends on a clone, and at the time the resource
 needs to be moved, the clone has instances that are stopping and instances that
 are starting, then the resource will be restarted. The scheduler is not yet
 able to model this situation correctly and so takes the safer (if less optimal)
 path.
 
 Also, if a migratable resource depends on a non-migratable resource, and both
 need to be moved, the migratable resource will be restarted.
 
 
 .. index::
    single: reload
    single: reload-agent
 
 Reloading an Agent After a Definition Change
 ############################################
 
 The cluster automatically detects changes to the configuration of active
 resources. The cluster's normal response is to stop the service (using the old
 definition) and start it again (with the new definition). This works, but some
 resource agents are smarter and can be told to use a new set of options without
 restarting.
 
 To take advantage of this capability, the resource agent must:
 
 * Implement the ``reload-agent`` action. What it should do depends completely
   on your application!
 
   .. note::
 
      Resource agents may also implement a ``reload`` action to make the managed
      service reload its own *native* configuration. This is different from
      ``reload-agent``, which makes effective changes in the resource's
      *Pacemaker* configuration (specifically, the values of the agent's
      reloadable parameters).
 
 * Advertise the ``reload-agent`` operation in the ``actions`` section of its
   meta-data.
 
 * Set the ``reloadable`` attribute to 1 in the ``parameters`` section of
   its meta-data for any parameters eligible to be reloaded after a change.
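 
 A sketch of the corresponding meta-data fragments (the parameter name is
 hypothetical):
 
 .. code-block:: xml
 
    <actions>
      <action name="reload-agent" timeout="20s"/>
      <!-- ... other advertised actions ... -->
    </actions>
    <parameters>
      <parameter name="loglevel" reloadable="1">
        <!-- ... parameter description and content ... -->
      </parameter>
    </parameters>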
 
 Once these requirements are satisfied, the cluster will automatically know to
 reload the resource (instead of restarting) when a reloadable parameter
 changes.
 
 .. note::
 
    Metadata will not be re-read unless the resource needs to be started. If you
    edit the agent of an already active resource to set a parameter reloadable,
    the resource may restart the first time the parameter value changes.
 
 .. note::
 
    If both a reloadable and non-reloadable parameter are changed
    simultaneously, the resource will be restarted.
 
 .. rubric:: Footnotes
 
 .. [#] The naming of this option was perhaps unfortunate as it is easily
        confused with live migration, the process of moving a resource from one
        node to another without stopping it.  Xen virtual guests are the most
        common example of resources that can be migrated in this manner.
diff --git a/doc/sphinx/Pacemaker_Explained/advanced-resources.rst b/doc/sphinx/Pacemaker_Explained/advanced-resources.rst
index 320dc39f3b..bcc5c37971 100644
--- a/doc/sphinx/Pacemaker_Explained/advanced-resources.rst
+++ b/doc/sphinx/Pacemaker_Explained/advanced-resources.rst
@@ -1,1613 +1,1628 @@
 Advanced Resource Types
 -----------------------
 
 .. index::
    single: group resource
    single: resource; group
 
 .. _group-resources:
 
 Groups - A Syntactic Shortcut
 #############################
 
 One of the most common elements of a cluster is a set of resources
 that need to be located together, start sequentially, and stop in the
 reverse order.  To simplify this configuration, we support the concept
 of groups.
    
 .. topic:: A group of two primitive resources
 
    .. code-block:: xml
 
       <group id="shortcut">
          <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
           <instance_attributes id="params-public-ip">
              <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
           </instance_attributes>
          </primitive>
          <primitive id="Email" class="lsb" type="exim"/>
       </group> 
    
 Although the example above contains only two resources, there is no
 limit to the number of resources a group can contain.  The example is
 also sufficient to explain the fundamental properties of a group:
    
 * Resources are started in the order they appear in (**Public-IP** first,
   then **Email**)
 * Resources are stopped in the reverse order to which they appear in
   (**Email** first, then **Public-IP**)
    
 If a resource in the group can't run anywhere, then nothing listed after it
 is allowed to run either:
    
 * If **Public-IP** can't run anywhere, neither can **Email**;
 * but if **Email** can't run anywhere, this does not affect **Public-IP**
   in any way
    
 The group above is logically equivalent to writing:
    
 .. topic:: How the cluster sees a group resource
 
    .. code-block:: xml
 
       <configuration>
          <resources>
           <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
            <instance_attributes id="params-public-ip">
               <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
            </instance_attributes>
           </primitive>
           <primitive id="Email" class="lsb" type="exim"/>
          </resources>
          <constraints>
             <rsc_colocation id="xxx" rsc="Email" with-rsc="Public-IP" score="INFINITY"/>
             <rsc_order id="yyy" first="Public-IP" then="Email"/>
          </constraints>
       </configuration> 
 
 Obviously, as the group grows bigger, the reduced configuration effort
 can become significant.
 
 Another (typical) example of a group is a DRBD volume, the filesystem
 mount, an IP address, and an application that uses them.
 
 .. index::
    pair: XML element; group
 
 Group Properties
 ________________
 
 .. table:: **Properties of a Group Resource**
+   :widths: 1 4
 
    +-------+--------------------------------------+
    | Field | Description                          |
    +=======+======================================+
    | id    | .. index::                           |
    |       |    single: group; property, id       |
    |       |    single: property; id (group)      |
    |       |    single: id; group property        |
    |       |                                      |
    |       | A unique name for the group          |
    +-------+--------------------------------------+
 
 Group Options
 _____________
 
 Groups inherit the ``priority``, ``target-role``, and ``is-managed`` properties
 from primitive resources. See :ref:`resource_options` for information about
 those properties.
    
 .. table:: **Group-specific configuration options**
+   :class: longtable
+   :widths: 1 1 3
 
    +-------------------+-----------------+-------------------------------------------------------+
    | Meta-Attribute    | Default         | Description                                           |
    +===================+=================+=======================================================+
    | ordered           | true            |  .. index::                                           |
    |                   |                 |     single: group; option, ordered                    |
    |                   |                 |     single: option; ordered (group)                   |
    |                   |                 |     single: ordered; group option                     |
    |                   |                 |                                                       |
    |                   |                 | If **true**, group members will be started in the     |
    |                   |                 | order they are listed in the configuration (and       |
    |                   |                 | stopped in the reverse order).                        |
    +-------------------+-----------------+-------------------------------------------------------+
 
 Group Instance Attributes
 _________________________
 
 Groups have no instance attributes. However, any that are set for the group
 object will be inherited by the group's children.
    
 Group Contents
 ______________
 
 Groups may only contain a collection of cluster resources (see
 :ref:`primitive-resource`).  To refer to a child of a group resource, just use
 the child's ``id`` instead of the group's.
    
 Group Constraints
 _________________
    
 Although it is possible to reference a group's children in
 constraints, it is usually preferable to reference the group itself.
    
 .. topic:: Some constraints involving groups
 
    .. code-block:: xml
 
       <constraints>
           <rsc_location id="group-prefers-node1" rsc="shortcut" node="node1" score="500"/>
           <rsc_colocation id="webserver-with-group" rsc="Webserver" with-rsc="shortcut"/>
           <rsc_order id="start-group-then-webserver" first="Webserver" then="shortcut"/>
       </constraints> 
 
 .. index::
    pair: resource-stickiness; group
 
 Group Stickiness
 ________________
    
 Stickiness, the measure of how much a resource wants to stay where it
 is, is additive in groups.  Every active resource of the group will
 contribute its stickiness value to the group's total.  So if the
 default ``resource-stickiness`` is 100, and a group has seven members,
 five of which are active, then the group as a whole will prefer its
 current location with a score of 500.
 
 .. index::
    single: clone
    single: resource; clone
    
 .. _s-resource-clone:
 
 Clones - Resources That Can Have Multiple Active Instances
 ##########################################################
 
 *Clone* resources are resources that can have more than one copy active at the
 same time. This allows you, for example, to run a copy of a daemon on every
 node. You can clone any primitive or group resource [#]_.
    
 Anonymous versus Unique Clones
 ______________________________
    
 A clone resource is configured to be either *anonymous* or *globally unique*.
    
 Anonymous clones are the simplest. These behave completely identically
 everywhere they are running. Because of this, there can be only one instance of
 an anonymous clone active per node.
          
 The instances of globally unique clones are distinct entities. All instances
 are launched identically, but one instance of the clone is not identical to any
 other instance, whether running on the same node or a different node. As an
 example, a cloned IP address can use special kernel functionality such that
 each instance handles a subset of requests for the same IP address.
 
 .. index::
    single: promotable clone
    single: resource; promotable
 
 .. _s-resource-promotable:
 
 Promotable clones
 _________________
 
 If a clone is *promotable*, its instances can perform a special role that
 Pacemaker will manage via the ``promote`` and ``demote`` actions of the resource
 agent.
 
 Services that support such a special role have various terms for the special
 role and the default role: primary and secondary, master and replica,
 controller and worker, etc. Pacemaker uses the terms *promoted* and
 *unpromoted* to be agnostic to what the service calls them or what they do.
    
 All that Pacemaker cares about is that an instance comes up in the unpromoted
 role when started, and that the resource agent supports the ``promote`` and
 ``demote`` actions to manage entering and exiting the promoted role.
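 
 A minimal sketch of a promotable clone definition (the ids and agent are
 hypothetical):
 
 .. code-block:: xml
 
    <clone id="database-clone">
      <meta_attributes id="database-clone-meta">
        <nvpair id="database-promotable" name="promotable" value="true"/>
      </meta_attributes>
      <primitive id="database" class="ocf" provider="heartbeat" type="pgsql"/>
    </clone>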
 
 .. index::
    pair: XML element; clone
    
 Clone Properties
 ________________
    
 .. table:: **Properties of a Clone Resource**
+   :widths: 1 4
 
    +-------+--------------------------------------+
    | Field | Description                          |
    +=======+======================================+
    | id    | .. index::                           |
    |       |    single: clone; property, id       |
    |       |    single: property; id (clone)      |
    |       |    single: id; clone property        |
    |       |                                      |
    |       | A unique name for the clone          |
    +-------+--------------------------------------+
 
 .. index::
    pair: options; clone
 
 Clone Options
 _____________
 
 :ref:`Options <resource_options>` inherited from primitive resources:
 ``priority, target-role, is-managed``
    
 .. table:: **Clone-specific configuration options**
+   :class: longtable
+   :widths: 1 1 3
 
    +-------------------+-----------------+-------------------------------------------------------+
    | Field             | Default         | Description                                           |
    +===================+=================+=======================================================+
    | globally-unique   | false           |  .. index::                                           |
    |                   |                 |     single: clone; option, globally-unique            |
    |                   |                 |     single: option; globally-unique (clone)           |
    |                   |                 |     single: globally-unique; clone option             |
    |                   |                 |                                                       |
    |                   |                 | If **true**, each clone instance performs a           |
    |                   |                 | distinct function                                     |
    +-------------------+-----------------+-------------------------------------------------------+
    | clone-max         | 0               | .. index::                                            |
    |                   |                 |    single: clone; option, clone-max                   |
    |                   |                 |    single: option; clone-max (clone)                  |
    |                   |                 |    single: clone-max; clone option                    |
    |                   |                 |                                                       |
    |                   |                 | The maximum number of clone instances that can        |
    |                   |                 | be started across the entire cluster. If 0, the       |
    |                   |                 | number of nodes in the cluster will be used.          |
    +-------------------+-----------------+-------------------------------------------------------+
    | clone-node-max    | 1               | .. index::                                            |
    |                   |                 |    single: clone; option, clone-node-max              |
    |                   |                 |    single: option; clone-node-max (clone)             |
    |                   |                 |    single: clone-node-max; clone option               |
    |                   |                 |                                                       |
    |                   |                 | If ``globally-unique`` is **true**, the maximum       |
    |                   |                 | number of clone instances that can be started         |
    |                   |                 | on a single node                                      |
    +-------------------+-----------------+-------------------------------------------------------+
    | clone-min         | 0               | .. index::                                            |
    |                   |                 |    single: clone; option, clone-min                   |
    |                   |                 |    single: option; clone-min (clone)                  |
    |                   |                 |    single: clone-min; clone option                    |
    |                   |                 |                                                       |
    |                   |                 | Require at least this number of clone instances       |
    |                   |                 | to be runnable before allowing resources              |
    |                   |                 | depending on the clone to be runnable. A value        |
    |                   |                 | of 0 means require all clone instances to be          |
    |                   |                 | runnable.                                             |
    +-------------------+-----------------+-------------------------------------------------------+
    | notify            | false           | .. index::                                            |
    |                   |                 |    single: clone; option, notify                      |
    |                   |                 |    single: option; notify (clone)                     |
    |                   |                 |    single: notify; clone option                       |
    |                   |                 |                                                       |
    |                   |                 | Call the resource agent's **notify** action for       |
    |                   |                 | all active instances, before and after starting       |
    |                   |                 | or stopping any clone instance. The resource          |
    |                   |                 | agent must support this action.                       |
    |                   |                 | Allowed values: **false**, **true**                   |
    +-------------------+-----------------+-------------------------------------------------------+
    | ordered           | false           | .. index::                                            |
    |                   |                 |    single: clone; option, ordered                     |
    |                   |                 |    single: option; ordered (clone)                    |
    |                   |                 |    single: ordered; clone option                      |
    |                   |                 |                                                       |
    |                   |                 | If **true**, clone instances must be started          |
    |                   |                 | sequentially instead of in parallel.                  |
    |                   |                 | Allowed values: **false**, **true**                   |
    +-------------------+-----------------+-------------------------------------------------------+
    | interleave        | false           | .. index::                                            |
    |                   |                 |    single: clone; option, interleave                  |
    |                   |                 |    single: option; interleave (clone)                 |
    |                   |                 |    single: interleave; clone option                   |
    |                   |                 |                                                       |
    |                   |                 | When this clone is ordered relative to another        |
    |                   |                 | clone, if this option is **false** (the default),     |
    |                   |                 | the ordering is relative to *all* instances of        |
    |                   |                 | the other clone, whereas if this option is            |
    |                   |                 | **true**, the ordering is relative only to            |
    |                   |                 | instances on the same node.                           |
    |                   |                 | Allowed values: **false**, **true**                   |
    +-------------------+-----------------+-------------------------------------------------------+
    | promotable        | false           | .. index::                                            |
    |                   |                 |    single: clone; option, promotable                  |
    |                   |                 |    single: option; promotable (clone)                 |
    |                   |                 |    single: promotable; clone option                   |
    |                   |                 |                                                       |
    |                   |                 | If **true**, clone instances can perform a            |
    |                   |                 | special role that Pacemaker will manage via the       |
    |                   |                 | resource agent's **promote** and **demote**           |
    |                   |                 | actions. The resource agent must support these        |
    |                   |                 | actions.                                              |
    |                   |                 | Allowed values: **false**, **true**                   |
    +-------------------+-----------------+-------------------------------------------------------+
    | promoted-max      | 1               | .. index::                                            |
    |                   |                 |    single: clone; option, promoted-max                |
    |                   |                 |    single: option; promoted-max (clone)               |
    |                   |                 |    single: promoted-max; clone option                 |
    |                   |                 |                                                       |
    |                   |                 | If ``promotable`` is **true**, the number of          |
    |                   |                 | instances that can be promoted at one time            |
    |                   |                 | across the entire cluster                             |
    +-------------------+-----------------+-------------------------------------------------------+
    | promoted-node-max | 1               | .. index::                                            |
    |                   |                 |    single: clone; option, promoted-node-max           |
    |                   |                 |    single: option; promoted-node-max (clone)          |
    |                   |                 |    single: promoted-node-max; clone option            |
    |                   |                 |                                                       |
    |                   |                 | If ``promotable`` is **true** and ``globally-unique`` |
    |                   |                 | is **false**, the number of clone instances that can  |
    |                   |                 | be promoted at one time on a single node              |
    +-------------------+-----------------+-------------------------------------------------------+
    
 .. note:: **Deprecated Terminology**
 
    In older documentation and online examples, you may see promotable clones
    referred to as *multi-state*, *stateful*, or *master/slave*; these mean the
    same thing as *promotable*. Certain syntax is supported for backward
    compatibility, but is deprecated and will be removed in a future version:
 
    * Using a ``master`` tag, instead of a ``clone`` tag with the ``promotable``
      meta-attribute set to ``true``
    * Using the ``master-max`` meta-attribute instead of ``promoted-max``
    * Using the ``master-node-max`` meta-attribute instead of
      ``promoted-node-max``
    * Using ``Master`` as a role name instead of ``Promoted``
    * Using ``Slave`` as a role name instead of ``Unpromoted``
 
    
 Clone Contents
 ______________
    
 Clones must contain exactly one primitive or group resource.
    
 .. topic:: A clone that runs a web server on all nodes
 
    .. code-block:: xml
 
       <clone id="apache-clone">
           <primitive id="apache" class="lsb" type="apache">
               <operations>
                  <op id="apache-monitor" name="monitor" interval="30"/>
               </operations>
           </primitive>
       </clone> 
 
 .. warning::
 
    You should never reference the name of a clone's child (the primitive or group
    resource being cloned). If you think you need to do this, you probably need to
    re-evaluate your design.
    
 Clone Instance Attribute
 ________________________
    
 Clones have no instance attributes; however, any that are set here will be
 inherited by the clone's child.
    
 .. index::
    single: clone; constraint
 
 Clone Constraints
 _________________
    
 In most cases, a clone will have a single instance on each active cluster
 node.  If this is not the case, you can indicate which nodes the
 cluster should preferentially assign copies to with resource location
 constraints.  These constraints are written no differently from those
 for primitive resources except that the clone's **id** is used.
    
 .. topic:: Some constraints involving clones
 
    .. code-block:: xml
 
       <constraints>
           <rsc_location id="clone-prefers-node1" rsc="apache-clone" node="node1" score="500"/>
           <rsc_colocation id="stats-with-clone" rsc="apache-stats" with="apache-clone"/>
           <rsc_order id="start-clone-then-stats" first="apache-clone" then="apache-stats"/>
       </constraints> 
    
 Ordering constraints behave slightly differently for clones.  In the
 example above, ``apache-stats`` will wait until all copies of ``apache-clone``
 that need to be started have done so before being started itself.
 Only if *no* copies can be started will ``apache-stats`` be prevented
 from being active.  Additionally, the clone will wait for
 ``apache-stats`` to be stopped before stopping itself.
 
 Colocation of a primitive or group resource with a clone means that
 the resource can run on any node with an active instance of the clone.
 The cluster will choose an instance based on where the clone is running and
 the resource's own location preferences.
 
 Colocation between clones is also possible.  If one clone **A** is colocated
 with another clone **B**, the set of allowed locations for **A** is limited to
 nodes on which **B** is (or will be) active.  Placement is then performed
 normally.
    
 .. index::
    single: promotable clone; constraint
 
 .. _promotable-clone-constraints:
 
 Promotable Clone Constraints
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    
 For promotable clone resources, the ``first-action`` and/or ``then-action`` fields
 for ordering constraints may be set to ``promote`` or ``demote`` to constrain the
 promoted role, and colocation constraints may contain ``rsc-role`` and/or
 ``with-rsc-role`` fields.
 
 .. topic:: Constraints involving promotable clone resources       
 
    .. code-block:: xml
 
       <constraints>
          <rsc_location id="db-prefers-node1" rsc="database" node="node1" score="500"/>
          <rsc_colocation id="backup-with-db-unpromoted" rsc="backup"
            with-rsc="database" with-rsc-role="Unpromoted"/>
          <rsc_colocation id="myapp-with-db-promoted" rsc="myApp"
            with-rsc="database" with-rsc-role="Promoted"/>
          <rsc_order id="start-db-before-backup" first="database" then="backup"/>
          <rsc_order id="promote-db-then-app" first="database" first-action="promote"
            then="myApp" then-action="start"/>
       </constraints> 
 
 In the example above, **myApp** will wait until one of the database
 copies has been started and promoted before being started
 itself on the same node.  Only if no copies can be promoted will **myApp** be
 prevented from being active.  Additionally, the cluster will wait for
 **myApp** to be stopped before demoting the database.
 
 Colocation of a primitive or group resource with a promotable clone
 resource means that it can run on any node with an active instance of
 the promotable clone resource that has the specified role (``Promoted`` or
 ``Unpromoted``).  In the example above, the cluster will choose a location
 based on where database is running in the promoted role, and if there are
 multiple promoted instances it will also factor in **myApp**'s own location
 preferences when deciding which location to choose.
 
 Colocation with regular clones and other promotable clone resources is also
 possible.  In such cases, the set of allowed locations for the **rsc**
 clone is (after role filtering) limited to nodes on which the
 ``with-rsc`` promotable clone resource is (or will be) in the specified role.
 Placement is then performed as normal.
    
 Using Promotable Clone Resources in Colocation Sets
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 When a promotable clone is used in a :ref:`resource set <s-resource-sets>`
 inside a colocation constraint, the resource set may take a ``role`` attribute.
 
 In the following example, an instance of **B** may be promoted only on a node
 where **A** is in the promoted role. Additionally, resources **C** and **D**
 must be located on a node where both **A** and **B** are promoted.
    
 .. topic:: Colocate C and D with A's and B's promoted instances
 
    .. code-block:: xml
 
       <constraints>
           <rsc_colocation id="coloc-1" score="INFINITY" >
             <resource_set id="colocated-set-example-1" sequential="true" role="Promoted">
               <resource_ref id="A"/>
               <resource_ref id="B"/>
             </resource_set>
             <resource_set id="colocated-set-example-2" sequential="true">
               <resource_ref id="C"/>
               <resource_ref id="D"/>
             </resource_set>
           </rsc_colocation>
       </constraints>
    
 Using Promotable Clone Resources in Ordered Sets
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 When a promotable clone is used in a :ref:`resource set <s-resource-sets>`
 inside an ordering constraint, the resource set may take an ``action``
 attribute.
 
 .. topic:: Start C and D after first promoting A and B
 
    .. code-block:: xml
 
       <constraints>
           <rsc_order id="order-1" score="INFINITY" >
             <resource_set id="ordered-set-1" sequential="true" action="promote">
               <resource_ref id="A"/>
               <resource_ref id="B"/>
             </resource_set>
             <resource_set id="ordered-set-2" sequential="true" action="start">
               <resource_ref id="C"/>
               <resource_ref id="D"/>
             </resource_set>
           </rsc_order>
       </constraints>
    
 In the above example, **B** cannot be promoted until **A** has been promoted.
 Additionally, resources **C** and **D** must wait until **A** and **B** have
 been promoted before they can start.
 
 .. index::
    pair: resource-stickiness; clone
    
 .. _s-clone-stickiness:
 
 Clone Stickiness
 ________________
    
 To achieve a stable allocation pattern, clones are slightly sticky by
 default.  If no value for ``resource-stickiness`` is provided, the clone
 will use a value of 1.  Being a small value, it causes minimal
 disturbance to the score calculations of other resources but is enough
 to prevent Pacemaker from needlessly moving copies around the cluster.
    
 .. note::
 
    For globally unique clones, this may result in multiple instances of the
    clone staying on a single node, even after another eligible node becomes
    active (for example, after being put into standby mode then made active again).
    If you do not want this behavior, specify a ``resource-stickiness`` of 0
    for the clone temporarily and let the cluster adjust, then set it back
    to 1 if you want the default behavior to apply again.
    
 .. important::
 
    If ``resource-stickiness`` is set in the ``rsc_defaults`` section, it will
    apply to clone instances as well. This means an explicit ``resource-stickiness``
    of 0 in ``rsc_defaults`` works differently from the implicit default used when
    ``resource-stickiness`` is not specified.
    
 Clone Resource Agent Requirements
 _________________________________
    
 Any resource can be used as an anonymous clone, as it requires no
 additional support from the resource agent.  Whether it makes sense to
 do so depends on your resource and its resource agent.
    
 Resource Agent Requirements for Globally Unique Clones
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    
 Globally unique clones require additional support in the resource agent. In
 particular, it must only respond with ``${OCF_SUCCESS}`` if the node has that
 exact instance active. All other probes for instances of the clone should
 result in ``${OCF_NOT_RUNNING}`` (or one of the other OCF error codes if
 the instance has failed).
 
 Individual instances of a clone are identified by appending a colon and a
 numerical offset, e.g. **apache:2**.
 
 Resource agents can find out how many copies there are by examining
 the ``OCF_RESKEY_CRM_meta_clone_max`` environment variable and which
 instance it is by examining ``OCF_RESKEY_CRM_meta_clone``.
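 
 For illustration, a shell-based agent might read them like this sketch:
 
 .. code-block:: none
 
    total=${OCF_RESKEY_CRM_meta_clone_max}   # how many instances exist
    me=${OCF_RESKEY_CRM_meta_clone}          # this instance's numerical offset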
 
 The resource agent must not make any assumptions (based on
 ``OCF_RESKEY_CRM_meta_clone``) about which numerical instances are active.  In
 particular, the list of active copies will not always be an unbroken
 sequence, nor always start at 0.
    
 Resource Agent Requirements for Promotable Clones
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 Promotable clone resources require two extra actions, ``demote`` and ``promote``,
 which are responsible for changing the state of the resource. Like **start** and
 **stop**, they should return ``${OCF_SUCCESS}`` if they completed successfully or
 a relevant error code if they did not.
 
 The states can mean whatever you wish, but when the resource is
 started, it must come up in the unpromoted role. From there, the
 cluster will decide which instances to promote.
 
 In addition to the clone requirements for monitor actions, agents must
 also *accurately* report which state they are in.  The cluster relies
 on the agent to report its status (including role) accurately and does
 not indicate to the agent what role it currently believes it to be in.
    
 .. table:: **Role implications of OCF return codes**
+   :widths: 1 3
 
    +----------------------+--------------------------------------------------+
    | Monitor Return Code  | Description                                      |
    +======================+==================================================+
    | OCF_NOT_RUNNING      | .. index::                                       |
    |                      |    single: OCF_NOT_RUNNING                       |
    |                      |    single: OCF return code; OCF_NOT_RUNNING      |
    |                      |                                                  |
    |                      | Stopped                                          |
    +----------------------+--------------------------------------------------+
    | OCF_SUCCESS          | .. index::                                       |
    |                      |    single: OCF_SUCCESS                           |
    |                      |    single: OCF return code; OCF_SUCCESS          |
    |                      |                                                  |
    |                      | Running (Unpromoted)                             |
    +----------------------+--------------------------------------------------+
    | OCF_RUNNING_PROMOTED | .. index::                                       |
    |                      |    single: OCF_RUNNING_PROMOTED                  |
    |                      |    single: OCF return code; OCF_RUNNING_PROMOTED |
    |                      |                                                  |
    |                      | Running (Promoted)                               |
    +----------------------+--------------------------------------------------+
    | OCF_FAILED_PROMOTED  | .. index::                                       |
    |                      |    single: OCF_FAILED_PROMOTED                   |
    |                      |    single: OCF return code; OCF_FAILED_PROMOTED  |
    |                      |                                                  |
    |                      | Failed (Promoted)                                |
    +----------------------+--------------------------------------------------+
    | Other                | .. index::                                       |
    |                      |    single: return code                           |
    |                      |                                                  |
    |                      | Failed (Unpromoted)                              |
    +----------------------+--------------------------------------------------+
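 
 For illustration only, a promotable agent's ``monitor`` might map its state to
 these codes along the lines of this sketch (the helper functions are
 hypothetical):
 
 .. code-block:: none
 
    db_monitor() {
        if instance_is_running; then
            if instance_is_promoted; then
                return $OCF_RUNNING_PROMOTED
            fi
            return $OCF_SUCCESS
        fi
        return $OCF_NOT_RUNNING
    }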
    
 Clone Notifications
 ~~~~~~~~~~~~~~~~~~~
    
 If the clone has the ``notify`` meta-attribute set to **true**, and the resource
 agent supports the ``notify`` action, Pacemaker will call the action when
 appropriate, passing a number of extra variables which, when combined with
 additional context, can be used to calculate the current state of the cluster
 and what is about to happen to it.
 
 .. index::
    single: clone; environment variables
    single: notify; environment variables
    
 .. table:: **Environment variables supplied with Clone notify actions**
+   :widths: 1 1
 
    +----------------------------------------------+-------------------------------------------------------------------------------+
    | Variable                                     | Description                                                                   |
    +==============================================+===============================================================================+
    | OCF_RESKEY_CRM_meta_notify_type              | .. index::                                                                    |
    |                                              |    single: environment variable; OCF_RESKEY_CRM_meta_notify_type              |
    |                                              |    single: OCF_RESKEY_CRM_meta_notify_type                                    |
    |                                              |                                                                               |
    |                                              | Allowed values: **pre**, **post**                                             |
    +----------------------------------------------+-------------------------------------------------------------------------------+
    | OCF_RESKEY_CRM_meta_notify_operation         | .. index::                                                                    |
    |                                              |    single: environment variable; OCF_RESKEY_CRM_meta_notify_operation         |
    |                                              |    single: OCF_RESKEY_CRM_meta_notify_operation                               |
    |                                              |                                                                               |
    |                                              | Allowed values: **start**, **stop**                                           |
    +----------------------------------------------+-------------------------------------------------------------------------------+
    | OCF_RESKEY_CRM_meta_notify_start_resource    | .. index::                                                                    |
    |                                              |    single: environment variable; OCF_RESKEY_CRM_meta_notify_start_resource    |
    |                                              |    single: OCF_RESKEY_CRM_meta_notify_start_resource                          |
    |                                              |                                                                               |
    |                                              | Resources to be started                                                       |
    +----------------------------------------------+-------------------------------------------------------------------------------+
    | OCF_RESKEY_CRM_meta_notify_stop_resource     | .. index::                                                                    |
    |                                              |    single: environment variable; OCF_RESKEY_CRM_meta_notify_stop_resource     |
    |                                              |    single: OCF_RESKEY_CRM_meta_notify_stop_resource                           |
    |                                              |                                                                               |
    |                                              | Resources to be stopped                                                       |
    +----------------------------------------------+-------------------------------------------------------------------------------+
    | OCF_RESKEY_CRM_meta_notify_active_resource   | .. index::                                                                    |
    |                                              |    single: environment variable; OCF_RESKEY_CRM_meta_notify_active_resource   |
    |                                              |    single: OCF_RESKEY_CRM_meta_notify_active_resource                         |
    |                                              |                                                                               |
    |                                              | Resources that are running                                                    |
    +----------------------------------------------+-------------------------------------------------------------------------------+
    | OCF_RESKEY_CRM_meta_notify_inactive_resource | .. index::                                                                    |
    |                                              |    single: environment variable; OCF_RESKEY_CRM_meta_notify_inactive_resource |
    |                                              |    single: OCF_RESKEY_CRM_meta_notify_inactive_resource                       |
    |                                              |                                                                               |
    |                                              | Resources that are not running                                                |
    +----------------------------------------------+-------------------------------------------------------------------------------+
    | OCF_RESKEY_CRM_meta_notify_start_uname       | .. index::                                                                    |
    |                                              |    single: environment variable; OCF_RESKEY_CRM_meta_notify_start_uname       |
    |                                              |    single: OCF_RESKEY_CRM_meta_notify_start_uname                             |
    |                                              |                                                                               |
    |                                              | Nodes on which resources will be started                                      |
    +----------------------------------------------+-------------------------------------------------------------------------------+
    | OCF_RESKEY_CRM_meta_notify_stop_uname        | .. index::                                                                    |
    |                                              |    single: environment variable; OCF_RESKEY_CRM_meta_notify_stop_uname        |
    |                                              |    single: OCF_RESKEY_CRM_meta_notify_stop_uname                              |
    |                                              |                                                                               |
    |                                              | Nodes on which resources will be stopped                                      |
    +----------------------------------------------+-------------------------------------------------------------------------------+
    | OCF_RESKEY_CRM_meta_notify_active_uname      | .. index::                                                                    |
    |                                              |    single: environment variable; OCF_RESKEY_CRM_meta_notify_active_uname      |
    |                                              |    single: OCF_RESKEY_CRM_meta_notify_active_uname                            |
    |                                              |                                                                               |
    |                                              | Nodes on which resources are running                                          |
    +----------------------------------------------+-------------------------------------------------------------------------------+
 
 The variables come in pairs, such as
 ``OCF_RESKEY_CRM_meta_notify_start_resource`` and
 ``OCF_RESKEY_CRM_meta_notify_start_uname``. Each variable's value should be
 treated as an array of whitespace-separated elements, with the *n*-th
 element of one variable corresponding to the *n*-th element of its partner.
 
 ``OCF_RESKEY_CRM_meta_notify_inactive_resource`` is an exception: it has no
 matching **uname** variable, because inactive resources are not running on
 any node.
 
 Thus, to indicate that **clone:0** will be started on **sles-1**,
 **clone:2** will be started on **sles-3**, and **clone:3** will be started
 on **sles-2**, the cluster would set:
    
 .. topic:: Notification variables
 
    .. code-block:: none
 
       OCF_RESKEY_CRM_meta_notify_start_resource="clone:0 clone:2 clone:3"
       OCF_RESKEY_CRM_meta_notify_start_uname="sles-1 sles-3 sles-2"
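 
 A notify action can recombine such a pair by splitting both values on
 whitespace and matching elements by position. The following is a minimal,
 hypothetical sketch (not taken from any actual agent) of how a shell-based
 agent might do this:
 
 .. topic:: Pairing notification variables in a notify action (sketch)
 
    .. code-block:: none
 
       # Split the node list into positional parameters, then walk the
       # resource list, consuming one node name per resource.
       set -- $OCF_RESKEY_CRM_meta_notify_start_uname
       for rsc in $OCF_RESKEY_CRM_meta_notify_start_resource; do
           node=$1; shift
           echo "$rsc will be started on $node"
       done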
 
 .. note::
 
    Pacemaker will log but otherwise ignore failures of notify actions.
    
 Interpretation of Notification Variables
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    
 **Pre-notification (stop):**
    
 * Active resources: ``$OCF_RESKEY_CRM_meta_notify_active_resource``
 * Inactive resources: ``$OCF_RESKEY_CRM_meta_notify_inactive_resource``
 * Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource``
 * Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
    
 **Post-notification (stop) / Pre-notification (start):**
    
 * Active resources
 
     * ``$OCF_RESKEY_CRM_meta_notify_active_resource``
     * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
 
 * Inactive resources
 
     * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource``
     * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` 
 
 * Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource``
 * Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
    
 **Post-notification (start):**
    
 * Active resources:
 
     * ``$OCF_RESKEY_CRM_meta_notify_active_resource``
     * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
     * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource``
 
 * Inactive resources:
 
     * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource``
     * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
     * minus ``$OCF_RESKEY_CRM_meta_notify_start_resource``
 
 * Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource``
 * Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
    
 Extra Notifications for Promotable Clones
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 .. index::
    single: clone; environment variables
    single: promotable; environment variables
    
 .. table:: **Extra environment variables supplied for promotable clones**
+   :widths: 1 1
 
    +------------------------------------------------+---------------------------------------------------------------------------------+
    | Variable                                       | Description                                                                     |
    +================================================+=================================================================================+
    | OCF_RESKEY_CRM_meta_notify_promoted_resource   | .. index::                                                                      |
    |                                                |    single: environment variable; OCF_RESKEY_CRM_meta_notify_promoted_resource   |
    |                                                |    single: OCF_RESKEY_CRM_meta_notify_promoted_resource                         |
    |                                                |                                                                                 |
    |                                                | Resources that are running in the promoted role                                 |
    +------------------------------------------------+---------------------------------------------------------------------------------+
    | OCF_RESKEY_CRM_meta_notify_unpromoted_resource | .. index::                                                                      |
    |                                                |    single: environment variable; OCF_RESKEY_CRM_meta_notify_unpromoted_resource |
    |                                                |    single: OCF_RESKEY_CRM_meta_notify_unpromoted_resource                       |
    |                                                |                                                                                 |
    |                                                | Resources that are running in the unpromoted role                               |
    +------------------------------------------------+---------------------------------------------------------------------------------+
    | OCF_RESKEY_CRM_meta_notify_promote_resource    | .. index::                                                                      |
    |                                                |    single: environment variable; OCF_RESKEY_CRM_meta_notify_promote_resource    |
    |                                                |    single: OCF_RESKEY_CRM_meta_notify_promote_resource                          |
    |                                                |                                                                                 |
    |                                                | Resources to be promoted                                                        |
    +------------------------------------------------+---------------------------------------------------------------------------------+
    | OCF_RESKEY_CRM_meta_notify_demote_resource     | .. index::                                                                      |
    |                                                |    single: environment variable; OCF_RESKEY_CRM_meta_notify_demote_resource     |
    |                                                |    single: OCF_RESKEY_CRM_meta_notify_demote_resource                           |
    |                                                |                                                                                 |
    |                                                | Resources to be demoted                                                         |
    +------------------------------------------------+---------------------------------------------------------------------------------+
    | OCF_RESKEY_CRM_meta_notify_promote_uname       | .. index::                                                                      |
    |                                                |    single: environment variable; OCF_RESKEY_CRM_meta_notify_promote_uname       |
    |                                                |    single: OCF_RESKEY_CRM_meta_notify_promote_uname                             |
    |                                                |                                                                                 |
    |                                                | Nodes on which resources will be promoted                                       |
    +------------------------------------------------+---------------------------------------------------------------------------------+
    | OCF_RESKEY_CRM_meta_notify_demote_uname        | .. index::                                                                      |
    |                                                |    single: environment variable; OCF_RESKEY_CRM_meta_notify_demote_uname        |
    |                                                |    single: OCF_RESKEY_CRM_meta_notify_demote_uname                              |
    |                                                |                                                                                 |
    |                                                | Nodes on which resources will be demoted                                        |
    +------------------------------------------------+---------------------------------------------------------------------------------+
    | OCF_RESKEY_CRM_meta_notify_promoted_uname      | .. index::                                                                      |
    |                                                |    single: environment variable; OCF_RESKEY_CRM_meta_notify_promoted_uname      |
    |                                                |    single: OCF_RESKEY_CRM_meta_notify_promoted_uname                            |
    |                                                |                                                                                 |
    |                                                | Nodes on which resources are running in the promoted role                       |
    +------------------------------------------------+---------------------------------------------------------------------------------+
    | OCF_RESKEY_CRM_meta_notify_unpromoted_uname    | .. index::                                                                      |
    |                                                |    single: environment variable; OCF_RESKEY_CRM_meta_notify_unpromoted_uname    |
    |                                                |    single: OCF_RESKEY_CRM_meta_notify_unpromoted_uname                          |
    |                                                |                                                                                 |
    |                                                | Nodes on which resources are running in the unpromoted role                     |
    +------------------------------------------------+---------------------------------------------------------------------------------+
    
 Interpretation of Promotable Notification Variables
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
 **Pre-notification (demote):**
 
 * Active resources: ``$OCF_RESKEY_CRM_meta_notify_active_resource``
 * Promoted resources: ``$OCF_RESKEY_CRM_meta_notify_promoted_resource``
 * Unpromoted resources: ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource``
 * Inactive resources: ``$OCF_RESKEY_CRM_meta_notify_inactive_resource``
 * Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource``
 * Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource``
 * Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource``
 * Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
 
 **Post-notification (demote) / Pre-notification (stop):**
 
 * Active resources: ``$OCF_RESKEY_CRM_meta_notify_active_resource``
 * Promoted resources:
 
     * ``$OCF_RESKEY_CRM_meta_notify_promoted_resource``
     * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` 
 
 * Unpromoted resources: ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource``
 * Inactive resources: ``$OCF_RESKEY_CRM_meta_notify_inactive_resource``
 * Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource``
 * Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource``
 * Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource``
 * Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
 * Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource``
    
 **Post-notification (stop) / Pre-notification (start):**
    
 * Active resources:
 
     * ``$OCF_RESKEY_CRM_meta_notify_active_resource``
     * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` 
 
 * Promoted resources:
 
     * ``$OCF_RESKEY_CRM_meta_notify_promoted_resource``
     * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` 
 
 * Unpromoted resources:
 
     * ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource``
     * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` 
 
 * Inactive resources:
 
     * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource``
     * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource`` 
 
 * Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource``
 * Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource``
 * Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource``
 * Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
 * Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource``
 * Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
 
 **Post-notification (start) / Pre-notification (promote):**
 
 * Active resources:
 
     * ``$OCF_RESKEY_CRM_meta_notify_active_resource``
     * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
     * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` 
 
 * Promoted resources:
 
     * ``$OCF_RESKEY_CRM_meta_notify_promoted_resource``
     * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource`` 
 
 * Unpromoted resources:
 
     * ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource``
     * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
     * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` 
 
 * Inactive resources:
 
     * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource``
     * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
     * minus ``$OCF_RESKEY_CRM_meta_notify_start_resource``           
 
 * Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource``
 * Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource``
 * Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource``
 * Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
 * Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource``
 * Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource``
 * Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
    
 **Post-notification (promote):**
    
 * Active resources:
 
     * ``$OCF_RESKEY_CRM_meta_notify_active_resource``
     * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
     * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` 
 
 * Promoted resources:
 
     * ``$OCF_RESKEY_CRM_meta_notify_promoted_resource``
     * minus ``$OCF_RESKEY_CRM_meta_notify_demote_resource``
     * plus ``$OCF_RESKEY_CRM_meta_notify_promote_resource``
 
 * Unpromoted resources:
 
     * ``$OCF_RESKEY_CRM_meta_notify_unpromoted_resource``
     * minus ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
     * plus ``$OCF_RESKEY_CRM_meta_notify_start_resource``
     * minus ``$OCF_RESKEY_CRM_meta_notify_promote_resource`` 
 
 * Inactive resources:
 
     * ``$OCF_RESKEY_CRM_meta_notify_inactive_resource``
     * plus ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
     * minus ``$OCF_RESKEY_CRM_meta_notify_start_resource`` 
 
 * Resources to be started: ``$OCF_RESKEY_CRM_meta_notify_start_resource``
 * Resources to be promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource``
 * Resources to be demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource``
 * Resources to be stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
 * Resources that were started: ``$OCF_RESKEY_CRM_meta_notify_start_resource``
 * Resources that were promoted: ``$OCF_RESKEY_CRM_meta_notify_promote_resource``
 * Resources that were demoted: ``$OCF_RESKEY_CRM_meta_notify_demote_resource``
 * Resources that were stopped: ``$OCF_RESKEY_CRM_meta_notify_stop_resource``
    
 Monitoring Promotable Clone Resources
 _____________________________________
 
 The usual monitor actions are insufficient to monitor a promotable clone
 resource, because Pacemaker needs to verify not only that the resource is
 active, but also that its actual role matches its intended one.
 
 To do this, define two monitor operations: the usual one will cover the
 unpromoted role, and an additional one with ``role="Promoted"`` will cover
 the promoted role.
    
 .. topic:: Monitoring both states of a promotable clone resource
 
    .. code-block:: xml
 
       <clone id="myPromotableRsc">
          <meta_attributes id="myPromotableRsc-meta">
             <nvpair id="myPromotableRsc-meta-promotable" name="promotable" value="true"/>
          </meta_attributes>
          <primitive id="myRsc" class="ocf" type="myApp" provider="myCorp">
             <operations>
                <op id="myRsc-unpromoted-check" name="monitor" interval="60"/>
                <op id="myRsc-promoted-check" name="monitor" interval="61" role="Promoted"/>
             </operations>
          </primitive>
       </clone>
    
 .. important::
 
    It is crucial that *every* monitor operation has a different interval!
    Pacemaker currently differentiates between operations
    only by resource and interval; so if (for example) a promotable clone resource
    had the same monitor interval for both roles, Pacemaker would ignore the
    role when checking the status -- which would cause unexpected return
    codes, and therefore unnecessary complications.
    
 .. _s-promotion-scores:
 
 Determining Which Instance is Promoted
 ______________________________________
 
 Pacemaker can choose a promotable clone instance to be promoted in one of two
 ways:
 
 * Promotion scores: These are node attributes set via the ``crm_attribute``
   command using the ``--promotion`` option, typically called from the
   resource agent's start action if it supports promotable clones (see the
   sketch below). The tool automatically detects both the resource and host,
   and should be used to set a preference for being promoted. Based on this
   preference, ``promoted-max``, and ``promoted-node-max``, the instance(s)
   with the highest preference will be promoted.
 
 * Constraints: Location constraints can indicate which nodes are most preferred
   to be promoted.
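 
 For example, a resource agent that supports promotion might end its start
 action with something like the following sketch. This is illustrative only;
 the helper name ``myapp_start_service`` and the score of 10 are assumptions,
 not part of any actual agent:
 
 .. topic:: Setting a promotion score from a start action (sketch)
 
    .. code-block:: none
 
       myapp_start() {
           myapp_start_service || return 1   # OCF_ERR_GENERIC
           # Record this node's preference for being promoted; the resource
           # and node are detected automatically from the environment.
           crm_attribute --promotion -v 10
           return 0                          # OCF_SUCCESS
       }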
    
 .. topic:: Explicitly preferring node1 to be promoted
 
    .. code-block:: xml
 
       <rsc_location id="promoted-location" rsc="myPromotableRsc">
           <rule id="promoted-rule" score="100" role="Promoted">
             <expression id="promoted-exp" attribute="#uname" operation="eq" value="node1"/>
           </rule>
       </rsc_location> 
 
 .. index::
    single: bundle resource
    single: resource; bundle
    pair: container; Docker
    pair: container; podman
    pair: container; rkt
    
 .. _s-resource-bundle:
 
 Bundles - Containerized Resources
 #################################
 
 Pacemaker supports a special syntax for launching a service inside a
 `container <https://en.wikipedia.org/wiki/Operating-system-level_virtualization>`_
 with any infrastructure it requires: the *bundle*.
    
 Pacemaker bundles support `Docker <https://www.docker.com/>`_,
 `podman <https://podman.io/>`_ *(since 2.0.1)*, and
 `rkt <https://coreos.com/rkt/>`_ container technologies. [#]_
    
 .. topic:: A bundle for a containerized web server
 
    .. code-block:: xml
 
       <bundle id="httpd-bundle">
          <podman image="pcmk:http" replicas="3"/>
          <network ip-range-start="192.168.122.131"
                   host-netmask="24"
                   host-interface="eth0">
             <port-mapping id="httpd-port" port="80"/>
          </network>
          <storage>
             <storage-mapping id="httpd-syslog"
                              source-dir="/dev/log"
                              target-dir="/dev/log"
                              options="rw"/>
             <storage-mapping id="httpd-root"
                              source-dir="/srv/html"
                              target-dir="/var/www/html"
                              options="rw,Z"/>
             <storage-mapping id="httpd-logs"
                              source-dir-root="/var/log/pacemaker/bundles"
                              target-dir="/etc/httpd/logs"
                              options="rw,Z"/>
          </storage>
          <primitive class="ocf" id="httpd" provider="heartbeat" type="apache"/>
       </bundle>
 
 .. index::
    single: bundle resource
    single: resource; bundle
    
 Bundle Prerequisites
 ____________________
    
 Before configuring a bundle in Pacemaker, the user must install the appropriate
 container launch technology (Docker, podman, or rkt), and supply a fully
 configured container image, on every node allowed to run the bundle.
 
 Pacemaker will create an implicit resource of type **ocf:heartbeat:docker**,
 **ocf:heartbeat:podman**, or **ocf:heartbeat:rkt** to manage a bundle's
 container. The user must ensure that the appropriate resource agent is
 installed on every node allowed to run the bundle.
 
 .. index::
    pair: XML element; bundle
    
 Bundle Properties
 _________________
    
 .. table:: **XML Attributes of a bundle Element**
+   :widths: 1 4
 
    +-------------+-----------------------------------------------+
    | Attribute   | Description                                   |
    +=============+===============================================+
    | id          | .. index::                                    |
    |             |    single: bundle; attribute, id              |
    |             |    single: attribute; id (bundle)             |
    |             |    single: id; bundle attribute               |
    |             |                                               |
    |             | A unique name for the bundle (required)       |
    +-------------+-----------------------------------------------+
    | description | .. index::                                    |
    |             |    single: bundle; attribute, description     |
    |             |    single: attribute; description (bundle)    |
    |             |    single: description; bundle attribute      |
    |             |                                               |
    |             | Arbitrary text (not used by Pacemaker)        |
    +-------------+-----------------------------------------------+
    
 A bundle must contain exactly one ``docker``, ``podman``, or ``rkt`` element.
 
 .. index::
    pair: XML element; docker
    pair: XML element; podman
    pair: XML element; rkt
    single: resource; bundle
    
 Bundle Container Properties
 ___________________________
    
 .. table:: **XML attributes of a docker, podman, or rkt Element**
+   :class: longtable
+   :widths: 2 3 4
    
    +-------------------+------------------------------------+---------------------------------------------------+
    | Attribute         | Default                            | Description                                       |
    +===================+====================================+===================================================+
    | image             |                                    | .. index::                                        |
    |                   |                                    |    single: docker; attribute, image               |
    |                   |                                    |    single: attribute; image (docker)              |
    |                   |                                    |    single: image; docker attribute                |
    |                   |                                    |    single: podman; attribute, image               |
    |                   |                                    |    single: attribute; image (podman)              |
    |                   |                                    |    single: image; podman attribute                |
    |                   |                                    |    single: rkt; attribute, image                  |
    |                   |                                    |    single: attribute; image (rkt)                 |
    |                   |                                    |    single: image; rkt attribute                   |
    |                   |                                    |                                                   |
    |                   |                                    | Container image tag (required)                    |
    +-------------------+------------------------------------+---------------------------------------------------+
    | replicas          | Value of ``promoted-max``          | .. index::                                        |
    |                   | if that is positive, else 1        |    single: docker; attribute, replicas            |
    |                   |                                    |    single: attribute; replicas (docker)           |
    |                   |                                    |    single: replicas; docker attribute             |
    |                   |                                    |    single: podman; attribute, replicas            |
    |                   |                                    |    single: attribute; replicas (podman)           |
    |                   |                                    |    single: replicas; podman attribute             |
    |                   |                                    |    single: rkt; attribute, replicas               |
    |                   |                                    |    single: attribute; replicas (rkt)              |
    |                   |                                    |    single: replicas; rkt attribute                |
    |                   |                                    |                                                   |
    |                   |                                    | A positive integer specifying the number of       |
    |                   |                                    | container instances to launch                     |
    +-------------------+------------------------------------+---------------------------------------------------+
    | replicas-per-host | 1                                  | .. index::                                        |
    |                   |                                    |    single: docker; attribute, replicas-per-host   |
    |                   |                                    |    single: attribute; replicas-per-host (docker)  |
    |                   |                                    |    single: replicas-per-host; docker attribute    |
    |                   |                                    |    single: podman; attribute, replicas-per-host   |
    |                   |                                    |    single: attribute; replicas-per-host (podman)  |
    |                   |                                    |    single: replicas-per-host; podman attribute    |
    |                   |                                    |    single: rkt; attribute, replicas-per-host      |
    |                   |                                    |    single: attribute; replicas-per-host (rkt)     |
    |                   |                                    |    single: replicas-per-host; rkt attribute       |
    |                   |                                    |                                                   |
    |                   |                                    | A positive integer specifying the number of       |
    |                   |                                    | container instances allowed to run on a           |
    |                   |                                    | single node                                       |
    +-------------------+------------------------------------+---------------------------------------------------+
    | promoted-max      | 0                                  | .. index::                                        |
    |                   |                                    |    single: docker; attribute, promoted-max        |
    |                   |                                    |    single: attribute; promoted-max (docker)       |
    |                   |                                    |    single: promoted-max; docker attribute         |
    |                   |                                    |    single: podman; attribute, promoted-max        |
    |                   |                                    |    single: attribute; promoted-max (podman)       |
    |                   |                                    |    single: promoted-max; podman attribute         |
    |                   |                                    |    single: rkt; attribute, promoted-max           |
    |                   |                                    |    single: attribute; promoted-max (rkt)          |
    |                   |                                    |    single: promoted-max; rkt attribute            |
    |                   |                                    |                                                   |
    |                   |                                    | A non-negative integer that, if positive,         |
    |                   |                                    | indicates that the containerized service          |
    |                   |                                    | should be treated as a promotable service,        |
    |                   |                                    | with this many replicas allowed to run the        |
    |                   |                                    | service in the promoted role                      |
    +-------------------+------------------------------------+---------------------------------------------------+
    | network           |                                    | .. index::                                        |
    |                   |                                    |    single: docker; attribute, network             |
    |                   |                                    |    single: attribute; network (docker)            |
    |                   |                                    |    single: network; docker attribute              |
    |                   |                                    |    single: podman; attribute, network             |
    |                   |                                    |    single: attribute; network (podman)            |
    |                   |                                    |    single: network; podman attribute              |
    |                   |                                    |    single: rkt; attribute, network                |
    |                   |                                    |    single: attribute; network (rkt)               |
    |                   |                                    |    single: network; rkt attribute                 |
    |                   |                                    |                                                   |
    |                   |                                    | If specified, this will be passed to the          |
    |                   |                                    | ``docker run``, ``podman run``, or                |
    |                   |                                    | ``rkt run`` command as the network setting        |
    |                   |                                    | for the container.                                |
    +-------------------+------------------------------------+---------------------------------------------------+
    | run-command       | ``/usr/sbin/pacemaker-remoted`` if | .. index::                                        |
    |                   | bundle contains a **primitive**,   |    single: docker; attribute, run-command         |
    |                   | otherwise none                     |    single: attribute; run-command (docker)        |
    |                   |                                    |    single: run-command; docker attribute          |
    |                   |                                    |    single: podman; attribute, run-command         |
    |                   |                                    |    single: attribute; run-command (podman)        |
    |                   |                                    |    single: run-command; podman attribute          |
    |                   |                                    |    single: rkt; attribute, run-command            |
    |                   |                                    |    single: attribute; run-command (rkt)           |
    |                   |                                    |    single: run-command; rkt attribute             |
    |                   |                                    |                                                   |
    |                   |                                    | This command will be run inside the container     |
    |                   |                                    | when launching it ("PID 1"). If the bundle        |
    |                   |                                    | contains a **primitive**, this command *must*     |
    |                   |                                    | start ``pacemaker-remoted`` (but could, for       |
    |                   |                                    | example, be a script that does other things too). |
    +-------------------+------------------------------------+---------------------------------------------------+
    | options           |                                    | .. index::                                        |
    |                   |                                    |    single: docker; attribute, options             |
    |                   |                                    |    single: attribute; options (docker)            |
    |                   |                                    |    single: options; docker attribute              |
    |                   |                                    |    single: podman; attribute, options             |
    |                   |                                    |    single: attribute; options (podman)            |
    |                   |                                    |    single: options; podman attribute              |
    |                   |                                    |    single: rkt; attribute, options                |
    |                   |                                    |    single: attribute; options (rkt)               |
    |                   |                                    |    single: options; rkt attribute                 |
    |                   |                                    |                                                   |
    |                   |                                    | Extra command-line options to pass to the         |
    |                   |                                    | ``docker run``, ``podman run``, or ``rkt run``    |
    |                   |                                    | command                                           |
    +-------------------+------------------------------------+---------------------------------------------------+
    
 .. note::
 
    Considerations when using cluster configurations or container images from
    Pacemaker 1.1:
    
    * If the container image has a pre-2.0.0 version of Pacemaker, set ``run-command``
      to ``/usr/sbin/pacemaker_remoted`` (note the underscore instead of the dash).
    
    * ``masters`` is accepted as an alias for ``promoted-max``, but is deprecated since
      2.0.0, and support for it will be removed in a future version.
 
 Bundle Network Properties
 _________________________
    
 A bundle may optionally contain one ``<network>`` element.
 
 .. index::
    pair: XML element; network
    single: resource; bundle
    single: bundle; networking
    
-.. topic:: **XML attributes of a network Element**
+.. table:: **XML attributes of a network Element**
+   :widths: 2 1 5
    
    +----------------+---------+------------------------------------------------------------+
    | Attribute      | Default | Description                                                |
    +================+=========+============================================================+
    | add-host       | TRUE    | .. index::                                                 |
    |                |         |    single: network; attribute, add-host                    |
    |                |         |    single: attribute; add-host (network)                   |
    |                |         |    single: add-host; network attribute                     |
    |                |         |                                                            |
    |                |         | If TRUE, and ``ip-range-start`` is used, Pacemaker will    |
    |                |         | automatically ensure that ``/etc/hosts`` inside the        |
    |                |         | containers has entries for each                            |
    |                |         | :ref:`replica name <s-resource-bundle-note-replica-names>` |
    |                |         | and its assigned IP.                                       |
    +----------------+---------+------------------------------------------------------------+
    | ip-range-start |         | .. index::                                                 |
    |                |         |    single: network; attribute, ip-range-start              |
    |                |         |    single: attribute; ip-range-start (network)             |
    |                |         |    single: ip-range-start; network attribute               |
    |                |         |                                                            |
    |                |         | If specified, Pacemaker will create an implicit            |
    |                |         | ``ocf:heartbeat:IPaddr2`` resource for each container      |
    |                |         | instance, starting with this IP address, using up to       |
    |                |         | ``replicas`` sequential addresses. These addresses can be  |
    |                |         | used from the host's network to reach the service inside   |
    |                |         | the container, though the address is not visible within    |
    |                |         | the container itself. Only IPv4 addresses are currently    |
    |                |         | supported.                                                 |
    +----------------+---------+------------------------------------------------------------+
    | host-netmask   | 32      | .. index::                                                 |
    |                |         |    single: network; attribute; host-netmask                |
    |                |         |    single: attribute; host-netmask (network)               |
    |                |         |    single: host-netmask; network attribute                 |
    |                |         |                                                            |
    |                |         | If ``ip-range-start`` is specified, the IP addresses       |
    |                |         | are created with this CIDR netmask (as a number of bits).  |
    +----------------+---------+------------------------------------------------------------+
    | host-interface |         | .. index::                                                 |
    |                |         |    single: network; attribute; host-interface              |
    |                |         |    single: attribute; host-interface (network)             |
    |                |         |    single: host-interface; network attribute               |
    |                |         |                                                            |
    |                |         | If ``ip-range-start`` is specified, the IP addresses are   |
    |                |         | created on this host interface (by default, it will be     |
    |                |         | determined from the IP address).                           |
    +----------------+---------+------------------------------------------------------------+
    | control-port   | 3121    | .. index::                                                 |
    |                |         |    single: network; attribute; control-port                |
    |                |         |    single: attribute; control-port (network)               |
    |                |         |    single: control-port; network attribute                 |
    |                |         |                                                            |
    |                |         | If the bundle contains a ``primitive``, the cluster will   |
    |                |         | use this integer TCP port for communication with           |
    |                |         | Pacemaker Remote inside the container. Changing this is    |
    |                |         | useful when the container is unable to listen on the       |
    |                |         | default port, for example, when the container uses the     |
    |                |         | host's network rather than ``ip-range-start`` (in which    |
    |                |         | case ``replicas-per-host`` must be 1), or when the bundle  |
    |                |         | may run on a Pacemaker Remote node that is already         |
    |                |         | listening on the default port. Any ``PCMK_remote_port``    |
    |                |         | environment variable set on the host or in the container   |
    |                |         | is ignored for bundle connections.                         |
    +----------------+---------+------------------------------------------------------------+
    
 .. _s-resource-bundle-note-replica-names:
 
 .. note::
 
    Replicas are named by the bundle id plus a dash and an integer counter starting
    with zero. For example, if a bundle named **httpd-bundle** has **replicas=2**, its
    containers will be named **httpd-bundle-0** and **httpd-bundle-1**.
 
 .. index::
    pair: XML element; port-mapping
    
 Additionally, a ``network`` element may optionally contain one or more
 ``port-mapping`` elements.
    
 .. table:: **Attributes of a port-mapping Element**
+   :widths: 2 1 5
    
    +---------------+-------------------+------------------------------------------------------+
    | Attribute     | Default           | Description                                          |
    +===============+===================+======================================================+
    | id            |                   | .. index::                                           |
    |               |                   |    single: port-mapping; attribute, id               |
    |               |                   |    single: attribute; id (port-mapping)              |
    |               |                   |    single: id; port-mapping attribute                |
    |               |                   |                                                      |
    |               |                   | A unique name for the port mapping (required)        |
    +---------------+-------------------+------------------------------------------------------+
    | port          |                   | .. index::                                           |
    |               |                   |    single: port-mapping; attribute, port             |
    |               |                   |    single: attribute; port (port-mapping)            |
    |               |                   |    single: port; port-mapping attribute              |
    |               |                   |                                                      |
    |               |                   | If this is specified, connections to this TCP port   |
    |               |                   | number on the host network (on the container's       |
    |               |                   | assigned IP address, if ``ip-range-start`` is        |
    |               |                   | specified) will be forwarded to the container        |
    |               |                   | network. Exactly one of ``port`` or ``range``        |
    |               |                   | must be specified in a ``port-mapping``.             |
    +---------------+-------------------+------------------------------------------------------+
    | internal-port | value of ``port`` | .. index::                                           |
    |               |                   |    single: port-mapping; attribute, internal-port    |
    |               |                   |    single: attribute; internal-port (port-mapping)   |
    |               |                   |    single: internal-port; port-mapping attribute     |
    |               |                   |                                                      |
    |               |                   | If ``port`` and this are specified, connections      |
    |               |                   | to ``port`` on the host's network will be            |
    |               |                   | forwarded to this port on the container network.     |
    +---------------+-------------------+------------------------------------------------------+
    | range         |                   | .. index::                                           |
    |               |                   |    single: port-mapping; attribute, range            |
    |               |                   |    single: attribute; range (port-mapping)           |
    |               |                   |    single: range; port-mapping attribute             |
    |               |                   |                                                      |
    |               |                   | If this is specified, connections to these TCP       |
    |               |                   | port numbers (expressed as *first_port*-*last_port*) |
    |               |                   | on the host network (on the container's assigned IP  |
    |               |                   | address, if ``ip-range-start`` is specified) will    |
    |               |                   | be forwarded to the same ports in the container      |
    |               |                   | network. Exactly one of ``port`` or ``range``        |
    |               |                   | must be specified in a ``port-mapping``.             |
    +---------------+-------------------+------------------------------------------------------+
 
 .. note::
 
    If the bundle contains a ``primitive``, Pacemaker will automatically map the
    ``control-port``, so it is not necessary to specify that port in a
    ``port-mapping``.
 
 .. index::
    pair: XML element; storage
    pair: XML element; storage-mapping
    single: resource; bundle
    
 .. _s-bundle-storage:
 
 Bundle Storage Properties
 _________________________
    
 A bundle may optionally contain one ``storage`` element. A ``storage`` element
 has no properties of its own, but may contain one or more ``storage-mapping``
 elements.
    
 .. table:: **Attributes of a storage-mapping Element**
+   :widths: 2 1 5
    
    +-----------------+---------+-------------------------------------------------------------+
    | Attribute       | Default | Description                                                 |
    +=================+=========+=============================================================+
    | id              |         | .. index::                                                  |
    |                 |         |    single: storage-mapping; attribute, id                   |
    |                 |         |    single: attribute; id (storage-mapping)                  |
    |                 |         |    single: id; storage-mapping attribute                    |
    |                 |         |                                                             |
    |                 |         | A unique name for the storage mapping (required)            |
    +-----------------+---------+-------------------------------------------------------------+
    | source-dir      |         | .. index::                                                  |
    |                 |         |    single: storage-mapping; attribute, source-dir           |
    |                 |         |    single: attribute; source-dir (storage-mapping)          |
    |                 |         |    single: source-dir; storage-mapping attribute            |
    |                 |         |                                                             |
    |                 |         | The absolute path on the host's filesystem that will be     |
    |                 |         | mapped into the container. Exactly one of ``source-dir``    |
    |                 |         | and ``source-dir-root`` must be specified in a              |
    |                 |         | ``storage-mapping``.                                        |
    +-----------------+---------+-------------------------------------------------------------+
    | source-dir-root |         | .. index::                                                  |
    |                 |         |    single: storage-mapping; attribute, source-dir-root      |
    |                 |         |    single: attribute; source-dir-root (storage-mapping)     |
    |                 |         |    single: source-dir-root; storage-mapping attribute       |
    |                 |         |                                                             |
    |                 |         | The start of a path on the host's filesystem that will      |
    |                 |         | be mapped into the container, using a different             |
    |                 |         | subdirectory on the host for each container instance.       |
    |                 |         | The subdirectory will be named the same as the              |
    |                 |         | :ref:`replica name <s-resource-bundle-note-replica-names>`. |
    |                 |         | Exactly one of ``source-dir`` and ``source-dir-root``       |
    |                 |         | must be specified in a ``storage-mapping``.                 |
    +-----------------+---------+-------------------------------------------------------------+
    | target-dir      |         | .. index::                                                  |
    |                 |         |    single: storage-mapping; attribute, target-dir           |
    |                 |         |    single: attribute; target-dir (storage-mapping)          |
    |                 |         |    single: target-dir; storage-mapping attribute            |
    |                 |         |                                                             |
    |                 |         | The path name within the container where the host           |
    |                 |         | storage will be mapped (required)                           |
    +-----------------+---------+-------------------------------------------------------------+
    | options         |         | .. index::                                                  |
    |                 |         |    single: storage-mapping; attribute, options              |
    |                 |         |    single: attribute; options (storage-mapping)             |
    |                 |         |    single: options; storage-mapping attribute               |
    |                 |         |                                                             |
    |                 |         | A comma-separated list of file system mount                 |
    |                 |         | options to use when mapping the storage                     |
    +-----------------+---------+-------------------------------------------------------------+
    
 .. note::
 
    Pacemaker does not define the behavior if the source directory does not already
    exist on the host. However, it is expected that the container technology and/or
    its resource agent will create the source directory in that case.
    
 .. note::
 
    If the bundle contains a ``primitive``,
    Pacemaker will automatically map the equivalent of
    ``source-dir=/etc/pacemaker/authkey target-dir=/etc/pacemaker/authkey``
    and ``source-dir-root=/var/log/pacemaker/bundles target-dir=/var/log`` into the
    container, so it is not necessary to specify those paths in a
    ``storage-mapping``.
    
 .. important::
 
    The ``PCMK_authkey_location`` environment variable must not be set to anything
    other than the default of ``/etc/pacemaker/authkey`` on any node in the cluster.
    
 .. important::
 
    If SELinux is used in enforcing mode on the host, you must ensure the container
    is allowed to use any storage you mount into it. For Docker and podman bundles,
    adding "Z" to the mount options will create a container-specific label for the
    mount that allows the container access.
 
 .. index::
    single: resource; bundle
    
 Bundle Primitive
 ________________
    
 A bundle may optionally contain one :ref:`primitive <primitive-resource>`
 resource. The primitive may have operations, instance attributes, and
 meta-attributes defined, as usual.
 
 If a bundle contains a primitive resource, the container image must include
 the Pacemaker Remote daemon, and at least one of ``ip-range-start`` or
 ``control-port`` must be configured in the bundle. Pacemaker will create an
 implicit **ocf:pacemaker:remote** resource for the connection, launch
 Pacemaker Remote within the container, and monitor and manage the primitive
 resource via Pacemaker Remote.
 
 If the bundle has more than one container instance (replica), the primitive
 resource will function as an implicit :ref:`clone <s-resource-clone>` -- a
 :ref:`promotable clone <s-resource-promotable>` if the bundle has ``promoted-max``
 greater than zero.
     
 .. note::
 
    If you want to pass environment variables to a bundle's Pacemaker Remote
    connection or primitive, you have two options:
    
    * Environment variables whose value is the same regardless of the underlying host
      may be set using the container element's ``options`` attribute.
    * If you want variables to have host-specific values, you can use the
      :ref:`storage-mapping <s-bundle-storage>` element to map a file on the host as
      ``/etc/pacemaker/pcmk-init.env`` in the container *(since 2.0.3)*.
      Pacemaker Remote will parse this file in a shell-like format: variables
      are set as NAME=VALUE, and blank lines and comments starting with "#"
      are ignored (see the example below).
    
 .. important::
 
    When a bundle has a ``primitive``, Pacemaker on all cluster nodes must be able to
    contact Pacemaker Remote inside the bundle's containers.
    
    * The containers must have an accessible network (for example, ``network`` should
      not be set to "none" with a ``primitive``).
    * The default, using a distinct network space inside the container, works in
      combination with ``ip-range-start``. Any firewall must allow access from all
      cluster nodes to the ``control-port`` on the container IPs.
    * If the container shares the host's network space (for example, by setting
      ``network`` to "host"), a unique ``control-port`` should be specified for each
      bundle. Any firewall must allow access from all cluster nodes to the
      ``control-port`` on all cluster and remote node IPs.
    
 .. index::
    single: resource; bundle
 
 .. _s-bundle-attributes:
 
 Bundle Node Attributes
 ______________________
    
 If the bundle has a ``primitive``, the primitive's resource agent may want to set
 node attributes such as :ref:`promotion scores <s-promotion-scores>`. However, with
 containers, it is not apparent which node should get the attribute.
 
 If the container uses shared storage that is the same no matter which node the
 container is hosted on, then it is appropriate to use the promotion score on the
 bundle node itself.
 
 On the other hand, if the container uses storage exported from the underlying host,
 then it may be more appropriate to use the promotion score on the underlying host.
 
 Since this depends on the particular situation, the
 ``container-attribute-target`` resource meta-attribute allows the user to specify
 which approach to use. If it is set to ``host``, then user-defined node attributes
 will be checked on the underlying host. If it is anything else, the local node
 (in this case the bundle node) is used as usual.
 
 This only applies to user-defined attributes; the cluster will always check the
 local node for cluster-defined attributes such as ``#uname``.
 
 If ``container-attribute-target`` is ``host``, the cluster will pass additional
 environment variables to the primitive's resource agent that allow it to set
 node attributes appropriately: ``CRM_meta_container_attribute_target`` (identical
 to the meta-attribute value) and ``CRM_meta_physical_host`` (the name of the
 underlying host).
    
 .. note::
 
    When called by a resource agent, the ``attrd_updater`` and ``crm_attribute``
    commands will automatically check those environment variables and set
    attributes appropriately.
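 
 For example, a resource agent might set a node attribute like this (a
 sketch; the attribute name and value are arbitrary). With
 ``container-attribute-target`` set to ``host``, the same command
 transparently sets the attribute on the underlying host:
 
 .. code-block:: none
 
    # attrd_updater --name my-app-score --update 100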
    
 .. index::
    single: resource; bundle
 
 Bundle Meta-Attributes
 ______________________
    
 Any meta-attribute set on a bundle will be inherited by the bundle's
 primitive and any resources implicitly created by Pacemaker for the bundle.
 
 This includes options such as ``priority``, ``target-role``, and ``is-managed``. See
 :ref:`resource_options` for more information.
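 
 For example (a sketch), setting ``target-role`` on the bundle keeps the
 bundle and all of its implicit resources stopped:
 
 .. topic:: Bundle meta-attribute sketch
 
    .. code-block:: xml
 
       <bundle id="httpd-bundle">
          <meta_attributes id="httpd-bundle-meta">
             <nvpair id="httpd-bundle-meta-target-role"
                     name="target-role" value="Stopped"/>
          </meta_attributes>
          <podman image="pcmk:httpd" replicas="3"/>
       </bundle>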
    
 Limitations of Bundles
 ______________________
    
 Restarting Pacemaker while a bundle is unmanaged or the cluster is in
 maintenance mode may cause the bundle to fail.
 
 Bundles may not be explicitly cloned or included in groups. This includes the
 bundle's primitive and any resources implicitly created by Pacemaker for the
 bundle. (If ``replicas`` is greater than 1, the bundle implicitly behaves
 like a clone.)
 
 Bundles do not have instance attributes, utilization attributes, or operations,
 though a bundle's primitive may have them.
 
 A bundle with a primitive can run on a Pacemaker Remote node only if the bundle
 uses a distinct ``control-port``.
 
 .. [#] Of course, the service must support running multiple instances.
 
 .. [#] Docker is a trademark of Docker, Inc. No endorsement by or association with
    Docker, Inc. is implied.
diff --git a/doc/sphinx/Pacemaker_Explained/alerts.rst b/doc/sphinx/Pacemaker_Explained/alerts.rst
index 7db6a0d6df..03e6622225 100644
--- a/doc/sphinx/Pacemaker_Explained/alerts.rst
+++ b/doc/sphinx/Pacemaker_Explained/alerts.rst
@@ -1,497 +1,501 @@
 .. index::
    single: alert
    single: resource; alert
    single: node; alert
    single: fencing; alert
    pair: XML element; alert
    pair: XML element; alerts
 
 Alerts
 ------
 
 *Alerts* may be configured to take some external action when a cluster event
 occurs (node failure, resource starting or stopping, etc.).
 
 
 .. index::
    pair: alert; agent
 
 Alert Agents
 ############
 
 As with resource agents, the cluster calls an external program (an
 *alert agent*) to handle alerts. The cluster passes information about the event
 to the agent via environment variables. Agents can do anything desired with
 this information (send an e-mail, log to a file, update a monitoring system,
 etc.).
 
 .. topic:: Simple alert configuration
 
    .. code-block:: xml
 
       <configuration>
          <alerts>
             <alert id="my-alert" path="/path/to/my-script.sh" />
          </alerts>
       </configuration>
 
 In the example above, the cluster will call ``my-script.sh`` for each event.
 
 Multiple alert agents may be configured; the cluster will call all of them for
 each event.
 
 Alert agents will be called only on cluster nodes. They will be called for
 events involving Pacemaker Remote nodes, but they will never be called *on*
 those nodes.
    
 
 .. index::
    single: alert; recipient
    pair: XML element; recipient
 
 Alert Recipients
 ################
    
 Usually, alerts are directed towards a recipient. Thus, each alert may be
 additionally configured with one or more recipients. The cluster will call the
 agent separately for each recipient.
    
 .. topic:: Alert configuration with recipient
 
    .. code-block:: xml
 
       <configuration>
          <alerts>
             <alert id="my-alert" path="/path/to/my-script.sh">
                 <recipient id="my-alert-recipient" value="some-address"/>
             </alert>
          </alerts>
       </configuration>
    
 In the above example, the cluster will call ``my-script.sh`` for each event,
 passing the recipient ``some-address`` as an environment variable.
 
 The recipient may be anything the alert agent can recognize -- an IP address,
 an e-mail address, a file name, whatever the particular agent supports.
    
    
 .. index::
    single: alert; meta-attributes
    single: meta-attribute; alert meta-attributes
 
 Alert Meta-Attributes
 #####################
    
 As with resource agents, meta-attributes can be configured for alert agents
 to affect how Pacemaker calls them.
    
 .. table:: **Meta-Attributes of an Alert**
+   :class: longtable
+   :widths: 1 1 3
    
    +------------------+---------------+-----------------------------------------------------+
    | Meta-Attribute   | Default       | Description                                         |
    +==================+===============+=====================================================+
    | timestamp-format | %H:%M:%S.%06N | .. index::                                          |
    |                  |               |    single: alert; meta-attribute, timestamp-format  |
    |                  |               |    single: meta-attribute; timestamp-format (alert) |
    |                  |               |    single: timestamp-format; alert meta-attribute   |
    |                  |               |                                                     |
    |                  |               | Format the cluster will use when sending the        |
    |                  |               | event's timestamp to the agent. This is a string as |
    |                  |               | used with the ``date(1)`` command.                  |
    +------------------+---------------+-----------------------------------------------------+
    | timeout          | 30s           | .. index::                                          |
    |                  |               |    single: alert; meta-attribute, timeout           |
    |                  |               |    single: meta-attribute; timeout (alert)          |
    |                  |               |    single: timeout; alert meta-attribute            |
    |                  |               |                                                     |
    |                  |               | If the alert agent does not complete within this    |
    |                  |               | amount of time, it will be terminated.              |
    +------------------+---------------+-----------------------------------------------------+
    
 Meta-attributes can be configured per alert agent and/or per recipient.
    
 .. topic:: Alert configuration with meta-attributes
 
    .. code-block:: xml
 
       <configuration>
          <alerts>
             <alert id="my-alert" path="/path/to/my-script.sh">
                <meta_attributes id="my-alert-attributes">
                   <nvpair id="my-alert-attributes-timeout" name="timeout"
                           value="15s"/>
                </meta_attributes>
                <recipient id="my-alert-recipient1" value="someuser@example.com">
                   <meta_attributes id="my-alert-recipient1-attributes">
                      <nvpair id="my-alert-recipient1-timestamp-format"
                              name="timestamp-format" value="%D %H:%M"/>
                   </meta_attributes>
                </recipient>
                <recipient id="my-alert-recipient2" value="otheruser@example.com">
                   <meta_attributes id="my-alert-recipient2-attributes">
                      <nvpair id="my-alert-recipient2-timestamp-format"
                              name="timestamp-format" value="%c"/>
                   </meta_attributes>
                </recipient>
             </alert>
          </alerts>
       </configuration>
    
 In the above example, ``my-script.sh`` will be called twice for each
 event, with each call using a 15-second timeout. One call will be passed the
 recipient ``someuser@example.com`` and a timestamp in the format ``%D %H:%M``,
 while the other call will be passed the recipient ``otheruser@example.com`` and
 a timestamp in the format ``%c``.
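 
 The resulting timestamps match what the ``date(1)`` command prints for the
 same format string. For instance (output will vary with the event's date,
 time, and locale):
 
 .. code-block:: none
 
    # date "+%D %H:%M"
    04/01/25 13:05
    # date "+%c"
    Tue Apr  1 13:05:32 2025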
    
    
 .. index::
    single: alert; instance attributes
    single: instance attribute; alert instance attributes
 
 Alert Instance Attributes
 #########################
    
 As with resource agents, agent-specific configuration values may be configured
 as instance attributes. These will be passed to the agent as additional
 environment variables. The number, names and allowed values of these instance
 attributes are completely up to the particular agent.
    
 .. topic:: Alert configuration with instance attributes
 
    .. code-block:: xml
 
       <configuration>
          <alerts>
             <alert id="my-alert" path="/path/to/my-script.sh">
                <meta_attributes id="my-alert-attributes">
                   <nvpair id="my-alert-attributes-timeout" name="timeout"
                           value="15s"/>
                </meta_attributes>
                <instance_attributes id="my-alert-options">
                    <nvpair id="my-alert-options-debug" name="debug"
                            value="false"/>
                </instance_attributes>
                <recipient id="my-alert-recipient1"
                           value="someuser@example.com"/>
             </alert>
          </alerts>
       </configuration>
    
    
 .. index::
    single: alert; filters
    pair: XML element; select
    pair: XML element; select_nodes
    pair: XML element; select_fencing
    pair: XML element; select_resources
    pair: XML element; select_attributes
    pair: XML element; attribute
 
 Alert Filters
 #############
    
 By default, an alert agent will be called for node events, fencing events, and
 resource events. An agent may choose to ignore certain types of events, but
 there is still the overhead of calling it for those events. To eliminate that
 overhead, you may select which types of events the agent should receive.
    
 .. topic:: Alert configuration to receive only node events and fencing events
 
    .. code-block:: xml
 
       <configuration>
          <alerts>
             <alert id="my-alert" path="/path/to/my-script.sh">
                <select>
                   <select_nodes />
                   <select_fencing />
                </select>
                <recipient id="my-alert-recipient1"
                           value="someuser@example.com"/>
             </alert>
          </alerts>
       </configuration>
    
 The possible options within ``<select>`` are ``<select_nodes>``,
 ``<select_fencing>``, ``<select_resources>``, and ``<select_attributes>``.
 
 With ``<select_attributes>`` (the only event type not enabled by default), the
 agent will receive alerts when a node attribute changes. If you wish the agent
 to be called only when certain attributes change, you can configure that as well.
    
 .. topic:: Alert configuration to be called when certain node attributes change
 
    .. code-block:: xml
 
       <configuration>
          <alerts>
             <alert id="my-alert" path="/path/to/my-script.sh">
                <select>
                   <select_attributes>
                      <attribute id="alert-standby" name="standby" />
                      <attribute id="alert-shutdown" name="shutdown" />
                   </select_attributes>
                </select>
                <recipient id="my-alert-recipient1" value="someuser@example.com"/>
             </alert>
          </alerts>
       </configuration>
    
 Node attribute alerts are currently considered experimental. Alerts may be
 limited to attributes set via ``attrd_updater``, and agents may be called
 multiple times with the same attribute value.
    
 .. index::
    single: alert; sample agents
 
 Using the Sample Alert Agents
 #############################
    
 Pacemaker provides several sample alert agents, installed in
 ``/usr/share/pacemaker/alerts`` by default.
    
 While these sample scripts may be copied and used as-is, they are provided
 mainly as templates to be edited to suit your purposes. See their source code
 for the full set of instance attributes they support.
    
 .. topic:: Sending cluster events as SNMP traps
 
    .. code-block:: xml
 
       <configuration>
          <alerts>
             <alert id="snmp_alert" path="/path/to/alert_snmp.sh">
                <instance_attributes id="config_for_alert_snmp">
                   <nvpair id="trap_node_states" name="trap_node_states"
                           value="all"/>
                </instance_attributes>
                <meta_attributes id="config_for_timestamp">
                   <nvpair id="ts_fmt" name="timestamp-format"
                           value="%Y-%m-%d,%H:%M:%S.%01N"/>
                </meta_attributes>
                <recipient id="snmp_destination" value="192.168.1.2"/>
             </alert>
          </alerts>
       </configuration>
    
 .. topic:: Sending cluster events as e-mails
 
    .. code-block:: xml
 
       <configuration>
          <alerts>
             <alert id="smtp_alert" path="/path/to/alert_smtp.sh">
                <instance_attributes id="config_for_alert_smtp">
                   <nvpair id="email_sender" name="email_sender"
                           value="donotreply@example.com"/>
                </instance_attributes>
                <recipient id="smtp_destination" value="admin@example.com"/>
             </alert>
          </alerts>
       </configuration>
    
    
 Writing an Alert Agent
 ######################
    
 .. index::
    single: alert; environment variables
    single: environment variable; alert agents
 
 .. table:: **Environment variables passed to alert agents**
+   :class: longtable
+   :widths: 1 3
    
    +---------------------------+----------------------------------------------------------------+
    | Environment Variable      | Description                                                    |
    +===========================+================================================================+
    | CRM_alert_kind            | .. index::                                                     | 
    |                           |   single:environment variable; CRM_alert_kind                  |
    |                           |   single:CRM_alert_kind                                        |
    |                           |                                                                |
    |                           | The type of alert (``node``, ``fencing``, ``resource``, or     |
    |                           | ``attribute``)                                                 |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_node            | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_node                  |
    |                           |   single:CRM_alert_node                                        |
    |                           |                                                                |
    |                           | Name of affected node                                          |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_node_sequence   | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_node_sequence         |
    |                           |   single:CRM_alert_node_sequence                               |
    |                           |                                                                |
    |                           | A sequence number increased whenever an alert is being issued  |
    |                           | on the local node, which can be used to determine the order in |
    |                           | which alerts have been issued by Pacemaker. An alert for an    |
    |                           | event that happened later in time reliably has a higher        |
    |                           | sequence number than alerts for earlier events.                |
    |                           |                                                                |
    |                           | Be aware that this number has no cluster-wide meaning.         |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_recipient       | .. index::                                                     | 
    |                           |   single:environment variable; CRM_alert_recipient             |
    |                           |   single:CRM_alert_recipient                                   |
    |                           |                                                                |
    |                           | The configured recipient                                       |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_timestamp       | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_timestamp             |
    |                           |   single:CRM_alert_timestamp                                   |
    |                           |                                                                |
    |                           | A timestamp created prior to executing the agent, in the       |
    |                           | format specified by the ``timestamp-format`` meta-attribute.   |
    |                           | This allows the agent to have a reliable, high-precision time  |
    |                           | of when the event occurred, regardless of when the agent       |
    |                           | itself was invoked (which could potentially be delayed due to  |
    |                           | system load, etc.).                                            |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_timestamp_epoch | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_timestamp_epoch       |
    |                           |   single:CRM_alert_timestamp_epoch                             |
    |                           |                                                                |
    |                           | The same time as ``CRM_alert_timestamp``, expressed as the     |
    |                           | integer number of seconds since January 1, 1970. This (along   |
    |                           | with ``CRM_alert_timestamp_usec``) can be useful for alert     |
    |                           | agents that need to format time in a specific way rather than  |
    |                           | let the user configure it.                                     |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_timestamp_usec  | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_timestamp_usec        |
    |                           |   single:CRM_alert_timestamp_usec                              |
    |                           |                                                                |
    |                           | The same time as ``CRM_alert_timestamp``, expressed as the     |
    |                           | integer number of microseconds since                           |
    |                           | ``CRM_alert_timestamp_epoch``.                                 |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_version         | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_version               |
    |                           |   single:CRM_alert_version                                     |
    |                           |                                                                |
    |                           | The version of Pacemaker sending the alert                     |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_desc            | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_desc                  |
    |                           |   single:CRM_alert_desc                                        |
    |                           |                                                                |
    |                           | Detail about event. For ``node`` alerts, this is the node's    |
    |                           | current state (``member`` or ``lost``). For ``fencing``        |
    |                           | alerts, this is a summary of the requested fencing operation,  |
    |                           | including origin, target, and fencing operation error code, if |
    |                           | any. For ``resource`` alerts, this is a readable string        |
    |                           | equivalent of ``CRM_alert_status``.                            |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_nodeid          | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_nodeid                |
    |                           |   single:CRM_alert_nodeid                                      |
    |                           |                                                                |
    |                           | ID of node whose status changed (provided with ``node`` alerts |
    |                           | only)                                                          |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_rc              | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_rc                    |
    |                           |   single:CRM_alert_rc                                          |
    |                           |                                                                |
    |                           | The numerical return code of the fencing or resource operation |
    |                           | (provided with ``fencing`` and ``resource`` alerts only)       |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_task            | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_task                  |
    |                           |   single:CRM_alert_task                                        |
    |                           |                                                                |
    |                           | The requested fencing or resource operation (provided with     |
    |                           | ``fencing`` and ``resource`` alerts only)                      |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_exec_time       | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_exec_time             |
    |                           |   single:CRM_alert_exec_time                                   |
    |                           |                                                                |
    |                           | The (wall-clock) time, in milliseconds, that it took to        |
    |                           | execute the action. If the action timed out,                   |
    |                           | ``CRM_alert_status`` will be 2, ``CRM_alert_desc`` will be     |
    |                           | "Timed Out", and this value will be the action timeout. May    |
    |                           | not be supported on all platforms. (``resource`` alerts only)  |
    |                           | *(since 2.0.1)*                                                |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_interval        | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_interval              |
    |                           |   single:CRM_alert_interval                                    |
    |                           |                                                                |
    |                           | The interval of the resource operation (``resource`` alerts    |
    |                           | only)                                                          |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_rsc             | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_rsc                   |
    |                           |   single:CRM_alert_rsc                                         |
    |                           |                                                                |
    |                           | The name of the affected resource (``resource`` alerts only)   |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_status          | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_status                |
    |                           |   single:CRM_alert_status                                      |
    |                           |                                                                |
    |                           | A numerical code used by Pacemaker to represent the operation  |
    |                           | result (``resource`` alerts only)                              |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_target_rc       | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_target_rc             |
    |                           |   single:CRM_alert_target_rc                                   |
    |                           |                                                                |
    |                           | The expected numerical return code of the operation            |
    |                           | (``resource`` alerts only)                                     |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_attribute_name  | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_attribute_name        |
    |                           |   single:CRM_alert_attribute_name                              |
    |                           |                                                                |
    |                           | The name of the node attribute that changed (``attribute``     |
    |                           | alerts only)                                                   |
    +---------------------------+----------------------------------------------------------------+
    | CRM_alert_attribute_value | .. index::                                                     |
    |                           |   single:environment variable; CRM_alert_attribute_value       |
    |                           |   single:CRM_alert_attribute_value                             |
    |                           |                                                                |
    |                           | The new value of the node attribute that changed               |
    |                           | (``attribute`` alerts only)                                    |
    +---------------------------+----------------------------------------------------------------+
    
 Special concerns when writing alert agents:
    
 * Alert agents may be called with no recipient (if none is configured),
   so the agent must be able to handle this situation, even if it
   only exits in that case. (Users may modify the configuration in
   stages, and add a recipient later.)
    
 * If more than one recipient is configured for an alert, the alert agent will
   be called once per recipient. If an agent is not able to run concurrently, it
   should be configured with only a single recipient. The agent is free,
   however, to interpret the recipient as a list.
    
 * When a cluster event occurs, all alerts are fired off at the same time as
   separate processes. Depending on how many alerts and recipients are
   configured, and on what is done within the alert agents, a significant
   load burst may occur. The agent could be written to take this into
   consideration, for example by queueing resource-intensive actions for
   later execution instead of running them directly.
    
 * Alert agents are run as the ``hacluster`` user, which has a minimal set
   of permissions. If an agent requires additional privileges, it is
   recommended to configure ``sudo`` to allow the agent to run the necessary
   commands as another user with the appropriate privileges.
    
 * As always, take care to validate and sanitize user-configured parameters,
   such as ``CRM_alert_timestamp`` (whose content is specified by the
   user-configured ``timestamp-format``), ``CRM_alert_recipient``, and all
   instance attributes. Mostly this is needed simply to protect against
   configuration errors, but if some user can modify the CIB without having
   ``hacluster``-level access to the cluster nodes, it is a potential
   security concern as well, since unsanitized input could allow code
   injection.
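 
 Putting these concerns together, a minimal agent that appends events to the
 file named by the recipient might look like this (an illustrative sketch,
 not one of the shipped samples):
 
 .. code-block:: none
 
    #!/bin/sh
    # Illustrative alert agent: log each event to the file given as the
    # recipient.
 
    # Agents may be called with no recipient; exit cleanly in that case
    [ -n "${CRM_alert_recipient}" ] || exit 0
 
    # CRM_alert_timestamp uses the configured timestamp-format
    printf '%s %s %s\n' \
        "${CRM_alert_timestamp}" "${CRM_alert_kind}" "${CRM_alert_desc}" \
        >> "${CRM_alert_recipient}"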
    
 .. note:: **ocf:pacemaker:ClusterMon compatibility**
 
    The alerts interface is designed to be backward compatible with the external
    scripts interface used by the ``ocf:pacemaker:ClusterMon`` resource, which
    is now deprecated. To preserve this compatibility, the environment variables
    passed to alert agents are available prepended with ``CRM_notify_``
    as well as ``CRM_alert_``. One break in compatibility is that ``ClusterMon``
    ran external scripts as the ``root`` user, while alert agents are run as the
    ``hacluster`` user.
diff --git a/doc/sphinx/Pacemaker_Explained/constraints.rst b/doc/sphinx/Pacemaker_Explained/constraints.rst
index b5b9f8b144..356054761d 100644
--- a/doc/sphinx/Pacemaker_Explained/constraints.rst
+++ b/doc/sphinx/Pacemaker_Explained/constraints.rst
@@ -1,1061 +1,1069 @@
 .. index::
    single: constraint
    single: resource; constraint
 
 .. _constraints:
 
 Resource Constraints
 --------------------
 
 .. index::
    single: resource; score
    single: node; score
 
 Scores
 ######
 
 Scores of all kinds are integral to how the cluster works.
 Practically everything from moving a resource to deciding which
 resource to stop in a degraded cluster is achieved by manipulating
 scores in some way.
 
 Scores are calculated per resource and node. Any node with a
 negative score for a resource can't run that resource. The cluster
 places a resource on the node with the highest score for it.
 
 Infinity Math
 _____________
 
 Pacemaker implements **INFINITY** (or equivalently, **+INFINITY**) internally as a
 score of 1,000,000. Addition and subtraction with it follow these three basic
 rules:
 
 * Any value + **INFINITY** = **INFINITY**
 
 * Any value - **INFINITY** = -**INFINITY**
 
 * **INFINITY** - **INFINITY** = -**INFINITY**
 
 .. note::
 
    What if you want to use a score higher than 1,000,000? Typically this possibility
    arises when someone wants to base the score on some external metric that might
    go above 1,000,000.
 
    The short answer is you can't.
 
    The long answer is that it is sometimes possible to work around this limitation
    creatively. You may be able to set the score to some computed value based on
    the external metric rather than use the metric directly. For nodes, you can
    store the metric as a node attribute, and query the attribute when computing
    the score (possibly as part of a custom resource agent).
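 
    As a sketch (the attribute name is hypothetical), a rule can then use
    the stored attribute directly as the score:
 
    .. code-block:: xml
 
       <rsc_location id="loc-by-metric" rsc="Webserver">
          <rule id="loc-by-metric-rule" score-attribute="scaled-metric">
             <expression id="loc-by-metric-expr" attribute="scaled-metric"
                         operation="defined"/>
          </rule>
       </rsc_location>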
 
 .. _location-constraint:
 
 .. index::
    single: location constraint
    single: constraint; location
 
 Deciding Which Nodes a Resource Can Run On
 ##########################################
 
 *Location constraints* tell the cluster which nodes a resource can run on.
 
 There are two alternative strategies. One way is to say that, by default,
 resources can run anywhere, and then the location constraints specify nodes
 that are not allowed (an *opt-out* cluster). The other way is to start with
 nothing able to run anywhere, and use location constraints to selectively
 enable allowed nodes (an *opt-in* cluster).
 
 Whether you should choose opt-in or opt-out depends on your
 personal preference and the make-up of your cluster.  If most of your
 resources can run on most of the nodes, then an opt-out arrangement is
 likely to result in a simpler configuration.  On the other hand, if
 most resources can only run on a small subset of nodes, an opt-in
 configuration might be simpler.
 
 .. index::
    pair: XML element; rsc_location
    single: constraint; rsc_location
 
 Location Properties
 ___________________
 
 .. table:: **Attributes of a rsc_location Element**
+   :class: longtable
+   :widths: 1 1 4
 
    +--------------------+---------+----------------------------------------------------------------------------------------------+
    | Attribute          | Default | Description                                                                                  |
    +====================+=========+==============================================================================================+
    | id                 |         | .. index::                                                                                   |
    |                    |         |    single: rsc_location; attribute, id                                                       |
    |                    |         |    single: attribute; id (rsc_location)                                                      |
    |                    |         |    single: id; rsc_location attribute                                                        |
    |                    |         |                                                                                              |
    |                    |         | A unique name for the constraint (required)                                                  |
    +--------------------+---------+----------------------------------------------------------------------------------------------+
    | rsc                |         | .. index::                                                                                   |
    |                    |         |    single: rsc_location; attribute, rsc                                                      |
    |                    |         |    single: attribute; rsc (rsc_location)                                                     |
    |                    |         |    single: rsc; rsc_location attribute                                                       |
    |                    |         |                                                                                              |
    |                    |         | The name of the resource to which this constraint                                            |
    |                    |         | applies. A location constraint must either have a                                            |
    |                    |         | ``rsc``, have a ``rsc-pattern``, or contain at                                               |
    |                    |         | least one resource set.                                                                      |
    +--------------------+---------+----------------------------------------------------------------------------------------------+
    | rsc-pattern        |         | .. index::                                                                                   |
    |                    |         |    single: rsc_location; attribute, rsc-pattern                                              |
    |                    |         |    single: attribute; rsc-pattern (rsc_location)                                             |
    |                    |         |    single: rsc-pattern; rsc_location attribute                                               |
    |                    |         |                                                                                              |
    |                    |         | A pattern matching the names of resources to which                                           |
    |                    |         | this constraint applies.  The syntax is the same as                                          |
    |                    |         | `POSIX <http://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap09.html#tag_09_04>`_ |
    |                    |         | extended regular expressions, with the addition of an                                        |
    |                    |         | initial *!* indicating that resources *not* matching                                         |
    |                    |         | the pattern are selected. If the regular expression                                          |
    |                    |         | contains submatches, and the constraint is governed by                                       |
    |                    |         | a :ref:`rule <rules>`, the submatches can be                                                 |
    |                    |         | referenced as **%1** through **%9** in the rule's                                            |
    |                    |         | ``score-attribute`` or a rule expression's ``attribute``.                                    |
    |                    |         | A location constraint must either have a ``rsc``, have a                                     |
    |                    |         | ``rsc-pattern``, or contain at least one resource set.                                       |
    +--------------------+---------+----------------------------------------------------------------------------------------------+
    | node               |         | .. index::                                                                                   |
    |                    |         |    single: rsc_location; attribute, node                                                     |
    |                    |         |    single: attribute; node (rsc_location)                                                    |
    |                    |         |    single: node; rsc_location attribute                                                      |
    |                    |         |                                                                                              |
    |                    |         | The name of the node to which this constraint applies.                                       |
    |                    |         | A location constraint must either have a ``node`` and                                        |
    |                    |         | ``score``, or contain at least one rule.                                                     |
    +--------------------+---------+----------------------------------------------------------------------------------------------+
    | score              |         | .. index::                                                                                   |
    |                    |         |    single: rsc_location; attribute, score                                                    |
    |                    |         |    single: attribute; score (rsc_location)                                                   |
    |                    |         |    single: score; rsc_location attribute                                                     |
    |                    |         |                                                                                              |
    |                    |         | Positive values indicate a preference for running the                                        |
    |                    |         | affected resource(s) on ``node`` -- the higher the value,                                    |
    |                    |         | the stronger the preference. Negative values indicate                                        |
    |                    |         | the resource(s) should avoid this node (a value of                                           |
    |                    |         | **-INFINITY** changes "should" to "must"). A location                                        |
    |                    |         | constraint must either have a ``node`` and ``score``,                                        |
    |                    |         | or contain at least one rule.                                                                |
    +--------------------+---------+----------------------------------------------------------------------------------------------+
    | resource-discovery | always  | .. index::                                                                                   |
    |                    |         |    single: rsc_location; attribute, resource-discovery                                       |
    |                    |         |    single: attribute; resource-discovery (rsc_location)                                      |
    |                    |         |    single: resource-discovery; rsc_location attribute                                        |
    |                    |         |                                                                                              |
    |                    |         | Whether Pacemaker should perform resource discovery                                          |
    |                    |         | (that is, check whether the resource is already running)                                     |
    |                    |         | for this resource on this node. This should normally be                                      |
    |                    |         | left as the default, so that rogue instances of a                                            |
    |                    |         | service can be stopped when they are running where they                                      |
    |                    |         | are not supposed to be. However, there are two                                               |
    |                    |         | situations where disabling resource discovery is a good                                      |
    |                    |         | idea: when a service is not installed on a node,                                             |
    |                    |         | discovery might return an error (properly written OCF                                        |
    |                    |         | agents will not, so this is usually only seen with other                                     |
    |                    |         | agent types); and when Pacemaker Remote is used to scale                                     |
    |                    |         | a cluster to hundreds of nodes, limiting resource                                            |
    |                    |         | discovery to allowed nodes can significantly boost                                           |
    |                    |         | performance.                                                                                 |
    |                    |         |                                                                                              |
    |                    |         | * ``always:`` Always perform resource discovery for                                          |
    |                    |         |   the specified resource on this node.                                                       |
    |                    |         |                                                                                              |
    |                    |         | * ``never:`` Never perform resource discovery for the                                        |
    |                    |         |   specified resource on this node.  This option should                                       |
    |                    |         |   generally be used with a -INFINITY score, although                                         |
    |                    |         |   that is not strictly required.                                                             |
    |                    |         |                                                                                              |
    |                    |         | * ``exclusive:`` Perform resource discovery for the                                          |
    |                    |         |   specified resource only on this node (and other nodes                                      |
    |                    |         |   similarly marked as ``exclusive``). Multiple location                                      |
    |                    |         |   constraints using ``exclusive`` discovery for the                                          |
    |                    |         |   same resource across different nodes create a subset                                       |
    |                    |         |   of nodes to which resource discovery is limited. If a                                      |
    |                    |         |   resource is marked for ``exclusive`` discovery on one                                      |
    |                    |         |   or more nodes, that resource is only allowed to be                                         |
    |                    |         |   placed within that subset of nodes.                                                        |
    +--------------------+---------+----------------------------------------------------------------------------------------------+
 
 .. warning::
 
    Setting ``resource-discovery`` to ``never`` or ``exclusive`` removes Pacemaker's
    ability to detect and stop unwanted instances of a service running
    where it's not supposed to be. It is up to the system administrator (you!)
    to make sure that the service can *never* be active on nodes without
    ``resource-discovery`` (such as by leaving the relevant software uninstalled).
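 
 For example (a sketch), the following both bans **Database** from **sles-3**
 and skips resource discovery for it there:
 
 .. topic:: Banning a resource from a node without resource discovery
 
    .. code-block:: xml
 
       <rsc_location id="loc-db-never-sles-3" rsc="Database" node="sles-3"
                     score="-INFINITY" resource-discovery="never"/>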
 
 .. index::
   single: Asymmetrical Clusters
   single: Opt-In Clusters
 
 Asymmetrical "Opt-In" Clusters
 ______________________________
 
 To create an opt-in cluster, start by preventing resources from running anywhere
 by default:
 
 .. code-block:: none
 
    # crm_attribute --name symmetric-cluster --update false
 
 Then start enabling nodes.  The following fragment says that the web
 server prefers **sles-1**, the database prefers **sles-2** and both can
 fail over to **sles-3** if their most preferred node fails.
 
 .. topic:: Opt-in location constraints for two resources
 
    .. code-block:: xml
 
       <constraints>
           <rsc_location id="loc-1" rsc="Webserver" node="sles-1" score="200"/>
           <rsc_location id="loc-2" rsc="Webserver" node="sles-3" score="0"/>
           <rsc_location id="loc-3" rsc="Database" node="sles-2" score="200"/>
           <rsc_location id="loc-4" rsc="Database" node="sles-3" score="0"/>
       </constraints>
 
 .. index::
   single: Symmetrical Clusters
   single: Opt-Out Clusters
 
 Symmetrical "Opt-Out" Clusters
 ______________________________
 
 To create an opt-out cluster, start by allowing resources to run
 anywhere by default:
 
 .. code-block:: none
 
    # crm_attribute --name symmetric-cluster --update true
 
 Then start disabling nodes.  The following fragment is the equivalent
 of the above opt-in configuration.
 
 .. topic:: Opt-out location constraints for two resources
 
    .. code-block:: xml
 
       <constraints>
           <rsc_location id="loc-1" rsc="Webserver" node="sles-1" score="200"/>
           <rsc_location id="loc-2-do-not-run" rsc="Webserver" node="sles-2" score="-INFINITY"/>
           <rsc_location id="loc-3-do-not-run" rsc="Database" node="sles-1" score="-INFINITY"/>
           <rsc_location id="loc-4" rsc="Database" node="sles-2" score="200"/>
       </constraints>
 
 .. _node-score-equal:
 
 What if Two Nodes Have the Same Score
 _____________________________________
 
 If two nodes have the same score, then the cluster will choose one.
 This choice may seem random and may not be what was intended, but the
 cluster was not given enough information to know any better.
 
 .. topic:: Constraints where a resource prefers two nodes equally
 
    .. code-block:: xml
 
       <constraints>
           <rsc_location id="loc-1" rsc="Webserver" node="sles-1" score="INFINITY"/>
           <rsc_location id="loc-2" rsc="Webserver" node="sles-2" score="INFINITY"/>
           <rsc_location id="loc-3" rsc="Database" node="sles-1" score="500"/>
           <rsc_location id="loc-4" rsc="Database" node="sles-2" score="300"/>
           <rsc_location id="loc-5" rsc="Database" node="sles-2" score="200"/>
       </constraints>
 
 In the example above, assuming no other constraints and an inactive
 cluster, **Webserver** would probably be placed on **sles-1** and **Database** on
 **sles-2**.  It would likely have placed **Webserver** based on the node's
 uname and **Database** based on the desire to spread the resource load
 evenly across the cluster.  However other factors can also be involved
 in more complex configurations.
 
 .. index::
    single: constraint; ordering
    single: resource; start order
 
 .. _s-resource-ordering:
 
 Specifying the Order in which Resources Should Start/Stop
 #########################################################
 
 *Ordering constraints* tell the cluster the order in which certain
 resource actions should occur.
 
 .. important::
 
    Ordering constraints affect *only* the ordering of resource actions;
    they do *not* require that the resources be placed on the
    same node. If you want resources to be started on the same node
    *and* in a specific order, you need both an ordering constraint *and*
    a colocation constraint (see :ref:`s-resource-colocation`), or
    alternatively, a group (see :ref:`group-resources`).
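 
 For example (a sketch reusing the resource names from earlier examples),
 keeping a web server on the same node as its database *and* starting it
 afterward takes one constraint of each type:
 
 .. topic:: Ordering and colocation used together
 
    .. code-block:: xml
 
       <constraints>
          <rsc_order id="order-db-then-web" first="Database" then="Webserver"/>
          <rsc_colocation id="web-with-db" rsc="Webserver"
                          with-rsc="Database" score="INFINITY"/>
       </constraints>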
 
 .. index::
    pair: XML element; rsc_order
    pair: constraint; rsc_order
 
 Ordering Properties
 ___________________
 
 .. table:: **Attributes of a rsc_order Element**
+   :class: longtable
+   :widths: 1 2 4
 
    +--------------+----------------------------+-------------------------------------------------------------------+
    | Field        | Default                    | Description                                                       |
    +==============+============================+===================================================================+
    | id           |                            | .. index::                                                        |
    |              |                            |    single: rsc_order; attribute, id                               |
    |              |                            |    single: attribute; id (rsc_order)                              |
    |              |                            |    single: id; rsc_order attribute                                |
    |              |                            |                                                                   |
    |              |                            | A unique name for the constraint                                  |
    +--------------+----------------------------+-------------------------------------------------------------------+
    | first        |                            | .. index::                                                        |
    |              |                            |    single: rsc_order; attribute, first                            |
    |              |                            |    single: attribute; first (rsc_order)                           |
    |              |                            |    single: first; rsc_order attribute                             |
    |              |                            |                                                                   |
    |              |                            | Name of the resource that the ``then`` resource                   |
    |              |                            | depends on                                                        |
    +--------------+----------------------------+-------------------------------------------------------------------+
    | then         |                            | .. index::                                                        |
    |              |                            |    single: rsc_order; attribute, then                             |
    |              |                            |    single: attribute; then (rsc_order)                            |
    |              |                            |    single: then; rsc_order attribute                              |
    |              |                            |                                                                   |
    |              |                            | Name of the dependent resource                                    |
    +--------------+----------------------------+-------------------------------------------------------------------+
    | first-action | start                      | .. index::                                                        |
    |              |                            |    single: rsc_order; attribute, first-action                     |
    |              |                            |    single: attribute; first-action (rsc_order)                    |
    |              |                            |    single: first-action; rsc_order attribute                      |
    |              |                            |                                                                   |
    |              |                            | The action that the ``first`` resource must complete              |
    |              |                            | before ``then-action`` can be initiated for the ``then``          |
    |              |                            | resource.  Allowed values: ``start``, ``stop``,                   |
    |              |                            | ``promote``, ``demote``.                                          |
    +--------------+----------------------------+-------------------------------------------------------------------+
    | then-action  | value of ``first-action``  | .. index::                                                        |
    |              |                            |    single: rsc_order; attribute, then-action                      |
    |              |                            |    single: attribute; then-action (rsc_order)                     |
    |              |                            |    single: then-action; rsc_order attribute                       |
    |              |                            |                                                                   |
    |              |                            | The action that the ``then`` resource can execute only            |
    |              |                            | after the ``first-action`` on the ``first`` resource has          |
    |              |                            | completed.  Allowed values: ``start``, ``stop``,                  |
    |              |                            | ``promote``, ``demote``.                                          |
    +--------------+----------------------------+-------------------------------------------------------------------+
    | kind         | Mandatory                  | .. index::                                                        |
    |              |                            |    single: rsc_order; attribute, kind                             |
    |              |                            |    single: attribute; kind (rsc_order)                            |
    |              |                            |    single: kind; rsc_order attribute                              |
    |              |                            |                                                                   |
    |              |                            | How to enforce the constraint. Allowed values:                    |
    |              |                            |                                                                   |
    |              |                            | * ``Mandatory:`` ``then-action`` will never be initiated          |
    |              |                            |   for the ``then`` resource unless and until ``first-action``     |
    |              |                            |   successfully completes for the ``first`` resource.              |
    |              |                            |                                                                   |
    |              |                            | * ``Optional:`` The constraint applies only if both specified     |
    |              |                            |   resource actions are scheduled in the same transition           |
    |              |                            |   (that is, in response to the same cluster state). This          |
    |              |                            |   means that ``then-action`` is allowed on the ``then``           |
    |              |                            |   resource regardless of the state of the ``first`` resource,     |
    |              |                            |   but if both actions happen to be scheduled at the same time,    |
    |              |                            |   they will be ordered.                                           |
    |              |                            |                                                                   |
    |              |                            | * ``Serialize:`` Ensure that the specified actions are never      |
    |              |                            |   performed concurrently for the specified resources.             |
    |              |                            |   ``first-action`` and ``then-action`` can be executed in either  |
    |              |                            |   order, but one must complete before the other can be initiated. |
    |              |                            |   An example use case is when resource start-up puts a high load  |
    |              |                            |   on the host.                                                    |
    +--------------+----------------------------+-------------------------------------------------------------------+
    | symmetrical  | TRUE for ``Mandatory`` and | .. index::                                                        |
    |              | ``Optional`` kinds. FALSE  |    single: rsc_order; attribute, symmetrical                      |
    |              | for ``Serialize`` kind.    |    single: attribute; symmetrical (rsc)order)                     |
    |              |                            |    single: symmetrical; rsc_order attribute                       |
    |              |                            |                                                                   |
    |              |                            | If true, the reverse of the constraint applies for the            |
    |              |                            | opposite action (for example, if B starts after A starts,         |
    |              |                            | then B stops before A stops).  ``Serialize`` orders cannot        |
    |              |                            | be symmetrical.                                                   |
    +--------------+----------------------------+-------------------------------------------------------------------+
 
 The ``promote`` and ``demote`` actions apply to
 :ref:`promotable <s-resource-promotable>` clone resources.
 
 Optional and mandatory ordering
 _______________________________
 
 Here is an example of ordering constraints where **Database** *must* start before
 **Webserver**, and **IP** *should* start before **Webserver** if they both need to be
 started:
 
 .. topic:: Optional and mandatory ordering constraints
 
    .. code-block:: xml
 
       <constraints>
           <rsc_order id="order-1" first="IP" then="Webserver" kind="Optional"/>
           <rsc_order id="order-2" first="Database" then="Webserver" kind="Mandatory" />
       </constraints>
 
 Because the above example lets ``symmetrical`` default to TRUE, **Webserver**
 must be stopped before **Database** can be stopped, and **Webserver** should be
 stopped before **IP** if they both need to be stopped.
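 
 If you did not want the implied reverse ordering enforced, you could disable
 it explicitly. A minimal sketch, reusing the resource names above:
 
 .. topic:: Ordering constraint without the implied reverse ordering
 
    .. code-block:: xml
 
       <rsc_order id="order-3" first="Database" then="Webserver" kind="Mandatory" symmetrical="false"/>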
 
 .. index::
    single: colocation
    single: constraint; colocation
    single: resource; location relative to other resources
 
 .. _s-resource-colocation:
 
 Placing Resources Relative to other Resources
 #############################################
 
 *Colocation constraints* tell the cluster that the location of one resource
 depends on the location of another one.
 
 Colocation has an important side-effect: it affects the order in which
 resources are assigned to a node. Think about it: You can't place A relative to
 B unless you know where B is [#]_.
 
 So when you are creating colocation constraints, it is important to
 consider whether you should colocate A with B, or B with A.
 
 .. important::
 
    Colocation constraints affect *only* the placement of resources; they do *not*
    require that the resources be started in a particular order. If you want
    resources to be started on the same node *and* in a specific order, you need
    both an ordering constraint (see :ref:`s-resource-ordering`) *and* a colocation
    constraint, or alternatively, a group (see :ref:`group-resources`).
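 
 For example, a minimal sketch of such a pairing, assuming two hypothetical
 resources **A** and **B** that must run on the same node with **A** started
 first:
 
 .. topic:: Ordering and colocation constraints used together
 
    .. code-block:: xml
 
       <constraints>
           <rsc_order id="order-A-then-B" first="A" then="B"/>
           <rsc_colocation id="colocate-B-with-A" rsc="B" with-rsc="A" score="INFINITY"/>
       </constraints>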
 
 .. index::
    pair: XML element; rsc_colocation
    single: constraint; rsc_colocation
 
 Colocation Properties
 _____________________
 
 .. table:: **Attributes of a rsc_colocation Constraint**
+   :class: longtable
+   :widths: 2 2 5
 
    +----------------+----------------+--------------------------------------------------------+
    | Field          | Default        | Description                                            |
    +================+================+========================================================+
    | id             |                | .. index::                                             |
    |                |                |    single: rsc_colocation; attribute, id               |
    |                |                |    single: attribute; id (rsc_colocation)              |
    |                |                |    single: id; rsc_colocation attribute                |
    |                |                |                                                        |
    |                |                | A unique name for the constraint (required).           |
    +----------------+----------------+--------------------------------------------------------+
    | rsc            |                | .. index::                                             |
    |                |                |    single: rsc_colocation; attribute, rsc              |
    |                |                |    single: attribute; rsc (rsc_colocation)             |
    |                |                |    single: rsc; rsc_colocation attribute               |
    |                |                |                                                        |
    |                |                | The name of a resource that should be located          |
    |                |                | relative to ``with-rsc``. A colocation constraint must |
    |                |                | either contain at least one                            |
    |                |                | :ref:`resource set <s-resource-sets>`, or specify both |
    |                |                | ``rsc`` and ``with-rsc``.                              |
    +----------------+----------------+--------------------------------------------------------+
    | with-rsc       |                | .. index::                                             |
    |                |                |    single: rsc_colocation; attribute, with-rsc         |
    |                |                |    single: attribute; with-rsc (rsc_colocation)        |
    |                |                |    single: with-rsc; rsc_colocation attribute          |
    |                |                |                                                        |
    |                |                | The name of the resource used as the colocation        |
    |                |                | target. The cluster will decide where to put this      |
    |                |                | resource first and then decide where to put ``rsc``.   |
    |                |                | A colocation constraint must either contain at least   |
    |                |                | one :ref:`resource set <s-resource-sets>`, or specify  |
    |                |                | both ``rsc`` and ``with-rsc``.                         |
    +----------------+----------------+--------------------------------------------------------+
    | node-attribute | #uname         | .. index::                                             |
    |                |                |    single: rsc_colocation; attribute, node-attribute   |
    |                |                |    single: attribute; node-attribute (rsc_colocation)  |
    |                |                |    single: node-attribute; rsc_colocation attribute    |
    |                |                |                                                        |
    |                |                | If ``rsc`` and ``with-rsc`` are specified, this node   |
    |                |                | attribute must be the same on the node running ``rsc`` |
    |                |                | and the node running ``with-rsc`` for the constraint   |
    |                |                | to be satisfied. (For details, see                     |
    |                |                | :ref:`s-coloc-attribute`.)                             |
    +----------------+----------------+--------------------------------------------------------+
    | score          | 0              | .. index::                                             |
    |                |                |    single: rsc_colocation; attribute, score            |
    |                |                |    single: attribute; score (rsc_colocation)           |
    |                |                |    single: score; rsc_colocation attribute             |
    |                |                |                                                        |
    |                |                | Positive values indicate the resources should run on   |
    |                |                | the same node. Negative values indicate the resources  |
    |                |                | should run on different nodes. Values of               |
    |                |                | +/- ``INFINITY`` change "should" to "must".            |
    +----------------+----------------+--------------------------------------------------------+
    | rsc-role       | Started        | .. index::                                             |
    |                |                |    single: rsc_colocation; attribute, rsc-role         |
    |                |                |    single: attribute; rsc-role (rsc_colocation)        |
    |                |                |    single: rsc-role; rsc_colocation attribute          |
    |                |                |                                                        |
    |                |                | If ``rsc`` and ``with-rsc`` are specified, and ``rsc`` |
    |                |                | is a :ref:`promotable clone <s-resource-promotable>`,  |
    |                |                | the constraint applies only to ``rsc`` instances in    |
    |                |                | this role. Allowed values: ``Started``, ``Promoted``,  |
    |                |                | ``Unpromoted``. For details, see                       |
    |                |                | :ref:`promotable-clone-constraints`.                   |
    +----------------+----------------+--------------------------------------------------------+
    | with-rsc-role  | Started        | .. index::                                             |
    |                |                |    single: rsc_colocation; attribute, with-rsc-role    |
    |                |                |    single: attribute; with-rsc-role (rsc_colocation)   |
    |                |                |    single: with-rsc-role; rsc_colocation attribute     |
    |                |                |                                                        |
    |                |                | If ``rsc`` and ``with-rsc`` are specified, and         |
    |                |                | ``with-rsc`` is a                                      |
    |                |                | :ref:`promotable clone <s-resource-promotable>`, the   |
    |                |                | constraint applies only to ``with-rsc`` instances in   |
    |                |                | this role. Allowed values: ``Started``, ``Promoted``,  |
    |                |                | ``Unpromoted``. For details, see                       |
    |                |                | :ref:`promotable-clone-constraints`.                   |
    +----------------+----------------+--------------------------------------------------------+
    | influence      | value of       | .. index::                                             |
    |                | ``critical``   |    single: rsc_colocation; attribute, influence        |
    |                | meta-attribute |    single: attribute; influence (rsc_colocation)       |
    |                | for ``rsc``    |    single: influence; rsc_colocation attribute         |
    |                |                |                                                        |
    |                |                | Whether to consider the location preferences of        |
    |                |                | ``rsc`` when ``with-rsc`` is already active. Allowed   |
    |                |                | values: ``true``, ``false``. For details, see          |
    |                |                | :ref:`s-coloc-influence`. *(since 2.1.0)*              |
    +----------------+----------------+--------------------------------------------------------+
 
 Mandatory Placement
 ___________________
 
 Mandatory placement occurs when the constraint's score is
 **+INFINITY** or **-INFINITY**.  In such cases, if the constraint can't be
 satisfied, then the **rsc** resource is not permitted to run.  For
 ``score=INFINITY``, this includes cases where the ``with-rsc`` resource is
 not active.
 
 If you need resource **A** to always run on the same machine as
 resource **B**, you would add the following constraint:
 
 .. topic:: Mandatory colocation constraint for two resources
 
    .. code-block:: xml
 
       <rsc_colocation id="colocate" rsc="A" with-rsc="B" score="INFINITY"/>
 
 Remember, because **INFINITY** was used, if **B** can't run on any
 of the cluster nodes (for whatever reason) then **A** will not
 be allowed to run. Whether **A** is running or not has no effect on **B**.
 
 Alternatively, you may want the opposite -- that **A** *cannot*
 run on the same machine as **B**.  In this case, use ``score="-INFINITY"``.
 
 .. topic:: Mandatory anti-colocation constraint for two resources
 
    .. code-block:: xml
 
       <rsc_colocation id="anti-colocate" rsc="A" with-rsc="B" score="-INFINITY"/>
 
 Again, by specifying **-INFINITY**, the constraint is binding.  So if the
 only place left to run is where **B** already is, then **A** may not run anywhere.
 
 As with **INFINITY**, **B** can run even if **A** is stopped.  However, in this
 case **A** also can run if **B** is stopped, because it still meets the
 constraint of **A** and **B** not running on the same node.
 
 Advisory Placement
 __________________
 
 If mandatory placement is about "must" and "must not", then advisory
 placement is the "I'd prefer if" alternative.  For constraints with
 scores greater than **-INFINITY** and less than **INFINITY**, the cluster
 will try to accommodate your wishes but may ignore them if the
 alternative is to stop some of the cluster resources.
 
 As in life, where something effectively becomes mandatory if enough people
 prefer it, advisory colocation constraints can combine with other elements of
 the configuration to behave as if they were mandatory.
 
 .. topic:: Advisory colocation constraint for two resources
 
    .. code-block:: xml
 
       <rsc_colocation id="colocate-maybe" rsc="A" with-rsc="B" score="500"/>
 
 .. _s-coloc-attribute:
 
 Colocation by Node Attribute
 ____________________________
 
 The ``node-attribute`` property of a colocation constraint allows you to express
 the requirement, "these resources must be on similar nodes".
 
 As an example, imagine that you have two Storage Area Networks (SANs) that are
 not controlled by the cluster, and each node is connected to one or the other.
 You may have two resources **r1** and **r2** such that **r2** needs to use the same
 SAN as **r1**, but doesn't necessarily have to be on the same exact node.
 In such a case, you could define a :ref:`node attribute <node_attributes>` named
 **san**, with the value **san1** or **san2** on each node as appropriate. Then, you
 could colocate **r2** with **r1** using ``node-attribute`` set to **san**.
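 
 A minimal sketch of that constraint, assuming the resources and node attribute
 described above:
 
 .. topic:: Colocation by node attribute
 
    .. code-block:: xml
 
       <rsc_colocation id="colocate-san" rsc="r2" with-rsc="r1" score="INFINITY"
                       node-attribute="san"/>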
 
 .. _s-coloc-influence:
 
 Colocation Influence
 ____________________
 
 By default, if A is colocated with B, the cluster will take into account A's
 preferences when deciding where to place B, to maximize the chance that both
 resources can run.
 
 For a detailed look at exactly how this occurs, see
 `Colocation Explained <http://clusterlabs.org/doc/Colocation_Explained.pdf>`_.
 
 However, if ``influence`` is set to ``false`` in the colocation constraint,
 this will happen only if B is inactive and needing to be started. If B is
 already active, A's preferences will have no effect on placing B.
 
 An example of what effect this would have and when it would be desirable would
 be a nonessential reporting tool colocated with a resource-intensive service
 that takes a long time to start. If the reporting tool fails enough times to
 reach its migration threshold, by default the cluster will want to move both
 resources to another node if possible. Setting ``influence`` to ``false`` on
 the colocation constraint would mean that the reporting tool would be stopped
 in this situation instead, to avoid forcing the service to move.
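 
 A minimal sketch of such a constraint, assuming hypothetical resources
 **report** (the nonessential reporting tool) and **bigdb** (the
 resource-intensive service):
 
 .. topic:: Colocation constraint that does not influence placement
 
    .. code-block:: xml
 
       <rsc_colocation id="colocate-report" rsc="report" with-rsc="bigdb"
                       score="INFINITY" influence="false"/>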
 
 The ``critical`` resource meta-attribute is a convenient way to specify the
 default for all colocation constraints and groups involving a particular
 resource.
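 
 A minimal sketch of setting that meta-attribute, assuming a hypothetical
 resource **report**:
 
 .. topic:: Marking a resource as noncritical
 
    .. code-block:: xml
 
       <primitive id="report" class="ocf" provider="pacemaker" type="Dummy">
           <meta_attributes id="report-meta_attributes">
               <nvpair id="report-meta_attributes-critical" name="critical" value="false"/>
           </meta_attributes>
       </primitive>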
 
 .. note::
 
    If a noncritical resource is a member of a group, all later members of the
    group will be treated as noncritical, even if they are marked as (or left to
    default to) critical.
 
 
 .. _s-resource-sets:
 
 Resource Sets
 #############
 
 .. index::
    single: constraint; resource set
    single: resource; resource set
 
 *Resource sets* allow multiple resources to be affected by a single constraint.
 
 .. topic:: A set of 3 resources
 
    .. code-block:: xml
 
       <resource_set id="resource-set-example">
           <resource_ref id="A"/>
           <resource_ref id="B"/>
           <resource_ref id="C"/>
       </resource_set>
 
 Resource sets are valid inside ``rsc_location``, ``rsc_order``
 (see :ref:`s-resource-sets-ordering`), ``rsc_colocation``
 (see :ref:`s-resource-sets-colocation`), and ``rsc_ticket``
 (see :ref:`ticket-constraints`) constraints.
 
 A resource set has a number of properties that can be set, though not all
 have an effect in all contexts.
 
 .. index::
    pair: XML element; resource_set
 
-.. topic:: **Attributes of a resource_set Element**
+.. table:: **Attributes of a resource_set Element**
+   :class: longtable
+   :widths: 2 2 5
 
    +-------------+------------------+--------------------------------------------------------+
    | Field       | Default          | Description                                            |
    +=============+==================+========================================================+
    | id          |                  | .. index::                                             |
    |             |                  |    single: resource_set; attribute, id                 |
    |             |                  |    single: attribute; id (resource_set)                |
    |             |                  |    single: id; resource_set attribute                  |
    |             |                  |                                                        |
    |             |                  | A unique name for the set (required)                   |
    +-------------+------------------+--------------------------------------------------------+
    | sequential  | true             | .. index::                                             |
    |             |                  |    single: resource_set; attribute, sequential         |
    |             |                  |    single: attribute; sequential (resource_set)        |
    |             |                  |    single: sequential; resource_set attribute          |
    |             |                  |                                                        |
    |             |                  | Whether the members of the set must be acted on in     |
    |             |                  | order.  Meaningful within ``rsc_order`` and            |
    |             |                  | ``rsc_colocation``.                                    |
    +-------------+------------------+--------------------------------------------------------+
    | require-all | true             | .. index::                                             |
    |             |                  |    single: resource_set; attribute, require-all        |
    |             |                  |    single: attribute; require-all (resource_set)       |
    |             |                  |    single: require-all; resource_set attribute         |
    |             |                  |                                                        |
    |             |                  | Whether all members of the set must be active before   |
    |             |                  | continuing.  With the current implementation, the      |
    |             |                  | cluster may continue even if only one member of the    |
    |             |                  | set is started, but if more than one member of the set |
    |             |                  | is starting at the same time, the cluster will still   |
    |             |                  | wait until all of those have started before continuing |
    |             |                  | (this may change in future versions).  Meaningful      |
    |             |                  | within ``rsc_order``.                                  |
    +-------------+------------------+--------------------------------------------------------+
    | role        |                  | .. index::                                             |
    |             |                  |    single: resource_set; attribute, role               |
    |             |                  |    single: attribute; role (resource_set)              |
    |             |                  |    single: role; resource_set attribute                |
    |             |                  |                                                        |
    |             |                  | The constraint applies only to resource set members    |
    |             |                  | that are :ref:`s-resource-promotable` in this          |
    |             |                  | role.  Meaningful within ``rsc_location``,             |
    |             |                  | ``rsc_colocation`` and ``rsc_ticket``.                 |
    |             |                  | Allowed values: ``Started``, ``Promoted``,             |
    |             |                  | ``Unpromoted``. For details, see                       |
    |             |                  | :ref:`promotable-clone-constraints`.                   |
    +-------------+------------------+--------------------------------------------------------+
    | action      | value of         | .. index::                                             |
    |             | ``first-action`` |    single: resource_set; attribute, action             |
    |             | in the enclosing |    single: attribute; action (resource_set)            |
    |             | ordering         |    single: action; resource_set attribute              |
    |             | constraint       |                                                        |
    |             |                  | The action that applies to *all members* of the set.   |
    |             |                  | Meaningful within ``rsc_order``. Allowed values:       |
    |             |                  | ``start``, ``stop``, ``promote``, ``demote``.          |
    +-------------+------------------+--------------------------------------------------------+
    | score       |                  | .. index::                                             |
    |             |                  |    single: resource_set; attribute, score              |
    |             |                  |    single: attribute; score (resource_set)             |
    |             |                  |    single: score; resource_set attribute               |
    |             |                  |                                                        |
    |             |                  | *Advanced use only.* Use a specific score for this     |
    |             |                  | set within the constraint.                             |
    +-------------+------------------+--------------------------------------------------------+
 
 .. _s-resource-sets-ordering:
 
 Ordering Sets of Resources
 ##########################
 
 A common situation is for an administrator to create a chain of ordered
 resources, such as:
 
 .. topic:: A chain of ordered resources
 
    .. code-block:: xml
 
       <constraints>
           <rsc_order id="order-1" first="A" then="B" />
           <rsc_order id="order-2" first="B" then="C" />
           <rsc_order id="order-3" first="C" then="D" />
       </constraints>
 
 .. topic:: Visual representation of the four resources' start order for the above constraints
 
    .. image:: images/resource-set.png
       :alt: Ordered set
 
 Ordered Set
 ___________
 
 To simplify this situation, :ref:`s-resource-sets` can be used within ordering
 constraints:
 
 .. topic:: A chain of ordered resources expressed as a set
 
    .. code-block:: xml
 
       <constraints>
           <rsc_order id="order-1">
             <resource_set id="ordered-set-example" sequential="true">
               <resource_ref id="A"/>
               <resource_ref id="B"/>
               <resource_ref id="C"/>
               <resource_ref id="D"/>
             </resource_set>
           </rsc_order>
       </constraints>
 
 While the set-based format is not less verbose, it is significantly easier to
 get right and maintain.
 
 .. important::
 
    If you use a higher-level tool, pay attention to how it exposes this
    functionality. Depending on the tool, creating a set **A B** may be equivalent to
    **A then B**, or **B then A**.
 
 Ordering Multiple Sets
 ______________________
 
 The syntax can be expanded to allow sets of resources to be ordered relative to
 each other, where the members of each individual set may be ordered or
 unordered (controlled by the ``sequential`` property). In the example below, **A**
 and **B** can both start in parallel, as can **C** and **D**; however, **C**
 and **D** can only start once *both* **A** *and* **B** are active.
 
 .. topic:: Ordered sets of unordered resources
 
    .. code-block:: xml
 
       <constraints>
           <rsc_order id="order-1">
               <resource_set id="ordered-set-1" sequential="false">
                   <resource_ref id="A"/>
                   <resource_ref id="B"/>
               </resource_set>
               <resource_set id="ordered-set-2" sequential="false">
                   <resource_ref id="C"/>
                   <resource_ref id="D"/>
               </resource_set>
           </rsc_order>
       </constraints>
 
 .. topic:: Visual representation of the start order for two ordered sets of
            unordered resources
 
    .. image:: images/two-sets.png
       :alt: Two ordered sets
 
 Of course, either or both sets of resources can also be internally ordered
 (by setting ``sequential="true"``), and there is no limit to the number of
 sets that can be specified.
 
 .. topic:: Advanced use of set ordering - Three ordered sets, two of which are
            internally unordered
 
    .. code-block:: xml
 
       <constraints>
           <rsc_order id="order-1">
             <resource_set id="ordered-set-1" sequential="false">
               <resource_ref id="A"/>
               <resource_ref id="B"/>
             </resource_set>
             <resource_set id="ordered-set-2" sequential="true">
               <resource_ref id="C"/>
               <resource_ref id="D"/>
             </resource_set>
             <resource_set id="ordered-set-3" sequential="false">
               <resource_ref id="E"/>
               <resource_ref id="F"/>
             </resource_set>
           </rsc_order>
       </constraints>
 
 .. topic:: Visual representation of the start order for the three sets defined above
 
    .. image:: images/three-sets.png
       :alt: Three ordered sets
 
 .. important::
 
    An ordered set with ``sequential=false`` makes sense only if there is another
    set in the constraint. Otherwise, the constraint has no effect.
 
 Resource Set OR Logic
 _____________________
 
 The unordered set logic discussed so far has all been "AND" logic. To
 illustrate this, take the figure with three resource sets in the previous
 section. Those sets can be expressed as
 **(A and B) then (C) then (D) then (E and F)**.
 
 Say, for example, we want to change the first set, **(A and B)**, to use "OR"
 logic, so that the sets look like this:
 **(A or B) then (C) then (D) then (E and F)**. This functionality can be
 achieved through the ``require-all`` option. This option defaults to ``true``,
 which is why "AND" logic is used by default. Setting ``require-all=false``
 means only one resource in the set needs to be started before continuing on
 to the next set.
 
 .. topic:: Resource Set "OR" logic: Three ordered sets, where the first set is
            internally unordered with "OR" logic
 
    .. code-block:: xml
 
       <constraints>
           <rsc_order id="order-1">
             <resource_set id="ordered-set-1" sequential="false" require-all="false">
               <resource_ref id="A"/>
               <resource_ref id="B"/>
             </resource_set>
             <resource_set id="ordered-set-2" sequential="true">
               <resource_ref id="C"/>
               <resource_ref id="D"/>
             </resource_set>
             <resource_set id="ordered-set-3" sequential="false">
               <resource_ref id="E"/>
               <resource_ref id="F"/>
             </resource_set>
           </rsc_order>
       </constraints>
 
 .. important::
 
    An ordered set with ``require-all=false`` makes sense only in conjunction with
    ``sequential=false``. Think of it like this: ``sequential=false`` modifies the set
    to be an unordered set using "AND" logic by default, and adding
    ``require-all=false`` flips the unordered set's "AND" logic to "OR" logic.
 
 .. _s-resource-sets-colocation:
 
 Colocating Sets of Resources
 ############################
 
 Another common situation is for an administrator to create a set of
 colocated resources.
 
 The simplest way to do this is to define a resource group (see
 :ref:`group-resources`), but that cannot always accurately express the desired
 relationships. For example, maybe the resources do not need to be ordered.
 
 Another way would be to define each relationship as an individual constraint,
 but that causes a difficult-to-follow constraint explosion as the number of
 resources and combinations grows.
 
 .. topic:: Colocation chain as individual constraints, where A is placed first,
            then B, then C, then D
 
    .. code-block:: xml
 
       <constraints>
           <rsc_colocation id="coloc-1" rsc="D" with-rsc="C" score="INFINITY"/>
           <rsc_colocation id="coloc-2" rsc="C" with-rsc="B" score="INFINITY"/>
           <rsc_colocation id="coloc-3" rsc="B" with-rsc="A" score="INFINITY"/>
       </constraints>
 
 To express complicated relationships with a simplified syntax [#]_,
 :ref:`resource sets <s-resource-sets>` can be used within colocation constraints.
 
 .. topic:: Equivalent colocation chain expressed using **resource_set**
 
    .. code-block:: xml
 
       <constraints>
           <rsc_colocation id="coloc-1" score="INFINITY" >
             <resource_set id="colocated-set-example" sequential="true">
               <resource_ref id="A"/>
               <resource_ref id="B"/>
               <resource_ref id="C"/>
               <resource_ref id="D"/>
             </resource_set>
           </rsc_colocation>
       </constraints>
 
 .. note::
 
    Within a ``resource_set``, the resources are listed in the order they are
    *placed*, which is the reverse of the order in which they are *colocated*.
    In the above example, resource **A** is placed before resource **B**, which is
    the same as saying resource **B** is colocated with resource **A**.
 
 As with individual constraints, a resource that can't be active prevents any
 resource that must be colocated with it from being active. In both of the
 previous examples, if **B** is unable to run, then both **C** and, by
 inference, **D** must remain stopped.
 
 .. important::
 
    If you use a higher-level tool, pay attention to how it exposes this
    functionality. Depending on the tool, creating a set **A B** may be equivalent to
    **A with B**, or **B with A**.
 
 Resource sets can also be used to tell the cluster that entire *sets* of
 resources must be colocated relative to each other, while the individual
 members within any one set may or may not be colocated relative to each other
 (determined by the set's ``sequential`` property).
 
 In the following example, resources **B**, **C**, and **D** will each be colocated
 with **A** (which will be placed first). **A** must be able to run in order for any
 of the resources to run, but any of **B**, **C**, or **D** may be stopped without
 affecting any of the others.
 
 .. topic:: Using colocated sets to specify a shared dependency
 
    .. code-block:: xml
 
       <constraints>
           <rsc_colocation id="coloc-1" score="INFINITY" >
             <resource_set id="colocated-set-2" sequential="false">
               <resource_ref id="B"/>
               <resource_ref id="C"/>
               <resource_ref id="D"/>
             </resource_set>
             <resource_set id="colocated-set-1" sequential="true">
               <resource_ref id="A"/>
             </resource_set>
           </rsc_colocation>
       </constraints>
 
 .. note::
 
    Pay close attention to the order in which resources and sets are listed.
    While the members of any one sequential set are placed first to last (i.e., the
    colocation dependency is last with first), multiple sets are placed last to
    first (i.e., the colocation dependency is first with last).
 
 .. important::
 
    A colocated set with ``sequential="false"`` makes sense only if there is
    another set in the constraint. Otherwise, the constraint has no effect.
 
 There is no inherent limit to the number and size of the sets used.
 The only thing that matters is that in order for any member of one set
 in the constraint to be active, all members of sets listed after it must also
 be active (and naturally on the same node); and if a set has ``sequential="true"``,
 then in order for one member of that set to be active, all members listed
 before it must also be active.
 
 If desired, you can restrict the dependency to instances of promotable clone
 resources that are in a specific role, using the set's ``role`` property.
 
 .. topic:: Colocation in which the members of the middle set have no
            interdependencies, and the last set listed applies only to promoted
            instances
 
    .. code-block:: xml
 
       <constraints>
           <rsc_colocation id="coloc-1" score="INFINITY" >
             <resource_set id="colocated-set-1" sequential="true">
               <resource_ref id="F"/>
               <resource_ref id="G"/>
             </resource_set>
             <resource_set id="colocated-set-2" sequential="false">
               <resource_ref id="C"/>
               <resource_ref id="D"/>
               <resource_ref id="E"/>
             </resource_set>
             <resource_set id="colocated-set-3" sequential="true" role="Promoted">
               <resource_ref id="A"/>
               <resource_ref id="B"/>
             </resource_set>
           </rsc_colocation>
       </constraints>
 
 .. topic:: Visual representation of the above example (resources are placed from
            left to right)
 
    .. image:: ../shared/images/pcmk-colocated-sets.png
       :alt: Colocation chain
 
 .. note::
 
    Unlike ordered sets, colocated sets do not use the ``require-all`` option.
 
 .. [#] While the human brain is sophisticated enough to read the constraint
        in any order and choose the correct one depending on the situation,
        the cluster is not quite so smart. Yet.
 
 .. [#] which is not the same as saying easy to follow
diff --git a/doc/sphinx/Pacemaker_Explained/fencing.rst b/doc/sphinx/Pacemaker_Explained/fencing.rst
index 680d5d87b0..4e8d3a2f78 100644
--- a/doc/sphinx/Pacemaker_Explained/fencing.rst
+++ b/doc/sphinx/Pacemaker_Explained/fencing.rst
@@ -1,1292 +1,1298 @@
 .. index::
    single: fencing
    single: STONITH
 
 .. _fencing:
 
 Fencing
 -------
 
 What Is Fencing?
 ################
 
 *Fencing* is the ability to make a node unable to run resources, even when that
 node is unresponsive to cluster commands.
 
 Fencing is also known as *STONITH*, an acronym for "Shoot The Other Node In The
 Head", since the most common fencing method is cutting power to the node.
 Another method is "fabric fencing", cutting the node's access to some
 capability required to run resources (such as network access or a shared disk).
 
 .. index::
    single: fencing; why necessary
 
 Why Is Fencing Necessary?
 #########################
 
 Fencing protects your data from being corrupted by malfunctioning nodes or
 unintentional concurrent access to shared resources.
 
 Fencing protects against the "split brain" failure scenario, where cluster
 nodes have lost the ability to reliably communicate with each other but are
 still able to run resources. If the cluster just assumed that uncommunicative
 nodes were down, then multiple instances of a resource could be started on
 different nodes.
 
 The effect of split brain depends on the resource type. For example, an IP
 address brought up on two hosts on a network will cause packets to randomly be
 sent to one or the other host, rendering the IP useless. For a database or
 clustered file system, the effect could be much more severe, causing data
 corruption or divergence.
 
 Fencing is also used when a resource cannot otherwise be stopped. If a
 resource fails to stop on a node, it cannot be started on a different node
 without risking the same type of conflict as split-brain. Fencing the
 original node ensures the resource can be safely started elsewhere.
 
 Users may also configure the ``on-fail`` property of :ref:`operation` or the
 ``loss-policy`` property of
 :ref:`ticket constraints <ticket-constraints>` to ``fence``, in which
 case the cluster will fence the resource's node if the operation fails or the
 ticket is lost.
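 
 For example, a minimal sketch of a monitor operation that requests fencing of
 the node if the monitor fails (the resource definition around it is omitted):
 
 .. topic:: Operation configured to fence the node on failure
 
    .. code-block:: xml
 
       <op id="my-rsc-monitor-10s" name="monitor" interval="10s" on-fail="fence"/>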
 
 .. index::
    single: fencing; device
 
 Fence Devices
 #############
 
 A *fence device* or *fencing device* is a special type of resource that
 provides the means to fence a node.
 
 Examples of fencing devices include intelligent power switches and IPMI devices
 that accept SNMP commands to cut power to a node, and iSCSI controllers that
 allow SCSI reservations to be used to cut a node's access to a shared disk.
 
 Since fencing devices will be used to recover from loss of networking
 connectivity to other nodes, it is essential that they do not rely on the same
 network as the cluster itself; otherwise, that network becomes a single point
 of failure.
 
 Since loss of a node due to power outage is indistinguishable from loss of
 network connectivity to that node, it is also essential that at least one fence
 device for a node does not share power with that node. For example, an on-board
 IPMI controller that shares power with its host should not be used as the sole
 fencing device for that host.
 
 Since fencing is used to isolate malfunctioning nodes, no fence device should
 rely on its target functioning properly. This includes, for example, devices
 that ssh into a node and issue a shutdown command (such devices might be
 suitable for testing, but never for production).
 
 .. index::
    single: fencing; agent
 
 Fence Agents
 ############
 
 A *fence agent* or *fencing agent* is a ``stonith``-class resource agent.
 
 The fence agent standard provides commands (such as ``off`` and ``reboot``)
 that the cluster can use to fence nodes. As with other resource agent classes,
 this allows a layer of abstraction so that Pacemaker doesn't need any knowledge
 about specific fencing technologies -- that knowledge is isolated in the agent.
 
 Pacemaker supports two fence agent standards, both inherited from
 no-longer-active projects:
 
 * Red Hat Cluster Suite (RHCS) style: These are typically installed in
   ``/usr/sbin`` with names starting with ``fence_``.
 
 * Linux-HA style: These typically have names starting with ``external/``.
   Pacemaker can support these agents using the **fence_legacy** RHCS-style
   agent as a wrapper, *if* support was enabled when Pacemaker was built, which
   requires the ``cluster-glue`` library.
 
 When a Fence Device Can Be Used
 ###############################
 
 Fencing devices do not actually "run" like most services. Typically, they just
 provide an interface for sending commands to an external device.
 
 Additionally, fencing may be initiated by Pacemaker, by other cluster-aware
 software such as DRBD or DLM, or manually by an administrator, at any point in
 the cluster life cycle, including before any resources have been started.
 
 To accommodate this, Pacemaker does not require the fence device resource to be
 "started" in order to be used. Whether a fence device is started or not
 determines whether a node runs any recurring monitor for the device, and gives
 the node a slight preference for being chosen to execute fencing using that
 device.
 
 By default, any node can execute any fencing device. If a fence device is
 disabled by setting its ``target-role`` to ``Stopped``, then no node can use
 that device. If a location constraint with a negative score prevents a specific
 node from "running" a fence device, then that node will never be chosen to
 execute fencing using the device. A node may fence itself, but the cluster will
 choose that only if no other nodes can do the fencing.
 
 A common configuration scenario is to have one fence device per target node.
 In such a case, users often configure anti-location constraints so that
 the target node does not monitor its own device.
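 
 A minimal sketch of such a constraint, assuming a fence device resource named
 **fence-node1** whose target is **node1**:
 
 .. topic:: Keeping a fence device off the node it targets
 
    .. code-block:: xml
 
       <rsc_location id="fence-node1-not-on-node1" rsc="fence-node1" node="node1" score="-INFINITY"/>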
 
 Limitations of Fencing Resources
 ################################
 
 Fencing resources have certain limitations that other resource classes don't:
 
 * They may have only one set of meta-attributes and one set of instance
   attributes.
 * If :ref:`rules` are used to determine fencing resource options, these
   might be evaluated only when first read, meaning that later changes to the
   rules will have no effect. Therefore, it is better to avoid confusion and not
   use rules at all with fencing resources.
 
 These limitations could be revisited if there is sufficient user demand.
 
 .. index::
    single: fencing; special instance attributes
 
 .. _fencing-attributes:
 
 Special Meta-Attributes for Fencing Resources
 #############################################
 
 The table below lists special resource meta-attributes that may be set for any
 fencing resource.
 
 .. table:: **Additional Properties of Fencing Resources**
+   :widths: 2 1 2 4
+
 
    +----------------------+---------+--------------------+----------------------------------------+
    | Field                | Type    | Default            | Description                            |
    +======================+=========+====================+========================================+
    | provides             | string  |                    | .. index::                             |
    |                      |         |                    |    single: provides                    |
    |                      |         |                    |                                        |
    |                      |         |                    | Any special capability provided by the |
    |                      |         |                    | fence device. Currently, only one such |
    |                      |         |                    | capability is meaningful:              |
    |                      |         |                    | :ref:`unfencing <unfencing>`.          |
    +----------------------+---------+--------------------+----------------------------------------+
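 
 For example, a minimal sketch of a device that requires unfencing, assuming
 the ``fence_scsi`` agent (which fences by revoking SCSI reservations):
 
 .. topic:: Fence device that provides unfencing
 
    .. code-block:: xml
 
       <primitive id="fencing-scsi" class="stonith" type="fence_scsi">
           <meta_attributes id="fencing-scsi-meta_attributes">
               <nvpair id="fencing-scsi-meta_attributes-provides" name="provides" value="unfencing"/>
           </meta_attributes>
       </primitive>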
 
 Special Instance Attributes for Fencing Resources
 #################################################
 
 The table below lists special instance attributes that may be set for any
 fencing resource (*not* meta-attributes, even though they are interpreted by
 Pacemaker rather than the fence agent). These are also listed in the man page
 for ``pacemaker-fenced``.
 
 .. Not_Yet_Implemented:
 
    +----------------------+---------+--------------------+----------------------------------------+
    | priority             | integer | 0                  | .. index::                             |
    |                      |         |                    |    single: priority                    |
    |                      |         |                    |                                        |
    |                      |         |                    | The priority of the fence device.      |
    |                      |         |                    | Devices are tried in order of highest  |
    |                      |         |                    | priority to lowest.                    |
    +----------------------+---------+--------------------+----------------------------------------+
 
 .. table:: **Additional Instance Attributes of Fencing Resources**
+   :class: longtable
+   :widths: 2 1 2 4
 
    +----------------------+---------+--------------------+----------------------------------------+
    | Field                | Type    | Default            | Description                            |
    +======================+=========+====================+========================================+
    | stonith-timeout      | time    |                    | .. index::                             |
    |                      |         |                    |    single: stonith-timeout             |
    |                      |         |                    |                                        |
    |                      |         |                    | This is not used by Pacemaker (see the |
    |                      |         |                    | ``pcmk_reboot_timeout``,               |
    |                      |         |                    | ``pcmk_off_timeout``, etc. properties  |
    |                      |         |                    | instead), but it may be used by        |
    |                      |         |                    | Linux-HA fence agents.                 |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_host_map        | string  |                    | .. index::                             |
    |                      |         |                    |    single: pcmk_host_map               |
    |                      |         |                    |                                        |
    |                      |         |                    | A mapping of node names to ports       |
    |                      |         |                    | for devices that do not understand     |
    |                      |         |                    | the node names.                        |
    |                      |         |                    |                                        |
    |                      |         |                    | Example: ``node1:1;node2:2,3`` tells   |
    |                      |         |                    | the cluster to use port 1 for          |
    |                      |         |                    | ``node1`` and ports 2 and 3 for        |
    |                      |         |                    | ``node2``. If ``pcmk_host_check`` is   |
    |                      |         |                    | explicitly set to ``static-list``,     |
    |                      |         |                    | either this or ``pcmk_host_list`` must |
    |                      |         |                    | be set. The port portion of the map    |
    |                      |         |                    | may contain special characters such as |
    |                      |         |                    | spaces if preceded by a backslash      |
    |                      |         |                    | *(since 2.1.2)*.                       |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_host_list       | string  |                    | .. index::                             |
    |                      |         |                    |    single: pcmk_host_list              |
    |                      |         |                    |                                        |
    |                      |         |                    | A list of machines controlled by this  |
    |                      |         |                    | device. If ``pcmk_host_check`` is      |
    |                      |         |                    | explicitly set to ``static-list``,     |
    |                      |         |                    | either this or ``pcmk_host_map`` must  |
    |                      |         |                    | be set.                                |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_host_check      | string  | Value appropriate  | .. index::                             |
    |                      |         | to other           |    single: pcmk_host_check             |
    |                      |         | parameters (see    |                                        |
    |                      |         | "Default Check     | The method Pacemaker should use to     |
    |                      |         | Type" below)       | determine which nodes can be targeted  |
    |                      |         |                    | by this device. Allowed values:        |
    |                      |         |                    |                                        |
    |                      |         |                    | * ``static-list:`` targets are listed  |
    |                      |         |                    |   in the ``pcmk_host_list`` or         |
    |                      |         |                    |   ``pcmk_host_map`` attribute          |
    |                      |         |                    | * ``dynamic-list:`` query the device   |
    |                      |         |                    |   via the agent's ``list`` action      |
    |                      |         |                    | * ``status:`` query the device via the |
    |                      |         |                    |   agent's ``status`` action            |
    |                      |         |                    | * ``none:`` assume the device can      |
    |                      |         |                    |   fence any node                       |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_delay_max       | time    | 0s                 | .. index::                             |
    |                      |         |                    |    single: pcmk_delay_max              |
    |                      |         |                    |                                        |
    |                      |         |                    | Enable a delay of no more than the     |
    |                      |         |                    | time specified before executing        |
    |                      |         |                    | fencing actions. Pacemaker derives the |
    |                      |         |                    | overall delay by taking the value of   |
    |                      |         |                    | ``pcmk_delay_base`` and adding a       |
    |                      |         |                    | random delay value such that the sum   |
    |                      |         |                    | is kept below this maximum. This is    |
    |                      |         |                    | sometimes used in two-node clusters    |
    |                      |         |                    | to ensure that the nodes don't fence   |
    |                      |         |                    | each other at the same time.           |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_delay_base      | time    | 0s                 | .. index::                             |
    |                      |         |                    |    single: pcmk_delay_base             |
    |                      |         |                    |                                        |
    |                      |         |                    | Enable a static delay before executing |
    |                      |         |                    | fencing actions. This can be used, for |
    |                      |         |                    | example, in two-node clusters to       |
    |                      |         |                    | ensure that the nodes don't fence each |
    |                      |         |                    | other, by having separate fencing      |
    |                      |         |                    | resources with different values. The   |
    |                      |         |                    | node that is fenced with the shorter   |
    |                      |         |                    | delay will lose a fencing race. The    |
    |                      |         |                    | overall delay introduced by Pacemaker  |
    |                      |         |                    | is derived from this value plus a      |
    |                      |         |                    | random delay such that the sum is kept |
    |                      |         |                    | below the maximum delay. A single      |
    |                      |         |                    | device can have different delays per   |
    |                      |         |                    | node using a host map *(since 2.1.2)*, |
    |                      |         |                    | for example ``node1:0s;node2:5s`` (see |
    |                      |         |                    | the sketch after this table).          |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_action_limit    | integer | 1                  | .. index::                             |
    |                      |         |                    |    single: pcmk_action_limit           |
    |                      |         |                    |                                        |
    |                      |         |                    | The maximum number of actions that can |
    |                      |         |                    | be performed in parallel on this       |
    |                      |         |                    | device. A value of -1 means unlimited. |
    |                      |         |                    | Node fencing actions initiated by the  |
    |                      |         |                    | cluster (as opposed to an administrator|
    |                      |         |                    | running the ``stonith_admin`` tool or  |
    |                      |         |                    | the fencer running recurring device    |
    |                      |         |                    | monitors and ``status`` and ``list``   |
    |                      |         |                    | commands) are additionally subject to  |
    |                      |         |                    | the ``concurrent-fencing`` cluster     |
    |                      |         |                    | property.                              |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_host_argument   | string  | ``plug`` if        | .. index::                             |
    |                      |         | supported          |    single: pcmk_host_argument          |
    |                      |         | according to the   |                                        |
    |                      |         | fence agent's      | *Advanced use only.* Which parameter   |
    |                      |         | metadata,          | should be supplied to the fence agent  |
    |                      |         | otherwise ``port`` | to identify the node to be fenced.     |
    |                      |         |                    | Some devices support neither the       |
    |                      |         |                    | standard ``plug`` nor the deprecated   |
    |                      |         |                    | ``port`` parameter, or may provide     |
    |                      |         |                    | additional ones. Use this to specify   |
    |                      |         |                    | an alternate, device-specific          |
    |                      |         |                    | parameter. A value of ``none`` tells   |
    |                      |         |                    | the cluster not to supply any          |
    |                      |         |                    | additional parameters.                 |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_reboot_action   | string  | reboot             | .. index::                             |
    |                      |         |                    |    single: pcmk_reboot_action          |
    |                      |         |                    |                                        |
    |                      |         |                    | *Advanced use only.* The command to    |
    |                      |         |                    | send to the resource agent in order to |
    |                      |         |                    | reboot a node. Some devices do not     |
    |                      |         |                    | support the standard commands or may   |
    |                      |         |                    | provide additional ones. Use this to   |
    |                      |         |                    | specify an alternate, device-specific  |
    |                      |         |                    | command.                               |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_reboot_timeout  | time    | 60s                | .. index::                             |
    |                      |         |                    |    single: pcmk_reboot_timeout         |
    |                      |         |                    |                                        |
    |                      |         |                    | *Advanced use only.* Specify an        |
    |                      |         |                    | alternate timeout to use for           |
    |                      |         |                    | ``reboot`` actions instead of the      |
    |                      |         |                    | value of ``stonith-timeout``. Some     |
    |                      |         |                    | devices need much more or less time to |
    |                      |         |                    | complete than normal. Use this to      |
    |                      |         |                    | specify an alternate, device-specific  |
    |                      |         |                    | timeout.                               |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_reboot_retries  | integer | 2                  | .. index::                             |
    |                      |         |                    |    single: pcmk_reboot_retries         |
    |                      |         |                    |                                        |
    |                      |         |                    | *Advanced use only.* The maximum       |
    |                      |         |                    | number of times to retry the           |
    |                      |         |                    | ``reboot`` command within the timeout  |
    |                      |         |                    | period. Some devices do not support    |
    |                      |         |                    | multiple connections, and operations   |
    |                      |         |                    | may fail if the device is busy with    |
    |                      |         |                    | another task, so Pacemaker will        |
    |                      |         |                    | automatically retry the operation, if  |
    |                      |         |                    | there is time remaining. Use this      |
    |                      |         |                    | option to alter the number of times    |
    |                      |         |                    | Pacemaker retries before giving up.    |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_off_action      | string  | off                | .. index::                             |
    |                      |         |                    |    single: pcmk_off_action             |
    |                      |         |                    |                                        |
    |                      |         |                    | *Advanced use only.* The command to    |
    |                      |         |                    | send to the resource agent in order to |
    |                      |         |                    | shut down a node. Some devices do not  |
    |                      |         |                    | support the standard commands or may   |
    |                      |         |                    | provide additional ones. Use this to   |
    |                      |         |                    | specify an alternate, device-specific  |
    |                      |         |                    | command.                               |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_off_timeout     | time    | 60s                | .. index::                             |
    |                      |         |                    |    single: pcmk_off_timeout            |
    |                      |         |                    |                                        |
    |                      |         |                    | *Advanced use only.* Specify an        |
    |                      |         |                    | alternate timeout to use for           |
    |                      |         |                    | ``off`` actions instead of the         |
    |                      |         |                    | value of ``stonith-timeout``. Some     |
    |                      |         |                    | devices need much more or less time to |
    |                      |         |                    | complete than normal. Use this to      |
    |                      |         |                    | specify an alternate, device-specific  |
    |                      |         |                    | timeout.                               |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_off_retries     | integer | 2                  | .. index::                             |
    |                      |         |                    |    single: pcmk_off_retries            |
    |                      |         |                    |                                        |
    |                      |         |                    | *Advanced use only.* The maximum       |
    |                      |         |                    | number of times to retry the           |
    |                      |         |                    | ``off`` command within the timeout     |
    |                      |         |                    | period. Some devices do not support    |
    |                      |         |                    | multiple connections, and operations   |
    |                      |         |                    | may fail if the device is busy with    |
    |                      |         |                    | another task, so Pacemaker will        |
    |                      |         |                    | automatically retry the operation, if  |
    |                      |         |                    | there is time remaining. Use this      |
    |                      |         |                    | option to alter the number of times    |
    |                      |         |                    | Pacemaker retries before giving up.    |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_list_action     | string  | list               | .. index::                             |
    |                      |         |                    |    single: pcmk_list_action            |
    |                      |         |                    |                                        |
    |                      |         |                    | *Advanced use only.* The command to    |
    |                      |         |                    | send to the resource agent in order to |
    |                      |         |                    | list nodes. Some devices do not        |
    |                      |         |                    | support the standard commands or may   |
    |                      |         |                    | provide additional ones. Use this to   |
    |                      |         |                    | specify an alternate, device-specific  |
    |                      |         |                    | command.                               |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_list_timeout    | time    | 60s                | .. index::                             |
    |                      |         |                    |    single: pcmk_list_timeout           |
    |                      |         |                    |                                        |
    |                      |         |                    | *Advanced use only.* Specify an        |
    |                      |         |                    | alternate timeout to use for           |
    |                      |         |                    | ``list`` actions instead of the        |
    |                      |         |                    | value of ``stonith-timeout``. Some     |
    |                      |         |                    | devices need much more or less time to |
    |                      |         |                    | complete than normal. Use this to      |
    |                      |         |                    | specify an alternate, device-specific  |
    |                      |         |                    | timeout.                               |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_list_retries    | integer | 2                  | .. index::                             |
    |                      |         |                    |    single: pcmk_list_retries           |
    |                      |         |                    |                                        |
    |                      |         |                    | *Advanced use only.* The maximum       |
    |                      |         |                    | number of times to retry the           |
    |                      |         |                    | ``list`` command within the timeout    |
    |                      |         |                    | period. Some devices do not support    |
    |                      |         |                    | multiple connections, and operations   |
    |                      |         |                    | may fail if the device is busy with    |
    |                      |         |                    | another task, so Pacemaker will        |
    |                      |         |                    | automatically retry the operation, if  |
    |                      |         |                    | there is time remaining. Use this      |
    |                      |         |                    | option to alter the number of times    |
    |                      |         |                    | Pacemaker retries before giving up.    |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_monitor_action  | string  | monitor            | .. index::                             |
    |                      |         |                    |    single: pcmk_monitor_action         |
    |                      |         |                    |                                        |
    |                      |         |                    | *Advanced use only.* The command to    |
    |                      |         |                    | send to the resource agent in order to |
    |                      |         |                    | report extended status. Some devices do|
    |                      |         |                    | not support the standard commands or   |
    |                      |         |                    | may provide additional ones. Use this  |
    |                      |         |                    | to specify an alternate,               |
    |                      |         |                    | device-specific command.               |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_monitor_timeout | time    | 60s                | .. index::                             |
    |                      |         |                    |    single: pcmk_monitor_timeout        |
    |                      |         |                    |                                        |
    |                      |         |                    | *Advanced use only.* Specify an        |
    |                      |         |                    | alternate timeout to use for           |
    |                      |         |                    | ``monitor`` actions instead of the     |
    |                      |         |                    | value of ``stonith-timeout``. Some     |
    |                      |         |                    | devices need much more or less time to |
    |                      |         |                    | complete than normal. Use this to      |
    |                      |         |                    | specify an alternate, device-specific  |
    |                      |         |                    | timeout.                               |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_monitor_retries | integer | 2                  | .. index::                             |
    |                      |         |                    |    single: pcmk_monitor_retries        |
    |                      |         |                    |                                        |
    |                      |         |                    | *Advanced use only.* The maximum       |
    |                      |         |                    | number of times to retry the           |
    |                      |         |                    | ``monitor`` command within the timeout |
    |                      |         |                    | period. Some devices do not support    |
    |                      |         |                    | multiple connections, and operations   |
    |                      |         |                    | may fail if the device is busy with    |
    |                      |         |                    | another task, so Pacemaker will        |
    |                      |         |                    | automatically retry the operation, if  |
    |                      |         |                    | there is time remaining. Use this      |
    |                      |         |                    | option to alter the number of times    |
    |                      |         |                    | Pacemaker retries before giving up.    |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_status_action   | string  | status             | .. index::                             |
    |                      |         |                    |    single: pcmk_status_action          |
    |                      |         |                    |                                        |
    |                      |         |                    | *Advanced use only.* The command to    |
    |                      |         |                    | send to the resource agent in order to |
    |                      |         |                    | report status. Some devices do         |
    |                      |         |                    | not support the standard commands or   |
    |                      |         |                    | may provide additional ones. Use this  |
    |                      |         |                    | to specify an alternate,               |
    |                      |         |                    | device-specific command.               |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_status_timeout  | time    | 60s                | .. index::                             |
    |                      |         |                    |    single: pcmk_status_timeout         |
    |                      |         |                    |                                        |
    |                      |         |                    | *Advanced use only.* Specify an        |
    |                      |         |                    | alternate timeout to use for           |
    |                      |         |                    | ``status`` actions instead of the      |
    |                      |         |                    | value of ``stonith-timeout``. Some     |
    |                      |         |                    | devices need much more or less time to |
    |                      |         |                    | complete than normal. Use this to      |
    |                      |         |                    | specify an alternate, device-specific  |
    |                      |         |                    | timeout.                               |
    +----------------------+---------+--------------------+----------------------------------------+
    | pcmk_status_retries  | integer | 2                  | .. index::                             |
    |                      |         |                    |    single: pcmk_status_retries         |
    |                      |         |                    |                                        |
    |                      |         |                    | *Advanced use only.* The maximum       |
    |                      |         |                    | number of times to retry the           |
    |                      |         |                    | ``status`` command within the timeout  |
    |                      |         |                    | period. Some devices do not support    |
    |                      |         |                    | multiple connections, and operations   |
    |                      |         |                    | may fail if the device is busy with    |
    |                      |         |                    | another task, so Pacemaker will        |
    |                      |         |                    | automatically retry the operation, if  |
    |                      |         |                    | there is time remaining. Use this      |
    |                      |         |                    | option to alter the number of times    |
    |                      |         |                    | Pacemaker retries before giving up.    |
    +----------------------+---------+--------------------+----------------------------------------+
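 
 As an illustration of the delay parameters above, here is a minimal sketch of
 two per-node fence devices in a two-node cluster with staggered static delays
 (all names, addresses, and values are hypothetical):
 
 .. code-block:: xml
 
    <primitive id="fence-node1" class="stonith" type="fence_ipmilan" >
      <instance_attributes id="fence-node1-params" >
        <nvpair id="fence-node1-ip" name="ip" value="192.0.2.1" />
        <nvpair id="fence-node1-hosts" name="pcmk_host_list" value="node1" />
        <!-- node1 is fenced without delay, so it loses a fencing race -->
        <nvpair id="fence-node1-delay" name="pcmk_delay_base" value="0s" />
      </instance_attributes>
    </primitive>
    <primitive id="fence-node2" class="stonith" type="fence_ipmilan" >
      <instance_attributes id="fence-node2-params" >
        <nvpair id="fence-node2-ip" name="ip" value="192.0.2.2" />
        <nvpair id="fence-node2-hosts" name="pcmk_host_list" value="node2" />
        <!-- fencing node2 waits 5 seconds, so node2 wins a fencing race -->
        <nvpair id="fence-node2-delay" name="pcmk_delay_base" value="5s" />
      </instance_attributes>
    </primitive>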
 
 Default Check Type
 ##################
 
 If the user does not explicitly configure ``pcmk_host_check`` for a fence
 device, a default value appropriate to other configured parameters will be
 used (a configuration sketch follows the list below):
 
 * If either ``pcmk_host_list`` or ``pcmk_host_map`` is configured,
   ``static-list`` will be used;
 * otherwise, if the fence device supports the ``list`` action, and the first
   attempt at using ``list`` succeeds, ``dynamic-list`` will be used;
 * otherwise, if the fence device supports the ``status`` action, ``status``
   will be used;
 * otherwise, ``none`` will be used.
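 
 For example, with a configuration like the following minimal sketch (names
 hypothetical), ``static-list`` would be the default; setting
 ``pcmk_host_check`` explicitly, as shown, simply makes that intent visible:
 
 .. code-block:: xml
 
    <primitive id="fence-node1" class="stonith" type="fence_ipmilan" >
      <instance_attributes id="fence-node1-params" >
        <nvpair id="fence-node1-hosts" name="pcmk_host_list" value="node1" />
        <!-- redundant here, since pcmk_host_list already implies it -->
        <nvpair id="fence-node1-check" name="pcmk_host_check" value="static-list" />
      </instance_attributes>
    </primitive>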
 
 .. index::
    single: unfencing
    single: fencing; unfencing
 
 .. _unfencing:
 
 Unfencing
 #########
 
 With fabric fencing (such as cutting network or shared disk access rather than
 power), it is expected that the cluster will fence the node, and then a system
 administrator must manually investigate what went wrong, correct any issues
 found, then reboot (or restart the cluster services on) the node.
 
 Once the node reboots and rejoins the cluster, some fabric fencing devices
 require an explicit command to restore the node's access. This capability is
 called *unfencing* and is typically implemented as the fence agent's ``on``
 command.
 
 If any cluster resource has ``requires`` set to ``unfencing``, then that
 resource will not be probed or started on a node until that node has been
 unfenced.
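 
 For example, a minimal sketch of a resource that must wait for unfencing
 (the resource and ``id`` names are hypothetical; ``requires`` is a resource
 meta-attribute):
 
 .. code-block:: xml
 
    <primitive id="shared-fs" class="ocf" provider="heartbeat" type="Filesystem" >
      <meta_attributes id="shared-fs-meta" >
        <!-- do not probe or start this resource on a node until that node
             has been unfenced -->
        <nvpair id="shared-fs-requires" name="requires" value="unfencing" />
      </meta_attributes>
    </primitive>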
 
 Fencing and Quorum
 ##################
 
 In general, a cluster partition may execute fencing only if the partition has
 quorum, and the ``stonith-enabled`` cluster property is set to true. However,
 there are exceptions:
 
 * The requirements apply only to fencing initiated by Pacemaker. If an
   administrator initiates fencing using the ``stonith_admin`` command, or an
   external application such as DLM initiates fencing using Pacemaker's C API,
   the requirements do not apply.
 
 * A cluster partition without quorum is allowed to fence any active member of
   that partition. As a corollary, this allows a ``no-quorum-policy`` of
   ``suicide`` to work.
 
 * If the ``no-quorum-policy`` cluster property is set to ``ignore``, then
   quorum is not required to execute fencing of any node.
 
 Fencing Timeouts
 ################
 
 Fencing timeouts are complicated, since a single fencing operation can involve
 many steps, each of which may have a separate timeout.
 
 Fencing may be initiated in one of several ways:
 
 * An administrator may initiate fencing using the ``stonith_admin`` tool,
   which has a ``--timeout`` option (defaulting to 2 minutes) that will be used
   as the fence operation timeout.
 
 * An external application such as DLM may initiate fencing using the Pacemaker
   C API. The application will specify the fence operation timeout in this case,
   which might or might not be configurable by the user.
 
 * The cluster may initiate fencing itself. In this case, the
   ``stonith-timeout`` cluster property (defaulting to 1 minute) will be used as
   the fence operation timeout.
 
 However fencing is initiated, the initiator contacts Pacemaker's fencer
 (``pacemaker-fenced``) to request fencing. This connection and request has its
 own timeout, separate from the fencing operation timeout, but usually happens
 very quickly.
 
 The fencer will then contact the fencer instance on each cluster node to ask
 what devices it has available to fence the target node. The fence operation
 timeout will be used as the timeout for each of these queries.
 
 Once a fencing device has been selected, the fencer will check whether any
 action-specific timeout has been configured for the device, to use instead of
 the fence operation timeout. For example, if ``stonith-timeout`` is 60 seconds,
 but the fencing device has ``pcmk_reboot_timeout`` configured as 90 seconds,
 then a timeout of 90 seconds will be used for reboot actions using that device.
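 
 A minimal sketch of that example (``id`` values hypothetical): the
 cluster-wide default is a cluster property, while the per-device override is
 an instance attribute of the fence device:
 
 .. code-block:: xml
 
    <!-- in crm_config: the default fence operation timeout -->
    <nvpair id="opt-stonith-timeout" name="stonith-timeout" value="60s" />
 
    <!-- in the fence device's instance attributes: reboot actions against
         this device get 90 seconds instead -->
    <nvpair id="fence-reboot-timeout" name="pcmk_reboot_timeout" value="90s" />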
 
 A device may have retries configured, in which case the timeout applies across
 all attempts. For example, if a device has ``pcmk_reboot_retries`` configured
 as 2, and the first reboot attempt fails, the second attempt will only have
 whatever time is remaining in the action timeout after subtracting how much
 time the first attempt used. This means that if the first attempt fails due to
 using the entire timeout, no further attempts will be made. There is currently
 no way to configure a per-attempt timeout.
 
 If more than one device is required to fence a target, whether due to failure
 of the first device or a fencing topology with multiple devices configured for
 the target, each device will have its own separate action timeout.
 
 For all of the above timeouts, the fencer will generally multiply the
 configured value by 1.2 to get an actual value to use, to account for time
 needed by the fencer's own processing. For example, a configured timeout of
 60 seconds results in an actual timeout of 72 seconds.
 
 Separate from the fencer's timeouts, some fence agents have internal timeouts
 for individual steps of their fencing process. These agents often have
 parameters to configure these timeouts, such as ``login-timeout``,
 ``shell-timeout``, or ``power-timeout``. Many such agents also have a
 ``disable-timeout`` parameter to ignore their internal timeouts and just let
 Pacemaker handle the timeout. This causes a difference in retry behavior.
 If ``disable-timeout`` is not set, and the agent hits one of its internal
 timeouts, it will report that as a failure to Pacemaker, which can then retry.
 If ``disable-timeout`` is set, and Pacemaker hits a timeout for the agent, then
 there will be no time remaining, and no retry will be done.
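 
 As a sketch of tuning such internal timeouts, using ``fence_ipmilan``'s
 ``power_timeout`` parameter as an example (``id`` values hypothetical; check
 your agent's metadata for the parameters and defaults it actually offers):
 
 .. code-block:: xml
 
    <primitive id="ipmi-fence" class="stonith" type="fence_ipmilan" >
      <instance_attributes id="ipmi-fence-params" >
        <!-- allow up to 40 seconds for a status change after power on/off,
             rather than the agent's default of 20 -->
        <nvpair id="ipmi-fence-power-timeout" name="power_timeout" value="40" />
      </instance_attributes>
    </primitive>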
 
 Fence Devices Dependent on Other Resources
 ##########################################
 
 In some cases, a fence device may require some other cluster resource (such as
 an IP address) to be active in order to function properly.
 
 This is obviously undesirable in general: fencing may be required when the
 depended-on resource is not active, or fencing may be required because the node
 running the depended-on resource is no longer responding.
 
 However, this may be acceptable under certain conditions:
 
 * The dependent fence device should not be able to target any node that is
   allowed to run the depended-on resource.
 
 * The depended-on resource should not be disabled during production operation.
 
 * The ``concurrent-fencing`` cluster property should be set to ``true``
   (a sketch follows this list).
   Otherwise, if both the node running the depended-on resource and some node
   targeted by the dependent fence device need to be fenced, the fencing of the
   node running the depended-on resource might be ordered first, making the
   second fencing impossible and blocking further recovery. With concurrent
   fencing, the dependent fence device might fail at first due to the
   depended-on resource being unavailable, but it will be retried and eventually
   succeed once the resource is brought back up.
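 
 A minimal sketch of the cluster property from the last condition above (the
 ``id`` values are hypothetical):
 
 .. code-block:: xml
 
    <crm_config>
      <cluster_property_set id="cib-bootstrap-options" >
        <!-- allow fencing of multiple nodes to proceed in parallel -->
        <nvpair id="opt-concurrent-fencing" name="concurrent-fencing" value="true" />
      </cluster_property_set>
    </crm_config>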
 
 Even under those conditions, there is one unlikely problem scenario. The DC
 always schedules fencing of itself after any other fencing needed, to avoid
 unnecessary repeated DC elections. If the dependent fence device targets the
 DC, and both the DC and a different node running the depended-on resource need
 to be fenced, the DC fencing will always fail and block further recovery. Note,
 however, that losing a DC node entirely causes some other node to become DC and
 schedule the fencing, so this is only a risk when a stop or other operation
 with ``on-fail`` set to ``fencing`` fails on the DC.
 
 .. index::
    single: fencing; configuration
 
 Configuring Fencing
 ###################
 
 Higher-level tools can provide simpler interfaces to this process, but using
 Pacemaker command-line tools, this is how you could configure a fence device.
 
 #. Find the correct driver:
 
    .. code-block:: none
 
       # stonith_admin --list-installed
 
    .. note::
 
       You may have to install packages to make fence agents available on your
       host. Searching your available packages for ``fence-`` is usually
       helpful. Ensure the packages providing the fence agents you require are
       installed on every cluster node.
 
 #. Find the required parameters associated with the device
    (replacing ``$AGENT_NAME`` with the name obtained from the previous step):
 
    .. code-block:: none
 
       # stonith_admin --metadata --agent $AGENT_NAME
 
 #. Create a file called ``stonith.xml`` containing a primitive resource
    with a class of ``stonith``, a type equal to the agent name obtained earlier,
    and a parameter for each of the values returned in the previous step.
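 
    A hypothetical skeleton (``$AGENT_NAME``, ``param1``, and ``value1`` are
    placeholders for the agent and parameters found in the previous steps):
 
    .. code-block:: xml
 
       <primitive id="my-fence-device" class="stonith" type="$AGENT_NAME" >
         <instance_attributes id="my-fence-device-params" >
           <!-- one nvpair per required parameter from the metadata -->
           <nvpair id="my-fence-device-param1" name="param1" value="value1" />
         </instance_attributes>
       </primitive>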
 
 #. If the device does not know how to fence nodes based on their uname,
    you may also need to set the special ``pcmk_host_map`` parameter.  See
    :ref:`fencing-attributes` for details.
 
 #. If the device does not support the ``list`` command, you may also need
    to set the special ``pcmk_host_list`` and/or ``pcmk_host_check``
    parameters.  See :ref:`fencing-attributes` for details.
 
 #. If the device does not expect the target node to be specified with the
    ``port`` parameter, you may also need to set the special
    ``pcmk_host_argument`` parameter. See :ref:`fencing-attributes` for details.
 
 #. Upload it into the CIB using cibadmin:
 
    .. code-block:: none
 
       # cibadmin --create --scope resources --xml-file stonith.xml
 
 #. Set ``stonith-enabled`` to true:
 
    .. code-block:: none
 
       # crm_attribute --type crm_config --name stonith-enabled --update true
 
 #. Once the stonith resource is running, you can test it by executing the
    following, replacing ``$NODE_NAME`` with the name of the node to fence
    (although you might want to stop the cluster on that machine first):
 
    .. code-block:: none
 
       # stonith_admin --reboot $NODE_NAME
 
 
 Example Fencing Configuration
 _____________________________
 
 For this example, we assume we have a cluster node, ``pcmk-1``, whose IPMI
 controller is reachable at the IP address 192.0.2.1. The IPMI controller uses
 the username ``testuser`` and the password ``abc123``.
 
 #. Looking at what's installed, we may see a variety of available agents:
 
    .. code-block:: none
 
       # stonith_admin --list-installed
 
    .. code-block:: none
 
       (... some output omitted ...)
       fence_idrac
       fence_ilo3
       fence_ilo4
       fence_ilo5
       fence_imm
       fence_ipmilan
       (... some output omitted ...)
 
    Perhaps after reading some man pages and doing some Internet searches, we
    might decide ``fence_ipmilan`` is our best choice.
 
 #. Next, we would check what parameters ``fence_ipmilan`` provides:
 
    .. code-block:: none
 
       # stonith_admin --metadata -a fence_ipmilan
 
    .. code-block:: xml
 
       <resource-agent name="fence_ipmilan" shortdesc="Fence agent for IPMI">
         <symlink name="fence_ilo3" shortdesc="Fence agent for HP iLO3"/>
         <symlink name="fence_ilo4" shortdesc="Fence agent for HP iLO4"/>
         <symlink name="fence_ilo5" shortdesc="Fence agent for HP iLO5"/>
         <symlink name="fence_imm" shortdesc="Fence agent for IBM Integrated Management Module"/>
         <symlink name="fence_idrac" shortdesc="Fence agent for Dell iDRAC"/>
         <longdesc>fence_ipmilan is an I/O Fencing agentwhich can be used with machines controlled by IPMI.This agent calls support software ipmitool (http://ipmitool.sf.net/). WARNING! This fence agent might report success before the node is powered off. You should use -m/method onoff if your fence device works correctly with that option.</longdesc>
         <vendor-url/>
         <parameters>
           <parameter name="action" unique="0" required="0">
             <getopt mixed="-o, --action=[action]"/>
             <content type="string" default="reboot"/>
             <shortdesc lang="en">Fencing action</shortdesc>
           </parameter>
           <parameter name="auth" unique="0" required="0">
             <getopt mixed="-A, --auth=[auth]"/>
             <content type="select">
               <option value="md5"/>
               <option value="password"/>
               <option value="none"/>
             </content>
             <shortdesc lang="en">IPMI Lan Auth type.</shortdesc>
           </parameter>
           <parameter name="cipher" unique="0" required="0">
             <getopt mixed="-C, --cipher=[cipher]"/>
             <content type="string"/>
             <shortdesc lang="en">Ciphersuite to use (same as ipmitool -C parameter)</shortdesc>
           </parameter>
           <parameter name="hexadecimal_kg" unique="0" required="0">
             <getopt mixed="--hexadecimal-kg=[key]"/>
             <content type="string"/>
             <shortdesc lang="en">Hexadecimal-encoded Kg key for IPMIv2 authentication</shortdesc>
           </parameter>
           <parameter name="ip" unique="0" required="0" obsoletes="ipaddr">
             <getopt mixed="-a, --ip=[ip]"/>
             <content type="string"/>
             <shortdesc lang="en">IP address or hostname of fencing device</shortdesc>
           </parameter>
           <parameter name="ipaddr" unique="0" required="0" deprecated="1">
             <getopt mixed="-a, --ip=[ip]"/>
             <content type="string"/>
             <shortdesc lang="en">IP address or hostname of fencing device</shortdesc>
           </parameter>
           <parameter name="ipport" unique="0" required="0">
             <getopt mixed="-u, --ipport=[port]"/>
             <content type="integer" default="623"/>
             <shortdesc lang="en">TCP/UDP port to use for connection with device</shortdesc>
           </parameter>
           <parameter name="lanplus" unique="0" required="0">
             <getopt mixed="-P, --lanplus"/>
             <content type="boolean" default="0"/>
             <shortdesc lang="en">Use Lanplus to improve security of connection</shortdesc>
           </parameter>
           <parameter name="login" unique="0" required="0" deprecated="1">
             <getopt mixed="-l, --username=[name]"/>
             <content type="string"/>
             <shortdesc lang="en">Login name</shortdesc>
           </parameter>
           <parameter name="method" unique="0" required="0">
             <getopt mixed="-m, --method=[method]"/>
             <content type="select" default="onoff">
               <option value="onoff"/>
               <option value="cycle"/>
             </content>
             <shortdesc lang="en">Method to fence</shortdesc>
           </parameter>
           <parameter name="passwd" unique="0" required="0" deprecated="1">
             <getopt mixed="-p, --password=[password]"/>
             <content type="string"/>
             <shortdesc lang="en">Login password or passphrase</shortdesc>
           </parameter>
           <parameter name="passwd_script" unique="0" required="0" deprecated="1">
             <getopt mixed="-S, --password-script=[script]"/>
             <content type="string"/>
             <shortdesc lang="en">Script to run to retrieve password</shortdesc>
           </parameter>
           <parameter name="password" unique="0" required="0" obsoletes="passwd">
             <getopt mixed="-p, --password=[password]"/>
             <content type="string"/>
             <shortdesc lang="en">Login password or passphrase</shortdesc>
           </parameter>
           <parameter name="password_script" unique="0" required="0" obsoletes="passwd_script">
             <getopt mixed="-S, --password-script=[script]"/>
             <content type="string"/>
             <shortdesc lang="en">Script to run to retrieve password</shortdesc>
           </parameter>
           <parameter name="plug" unique="0" required="0" obsoletes="port">
             <getopt mixed="-n, --plug=[ip]"/>
             <content type="string"/>
             <shortdesc lang="en">IP address or hostname of fencing device (together with --port-as-ip)</shortdesc>
           </parameter>
           <parameter name="port" unique="0" required="0" deprecated="1">
             <getopt mixed="-n, --plug=[ip]"/>
             <content type="string"/>
             <shortdesc lang="en">IP address or hostname of fencing device (together with --port-as-ip)</shortdesc>
           </parameter>
           <parameter name="privlvl" unique="0" required="0">
             <getopt mixed="-L, --privlvl=[level]"/>
             <content type="select" default="administrator">
               <option value="callback"/>
               <option value="user"/>
               <option value="operator"/>
               <option value="administrator"/>
             </content>
             <shortdesc lang="en">Privilege level on IPMI device</shortdesc>
           </parameter>
           <parameter name="target" unique="0" required="0">
             <getopt mixed="--target=[targetaddress]"/>
             <content type="string"/>
             <shortdesc lang="en">Bridge IPMI requests to the remote target address</shortdesc>
           </parameter>
           <parameter name="username" unique="0" required="0" obsoletes="login">
             <getopt mixed="-l, --username=[name]"/>
             <content type="string"/>
             <shortdesc lang="en">Login name</shortdesc>
           </parameter>
           <parameter name="quiet" unique="0" required="0">
             <getopt mixed="-q, --quiet"/>
             <content type="boolean"/>
             <shortdesc lang="en">Disable logging to stderr. Does not affect --verbose or --debug-file or logging to syslog.</shortdesc>
           </parameter>
           <parameter name="verbose" unique="0" required="0">
             <getopt mixed="-v, --verbose"/>
             <content type="boolean"/>
             <shortdesc lang="en">Verbose mode</shortdesc>
           </parameter>
           <parameter name="debug" unique="0" required="0" deprecated="1">
             <getopt mixed="-D, --debug-file=[debugfile]"/>
             <content type="string"/>
             <shortdesc lang="en">Write debug information to given file</shortdesc>
           </parameter>
           <parameter name="debug_file" unique="0" required="0" obsoletes="debug">
             <getopt mixed="-D, --debug-file=[debugfile]"/>
             <content type="string"/>
             <shortdesc lang="en">Write debug information to given file</shortdesc>
           </parameter>
           <parameter name="version" unique="0" required="0">
             <getopt mixed="-V, --version"/>
             <content type="boolean"/>
             <shortdesc lang="en">Display version information and exit</shortdesc>
           </parameter>
           <parameter name="help" unique="0" required="0">
             <getopt mixed="-h, --help"/>
             <content type="boolean"/>
             <shortdesc lang="en">Display help and exit</shortdesc>
           </parameter>
           <parameter name="delay" unique="0" required="0">
             <getopt mixed="--delay=[seconds]"/>
             <content type="second" default="0"/>
             <shortdesc lang="en">Wait X seconds before fencing is started</shortdesc>
           </parameter>
           <parameter name="ipmitool_path" unique="0" required="0">
             <getopt mixed="--ipmitool-path=[path]"/>
             <content type="string" default="/usr/bin/ipmitool"/>
             <shortdesc lang="en">Path to ipmitool binary</shortdesc>
           </parameter>
           <parameter name="login_timeout" unique="0" required="0">
             <getopt mixed="--login-timeout=[seconds]"/>
             <content type="second" default="5"/>
             <shortdesc lang="en">Wait X seconds for cmd prompt after login</shortdesc>
           </parameter>
           <parameter name="port_as_ip" unique="0" required="0">
             <getopt mixed="--port-as-ip"/>
             <content type="boolean"/>
             <shortdesc lang="en">Make "port/plug" to be an alias to IP address</shortdesc>
           </parameter>
           <parameter name="power_timeout" unique="0" required="0">
             <getopt mixed="--power-timeout=[seconds]"/>
             <content type="second" default="20"/>
             <shortdesc lang="en">Test X seconds for status change after ON/OFF</shortdesc>
           </parameter>
           <parameter name="power_wait" unique="0" required="0">
             <getopt mixed="--power-wait=[seconds]"/>
             <content type="second" default="2"/>
             <shortdesc lang="en">Wait X seconds after issuing ON/OFF</shortdesc>
           </parameter>
           <parameter name="shell_timeout" unique="0" required="0">
             <getopt mixed="--shell-timeout=[seconds]"/>
             <content type="second" default="3"/>
             <shortdesc lang="en">Wait X seconds for cmd prompt after issuing command</shortdesc>
           </parameter>
           <parameter name="retry_on" unique="0" required="0">
             <getopt mixed="--retry-on=[attempts]"/>
             <content type="integer" default="1"/>
             <shortdesc lang="en">Count of attempts to retry power on</shortdesc>
           </parameter>
           <parameter name="sudo" unique="0" required="0" deprecated="1">
             <getopt mixed="--use-sudo"/>
             <content type="boolean"/>
             <shortdesc lang="en">Use sudo (without password) when calling 3rd party software</shortdesc>
           </parameter>
           <parameter name="use_sudo" unique="0" required="0" obsoletes="sudo">
             <getopt mixed="--use-sudo"/>
             <content type="boolean"/>
             <shortdesc lang="en">Use sudo (without password) when calling 3rd party software</shortdesc>
           </parameter>
           <parameter name="sudo_path" unique="0" required="0">
             <getopt mixed="--sudo-path=[path]"/>
             <content type="string" default="/usr/bin/sudo"/>
             <shortdesc lang="en">Path to sudo binary</shortdesc>
           </parameter>
         </parameters>
         <actions>
           <action name="on" automatic="0"/>
           <action name="off"/>
           <action name="reboot"/>
           <action name="status"/>
           <action name="monitor"/>
           <action name="metadata"/>
           <action name="manpage"/>
           <action name="validate-all"/>
           <action name="diag"/>
           <action name="stop" timeout="20s"/>
           <action name="start" timeout="20s"/>
         </actions>
       </resource-agent>
 
    Once we've decided what parameter values we think we need, it is a good idea
    to run the fence agent's status action manually, to verify that our values
    work correctly:
 
    .. code-block:: none
 
       # fence_ipmilan --lanplus -a 192.0.2.1 -l testuser -p abc123 -o status
 
       Chassis Power is on
 
 #. Based on that, we might create a fencing resource configuration like this in
    ``stonith.xml`` (or any file name, just use the same name with ``cibadmin``
    later):
 
    .. code-block:: xml
 
       <primitive id="Fencing-pcmk-1" class="stonith" type="fence_ipmilan" >
         <instance_attributes id="Fencing-params" >
           <nvpair id="Fencing-lanplus" name="lanplus" value="1" />
           <nvpair id="Fencing-ip" name="ip" value="192.0.2.1" />
           <nvpair id="Fencing-password" name="password" value="testuser" />
           <nvpair id="Fencing-username" name="username" value="abc123" />
         </instance_attributes>
         <operations >
           <op id="Fencing-monitor-10m" interval="10m" name="monitor" timeout="300s" />
         </operations>
       </primitive>
 
    .. note::
 
       Even though the man page shows that the ``action`` parameter is
       supported, we do not provide that in the resource configuration.
       Pacemaker will supply an appropriate action whenever the fence device
       must be used.
 
 #. In this case, we don't need to configure ``pcmk_host_map`` because
    ``fence_ipmilan`` ignores the target node name and instead uses its
    ``ip`` parameter to know how to contact the IPMI controller.
 
 #. We do need to let Pacemaker know which cluster node can be fenced by this
    device, since ``fence_ipmilan`` doesn't support the ``list`` action. Add
    a line like this to the agent's instance attributes:
 
    .. code-block:: xml
 
           <nvpair id="Fencing-pcmk_host_list" name="pcmk_host_list" value="pcmk-1" />
 
 #. We don't need to configure ``pcmk_host_argument`` since ``ip`` is all the
    fence agent needs (it ignores the target name).
 
 #. Make the configuration active:
 
    .. code-block:: none
 
       # cibadmin --create --scope resources --xml-file stonith.xml
 
 #. Set ``stonith-enabled`` to true (this only has to be done once):
 
    .. code-block:: none
 
       # crm_attribute --type crm_config --name stonith-enabled --update true
 
 #. Since our cluster is still in testing, we can reboot ``pcmk-1`` without
    bothering anyone, so we'll test our fencing configuration by running this
    from one of the other cluster nodes:
 
    .. code-block:: none
 
       # stonith_admin --reboot pcmk-1
 
    Then we will verify that the node did, in fact, reboot.
 
 We can repeat that process to create a separate fencing resource for each node.
 
 With some other fence device types, a single fencing resource can be used for
 all nodes. In fact, we could do that with ``fence_ipmilan``, using the
 ``port_as_ip`` parameter along with ``pcmk_host_map``, as sketched below.
 Either approach is fine.
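 
 A minimal sketch of that single-device approach (the address for ``pcmk-2``
 is hypothetical, and as always the values should be verified with the
 agent's ``status`` action first):
 
 .. code-block:: xml
 
    <primitive id="Fencing-all" class="stonith" type="fence_ipmilan" >
      <instance_attributes id="Fencing-all-params" >
        <nvpair id="Fencing-all-lanplus" name="lanplus" value="1" />
        <nvpair id="Fencing-all-username" name="username" value="testuser" />
        <nvpair id="Fencing-all-password" name="password" value="abc123" />
        <!-- treat the value mapped by pcmk_host_map as the IPMI address -->
        <nvpair id="Fencing-all-port-as-ip" name="port_as_ip" value="1" />
        <nvpair id="Fencing-all-host-map" name="pcmk_host_map"
           value="pcmk-1:192.0.2.1;pcmk-2:192.0.2.2" />
      </instance_attributes>
    </primitive>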
 
 .. index::
    single: fencing; topology
    single: fencing-topology
    single: fencing-level
 
 Fencing Topologies
 ##################
 
 Pacemaker supports fencing nodes with multiple devices through a feature called
 *fencing topologies*. Fencing topologies may be used to provide alternative
 devices in case one fails, or to require multiple devices to all be executed
 successfully in order to consider the node successfully fenced, or even a
 combination of the two.
 
 Create the individual devices as you normally would, then define one or more
 ``fencing-level`` entries in the ``fencing-topology`` section of the
 configuration.
 
 * Each fencing level is attempted in order of ascending ``index``. Allowed
   values are 1 through 9.
 * If a device fails, processing terminates for the current level. No further
   devices in that level are exercised, and the next level is attempted instead.
 * If the operation succeeds for all the listed devices in a level, the level is
   deemed to have passed.
 * The operation is finished when a level has passed (success), or all levels
   have been attempted (failed).
 * If the operation failed, the next step is determined by the scheduler and/or
   the controller.
 
 Some possible uses of topologies include:
 
 * Try on-board IPMI, then an intelligent power switch if that fails
 * Try fabric fencing of both disk and network, then fall back to power fencing
   if either fails
 * Wait up to a certain time for a kernel dump to complete, then cut power to
   the node
 
 .. table:: **Attributes of a fencing-level Element**
+   :class: longtable
+   :widths: 1 4
 
    +------------------+-----------------------------------------------------------------------------------------+
    | Attribute        | Description                                                                             |
    +==================+=========================================================================================+
    | id               | .. index::                                                                              |
    |                  |    pair: fencing-level; id                                                              |
    |                  |                                                                                         |
    |                  | A unique name for this element (required)                                               |
    +------------------+-----------------------------------------------------------------------------------------+
    | target           | .. index::                                                                              |
    |                  |    pair: fencing-level; target                                                          |
    |                  |                                                                                         |
    |                  | The name of a single node to which this level applies                                   |
    +------------------+-----------------------------------------------------------------------------------------+
    | target-pattern   | .. index::                                                                              |
    |                  |    pair: fencing-level; target-pattern                                                  |
    |                  |                                                                                         |
    |                  | An extended regular expression (as defined in `POSIX                                    |
    |                  | <https://pubs.opengroup.org/onlinepubs/9699919799/basedefs/V1_chap09.html#tag_09_04>`_) |
    |                  | matching the names of nodes to which this level applies                                 |
    +------------------+-----------------------------------------------------------------------------------------+
    | target-attribute | .. index::                                                                              |
    |                  |    pair: fencing-level; target-attribute                                                |
    |                  |                                                                                         |
    |                  | The name of a node attribute that is set (to ``target-value``) for nodes to which this  |
    |                  | level applies                                                                           |
    +------------------+-----------------------------------------------------------------------------------------+
    | target-value     | .. index::                                                                              |
    |                  |    pair: fencing-level; target-value                                                    |
    |                  |                                                                                         |
    |                  | The node attribute value (of ``target-attribute``) that is set for nodes to which this  |
    |                  | level applies                                                                           |
    +------------------+-----------------------------------------------------------------------------------------+
    | index            | .. index::                                                                              |
    |                  |    pair: fencing-level; index                                                           |
    |                  |                                                                                         |
    |                  | The order in which to attempt the levels. Levels are attempted in ascending order       |
    |                  | *until one succeeds*. Valid values are 1 through 9.                                     |
    +------------------+-----------------------------------------------------------------------------------------+
    | devices          | .. index::                                                                              |
    |                  |    pair: fencing-level; devices                                                         |
    |                  |                                                                                         |
    |                  | A comma-separated list of devices that must all be tried for this level                 |
    +------------------+-----------------------------------------------------------------------------------------+
 
 .. note:: **Fencing topology with different devices for different nodes**
 
    .. code-block:: xml
 
       <cib crm_feature_set="3.6.0" validate-with="pacemaker-3.5" admin_epoch="1" epoch="0" num_updates="0">
         <configuration>
           ...
           <fencing-topology>
             <!-- For pcmk-1, try poison-pill and fail back to power -->
             <fencing-level id="f-p1.1" target="pcmk-1" index="1" devices="poison-pill"/>
             <fencing-level id="f-p1.2" target="pcmk-1" index="2" devices="power"/>
       
             <!-- For pcmk-2, try disk and network, and fail back to power -->
             <fencing-level id="f-p2.1" target="pcmk-2" index="1" devices="disk,network"/>
             <fencing-level id="f-p2.2" target="pcmk-2" index="2" devices="power"/>
           </fencing-topology>
           ...
         </configuration>
         <status/>
       </cib>
 
 Example Dual-Layer, Dual-Device Fencing Topologies
 __________________________________________________
 
 The following example illustrates an advanced use of ``fencing-topology`` in a
 cluster with the following properties:
 
 * 2 nodes (prod-mysql1 and prod-mysql2)
 * the nodes have IPMI controllers reachable at 192.0.2.1 and 192.0.2.2
 * the nodes each have two independent Power Supply Units (PSUs) connected to
   two independent Power Distribution Units (PDUs) reachable at 198.51.100.1
   (port 10 and port 11) and 203.0.113.1 (port 10 and port 11)
 * fencing via the IPMI controller uses the ``fence_ipmilan`` agent (1 fence device
   per controller, with each device targeting a separate node)
 * fencing via the PDUs uses the ``fence_apc_snmp`` agent (1 fence device per
   PDU, with both devices targeting both nodes)
 * a random delay is used to lessen the chance of a "death match"
 * fencing topology is set to try IPMI fencing first then dual PDU fencing if
   that fails
 
 In a node failure scenario, Pacemaker will first select ``fence_ipmilan`` to
 try to kill the faulty node. Using the fencing topology, if that method fails,
 it will then move on to selecting ``fence_apc_snmp`` twice (once for the first
 PDU, then again for the second PDU).
 
 The fence action is considered successful only if both PDUs report the required
 status. If either of them fails, fencing loops back to the first fencing method,
 ``fence_ipmilan``, and so on, until the node is fenced or the fencing action is
 cancelled.
 
 .. note:: **First fencing method: single IPMI device per target**
 
    Each cluster node has its own dedicated IPMI controller that can be contacted
    for fencing using the following primitives:
 
    .. code-block:: xml
 
       <primitive class="stonith" id="fence_prod-mysql1_ipmi" type="fence_ipmilan">
         <instance_attributes id="fence_prod-mysql1_ipmi-instance_attributes">
           <nvpair id="fence_prod-mysql1_ipmi-instance_attributes-ipaddr" name="ipaddr" value="192.0.2.1"/>
           <nvpair id="fence_prod-mysql1_ipmi-instance_attributes-login" name="login" value="fencing"/>
           <nvpair id="fence_prod-mysql1_ipmi-instance_attributes-passwd" name="passwd" value="finishme"/>
           <nvpair id="fence_prod-mysql1_ipmi-instance_attributes-lanplus" name="lanplus" value="true"/>
           <nvpair id="fence_prod-mysql1_ipmi-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="prod-mysql1"/>
           <nvpair id="fence_prod-mysql1_ipmi-instance_attributes-pcmk_delay_max" name="pcmk_delay_max" value="8s"/>
         </instance_attributes>
       </primitive>
       <primitive class="stonith" id="fence_prod-mysql2_ipmi" type="fence_ipmilan">
         <instance_attributes id="fence_prod-mysql2_ipmi-instance_attributes">
           <nvpair id="fence_prod-mysql2_ipmi-instance_attributes-ipaddr" name="ipaddr" value="192.0.2.2"/>
           <nvpair id="fence_prod-mysql2_ipmi-instance_attributes-login" name="login" value="fencing"/>
           <nvpair id="fence_prod-mysql2_ipmi-instance_attributes-passwd" name="passwd" value="finishme"/>
           <nvpair id="fence_prod-mysql2_ipmi-instance_attributes-lanplus" name="lanplus" value="true"/>
           <nvpair id="fence_prod-mysql2_ipmi-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="prod-mysql2"/>
           <nvpair id="fence_prod-mysql2_ipmi-instance_attributes-pcmk_delay_max" name="pcmk_delay_max" value="8s"/>
         </instance_attributes>
       </primitive>
 
 .. note:: **Second fencing method: dual PDU devices**
 
    Each cluster node also has 2 distinct power supplies controlled by 2
    distinct PDUs:
 
    * Node 1: PDU 1 port 10 and PDU 2 port 10
    * Node 2: PDU 1 port 11 and PDU 2 port 11
 
    The matching fencing agents are configured as follows:
 
    .. code-block:: xml
 
       <primitive class="stonith" id="fence_apc1" type="fence_apc_snmp">
         <instance_attributes id="fence_apc1-instance_attributes">
           <nvpair id="fence_apc1-instance_attributes-ipaddr" name="ipaddr" value="198.51.100.1"/>
           <nvpair id="fence_apc1-instance_attributes-login" name="login" value="fencing"/>
           <nvpair id="fence_apc1-instance_attributes-passwd" name="passwd" value="fencing"/>
           <nvpair id="fence_apc1-instance_attributes-pcmk_host_list"
              name="pcmk_host_map" value="prod-mysql1:10;prod-mysql2:11"/>
           <nvpair id="fence_apc1-instance_attributes-pcmk_delay_max" name="pcmk_delay_max" value="8s"/>
         </instance_attributes>
       </primitive>
       <primitive class="stonith" id="fence_apc2" type="fence_apc_snmp">
         <instance_attributes id="fence_apc2-instance_attributes">
           <nvpair id="fence_apc2-instance_attributes-ipaddr" name="ipaddr" value="203.0.113.1"/>
           <nvpair id="fence_apc2-instance_attributes-login" name="login" value="fencing"/>
           <nvpair id="fence_apc2-instance_attributes-passwd" name="passwd" value="fencing"/>
           <nvpair id="fence_apc2-instance_attributes-pcmk_host_list"
              name="pcmk_host_map" value="prod-mysql1:10;prod-mysql2:11"/>
           <nvpair id="fence_apc2-instance_attributes-pcmk_delay_max" name="pcmk_delay_max" value="8s"/>
         </instance_attributes>
       </primitive>
 
 .. note:: **Fencing topology**
 
    Now that all the fencing resources are defined, it's time to create the
    right topology. We want to fence using IPMI first and, if that does not
    work, fence via both PDUs to ensure the node is powered off.
 
    .. code-block:: xml
 
       <fencing-topology>
         <fencing-level id="level-1-1" target="prod-mysql1" index="1" devices="fence_prod-mysql1_ipmi" />
         <fencing-level id="level-1-2" target="prod-mysql1" index="2" devices="fence_apc1,fence_apc2"  />
         <fencing-level id="level-2-1" target="prod-mysql2" index="1" devices="fence_prod-mysql2_ipmi" />
         <fencing-level id="level-2-2" target="prod-mysql2" index="2" devices="fence_apc1,fence_apc2"  />
       </fencing-topology>
 
    In ``fencing-topology``, the lowest ``index`` value for a target determines
    its first fencing method.
 
 Remapping Reboots
 #################
 
 When the cluster needs to reboot a node, whether because ``stonith-action`` is
 ``reboot`` or because a reboot was requested externally (such as by
 ``stonith_admin --reboot``), it will remap that to other commands in two cases:
 
 * If the chosen fencing device does not support the ``reboot`` command, the
   cluster will ask it to perform ``off`` instead.
 
 * If a fencing topology level with multiple devices must be executed, the
   cluster will ask all the devices to perform ``off``, then ask the devices to
   perform ``on``.
 
 To understand the second case, consider the example of a node with redundant
 power supplies connected to intelligent power switches. Rebooting one switch
 and then the other would have no effect on the node. Turning both switches off,
 and then on, actually reboots the node.
 
 In such a case, the fencing operation will be treated as successful as long as
 the ``off`` commands succeed, because then it is safe for the cluster to
 recover any resources that were on the node. Timeouts and errors in the ``on``
 phase will be logged but ignored.
 
 When a reboot operation is remapped, any action-specific timeout for the
 remapped action will be used (for example, ``pcmk_off_timeout`` will be used
 when executing the ``off`` command, not ``pcmk_reboot_timeout``).
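 
 For example, if the dual-PDU devices from the earlier topology example were
 given action-specific timeouts, a remapped reboot would honor the ``off``
 timeout rather than the ``reboot`` one. This is only a sketch; the IDs and
 timeout values are illustrative:
 
 .. code-block:: xml
 
    <primitive class="stonith" id="fence_apc1" type="fence_apc_snmp">
      <instance_attributes id="fence_apc1-instance_attributes">
        ...
        <!-- used when a remapped reboot executes the "off" command -->
        <nvpair id="fence_apc1-instance_attributes-pcmk_off_timeout" name="pcmk_off_timeout" value="120s"/>
        <!-- applies only when the device executes a true "reboot" command -->
        <nvpair id="fence_apc1-instance_attributes-pcmk_reboot_timeout" name="pcmk_reboot_timeout" value="60s"/>
      </instance_attributes>
    </primitive>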
diff --git a/doc/sphinx/Pacemaker_Explained/nodes.rst b/doc/sphinx/Pacemaker_Explained/nodes.rst
index cb001739d9..6fcadb378a 100644
--- a/doc/sphinx/Pacemaker_Explained/nodes.rst
+++ b/doc/sphinx/Pacemaker_Explained/nodes.rst
@@ -1,437 +1,441 @@
 Cluster Nodes
 -------------
 
 Defining a Cluster Node
 _______________________
 
 Each cluster node will have an entry in the ``nodes`` section containing at
 least an ID and a name. A cluster node's ID is defined by the cluster layer
 (Corosync).
 
 .. topic:: **Example Corosync cluster node entry**
 
    .. code-block:: xml
 
       <node id="101" uname="pcmk-1"/>
 
 In normal circumstances, the admin should let the cluster populate this
 information automatically from the cluster layer.
 
 
 .. _node_name:
 
 Where Pacemaker Gets the Node Name
 ##################################
 
 The name that Pacemaker uses for a node in the configuration does not have to
 be the same as its local hostname. Pacemaker uses the following for a Corosync
 node's name, in order of most preferred first:
 
 * The value of ``name`` in the ``nodelist`` section of ``corosync.conf``
 * The value of ``ring0_addr`` in the ``nodelist`` section of ``corosync.conf``
 * The local hostname (value of ``uname -n``)
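 
 For example, a ``corosync.conf`` ``nodelist`` entry that sets an explicit node
 name might look like the following sketch (the address and node ID are
 illustrative):
 
 .. code-block:: none
 
    nodelist {
        node {
            ring0_addr: 192.0.2.1
            name: pcmk-1
            nodeid: 1
        }
    }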
 
 If the cluster is running, the ``crm_node -n`` command will display the local
 node's name as used by the cluster.
 
 If a Corosync ``nodelist`` is used, ``crm_node --name-for-id`` with a Corosync
 node ID will display the name used by the node with the given Corosync
 ``nodeid``, for example:
 
 .. code-block:: none
 
    crm_node --name-for-id 2
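 
 If the node with Corosync ``nodeid`` 2 were named ``pcmk-2``, the command
 would simply print that name (hypothetical output):
 
 .. code-block:: none
 
    # crm_node --name-for-id 2
    pcmk-2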
 
 
 .. index::
    single: node; attribute
    single: node attribute
 
 .. _node_attributes:
 
 Node Attributes
 _______________
 
 Pacemaker allows node-specific values to be specified using *node attributes*.
 A node attribute has a name, and may have a distinct value for each node.
 
 Node attributes come in two types, *permanent* and *transient*. Permanent node
 attributes are kept within the ``node`` entry, and keep their values even if
 the cluster restarts on a node. Transient node attributes are kept in the CIB's
 ``status`` section, and go away when the cluster stops on the node.
 
 While certain node attributes have specific meanings to the cluster, they are
 mainly intended to allow administrators and resource agents to track any
 information desired.
 
 For example, an administrator might choose to define node attributes for how
 much RAM and disk space each node has, which OS each uses, or which server room
 rack each node is in.
 
 Users can configure :ref:`rules` that use node attributes to affect where
 resources are placed.
 
 Setting and querying node attributes
 ####################################
 
 Node attributes can be set and queried using the ``crm_attribute`` and
 ``attrd_updater`` commands, so that the user does not have to deal with XML
 configuration directly.
 
 Here is an example command to set a permanent node attribute, and the XML
 configuration that would be generated:
 
 .. topic:: **Result of using crm_attribute to specify which kernel pcmk-1 is running**
 
    .. code-block:: none
 
       # crm_attribute --type nodes --node pcmk-1 --name kernel --update $(uname -r)
 
    .. code-block:: xml
 
       <node id="1" uname="pcmk-1">
          <instance_attributes id="nodes-1-attributes">
            <nvpair id="nodes-1-kernel" name="kernel" value="3.10.0-862.14.4.el7.x86_64"/>
          </instance_attributes>
       </node>
 
 To read back the value that was just set:
 
 .. code-block:: none
 
    # crm_attribute --type nodes --node pcmk-1 --name kernel --query
    scope=nodes  name=kernel value=3.10.0-862.14.4.el7.x86_64
 
 The ``--type nodes`` indicates that this is a permanent node attribute;
 ``--type status`` would indicate a transient node attribute.
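 
 For example, a transient attribute could be set in much the same way (a
 sketch; the attribute name is illustrative), and would last only until the
 cluster next stops on the node:
 
 .. code-block:: none
 
    # crm_attribute --type status --node pcmk-1 --name my-transient-attr --update 1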
 
 Special node attributes
 #######################
 
 Certain node attributes have special meaning to the cluster.
 
 Node attribute names beginning with ``#`` are considered reserved for these
 special attributes. Some special attributes do not start with ``#``, for
 historical reasons.
 
 Certain special attributes are set automatically by the cluster, should never
 be modified directly, and can be used only within :ref:`rules`; these are
 listed under
 :ref:`built-in node attributes <node-attribute-expressions-special>`.
 
 For true/false values, the cluster considers a value of "1", "y", "yes", "on",
 or "true" (case-insensitively) to be true; "0", "n", "no", "off", "false", or
 unset to be false; and anything else to be an error.
 
 .. table:: **Node attributes with special significance**
+   :class: longtable
+   :widths: 1 2
 
    +----------------------------+-----------------------------------------------------+
    | Name                       | Description                                         |
    +============================+=====================================================+
    | fail-count-*               | .. index::                                          |
    |                            |    pair: node attribute; fail-count                 |
    |                            |                                                     |
    |                            | Attributes whose names start with                   |
    |                            | ``fail-count-`` are managed by the cluster          |
    |                            | to track how many times particular resource         |
    |                            | operations have failed on this node. These          |
    |                            | should be queried and cleared via the               |
    |                            | ``crm_failcount`` or                                |
    |                            | ``crm_resource --cleanup`` commands rather          |
    |                            | than directly.                                      |
    +----------------------------+-----------------------------------------------------+
    | last-failure-*             | .. index::                                          |
    |                            |    pair: node attribute; last-failure               |
    |                            |                                                     |
    |                            | Attributes whose names start with                   |
    |                            | ``last-failure-`` are managed by the cluster        |
    |                            | to track when particular resource operations        |
    |                            | have most recently failed on this node.             |
    |                            | These should be cleared via the                     |
    |                            | ``crm_failcount`` or                                |
    |                            | ``crm_resource --cleanup`` commands rather          |
    |                            | than directly.                                      |
    +----------------------------+-----------------------------------------------------+
    | maintenance                | .. index::                                          |
    |                            |    pair: node attribute; maintenance                |
    |                            |                                                     |
    |                            | Similar to the ``maintenance-mode``                 |
    |                            | :ref:`cluster option <cluster_options>`, but        |
    |                            | for a single node. If true, resources will          |
    |                            | not be started or stopped on the node,              |
    |                            | resources and individual clone instances            |
    |                            | running on the node will become unmanaged,          |
    |                            | and any recurring operations for those will         |
    |                            | be cancelled.                                       |
    |                            |                                                     |
    |                            | **Warning:** Restarting pacemaker on a node that is |
    |                            | in single-node maintenance mode will likely         |
    |                            | lead to undesirable effects. If                     |
    |                            | ``maintenance`` is set as a transient               |
    |                            | attribute, it will be erased when                   |
    |                            | Pacemaker is stopped, which will                    |
    |                            | immediately take the node out of                    |
    |                            | maintenance mode and likely get it                  |
    |                            | fenced. Even if permanent, if Pacemaker             |
    |                            | is restarted, any resources active on the           |
    |                            | node will have their local history erased           |
    |                            | when the node rejoins, so the cluster               |
    |                            | will no longer consider them running on             |
    |                            | the node and thus will consider them                |
    |                            | managed again, leading them to be started           |
    |                            | elsewhere. This behavior might be                   |
    |                            | improved in a future release.                       |
    +----------------------------+-----------------------------------------------------+
    | probe_complete             | .. index::                                          |
    |                            |    pair: node attribute; probe_complete             |
    |                            |                                                     |
    |                            | This is managed by the cluster to detect            |
    |                            | when nodes need to be reprobed, and should          |
    |                            | never be used directly.                             |
    +----------------------------+-----------------------------------------------------+
    | resource-discovery-enabled | .. index::                                          |
    |                            |    pair: node attribute; resource-discovery-enabled |
    |                            |                                                     |
    |                            | If the node is a remote node, fencing is enabled,   |
    |                            | and this attribute is explicitly set to false       |
    |                            | (unset means true in this case), resource discovery |
    |                            | (probes) will not be done on this node. This is     |
    |                            | highly discouraged; the ``resource-discovery``      |
    |                            | location constraint property is preferred for this  |
    |                            | purpose.                                            |
    +----------------------------+-----------------------------------------------------+
    | shutdown                   | .. index::                                          |
    |                            |    pair: node attribute; shutdown                   |
    |                            |                                                     |
    |                            | This is managed by the cluster to orchestrate the   |
    |                            | shutdown of a node, and should never be used        |
    |                            | directly.                                           |
    +----------------------------+-----------------------------------------------------+
    | site-name                  | .. index::                                          |
    |                            |    pair: node attribute; site-name                  |
    |                            |                                                     |
    |                            | If set, this will be used as the value of the       |
    |                            | ``#site-name`` node attribute used in rules. (If    |
    |                            | not set, the value of the ``cluster-name`` cluster  |
    |                            | option will be used as ``#site-name`` instead.)     |
    +----------------------------+-----------------------------------------------------+
    | standby                    | .. index::                                          |
    |                            |    pair: node attribute; standby                    |
    |                            |                                                     |
    |                            | If true, the node is in standby mode. This is       |
    |                            | typically set and queried via the ``crm_standby``   |
    |                            | command rather than directly.                       |
    +----------------------------+-----------------------------------------------------+
    | terminate                  | .. index::                                          |
    |                            |    pair: node attribute; terminate                  |
    |                            |                                                     |
    |                            | If the value is true or begins with any nonzero     |
    |                            | number, the node will be fenced. This is typically  |
    |                            | set by tools rather than directly.                  |
    +----------------------------+-----------------------------------------------------+
    | #digests-*                 | .. index::                                          |
    |                            |    pair: node attribute; #digests                   |
    |                            |                                                     |
    |                            | Attributes whose names start with ``#digests-`` are |
    |                            | managed by the cluster to detect when               |
    |                            | :ref:`unfencing` needs to be redone, and should     |
    |                            | never be used directly.                             |
    +----------------------------+-----------------------------------------------------+
    | #node-unfenced             | .. index::                                          |
    |                            |    pair: node attribute; #node-unfenced             |
    |                            |                                                     |
    |                            | When the node was last unfenced (as seconds since   |
    |                            | the epoch). This is managed by the cluster and      |
    |                            | should never be used directly.                      |
    +----------------------------+-----------------------------------------------------+
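 
 For example, ``standby`` is typically managed with ``crm_standby``, but since
 it is an ordinary node attribute, the same effect could be achieved directly
 with ``crm_attribute`` (a sketch using the permanent scope):
 
 .. code-block:: none
 
    # crm_attribute --type nodes --node pcmk-1 --name standby --update true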
 
 .. index::
    single: node; health
 
 .. _node-health:
 
 Tracking Node Health
 ____________________
 
 A node may be functioning adequately as far as cluster membership is concerned,
 and yet be "unhealthy" in some respect that makes it an undesirable location
 for resources. For example, a disk drive may be reporting SMART errors, or the
 CPU may be highly loaded.
 
 Pacemaker offers a way to automatically move resources off unhealthy nodes.
 
 .. index::
    single: node attribute; health
 
 Node Health Attributes
 ######################
 
 Pacemaker will treat any node attribute whose name starts with ``#health`` as
 an indicator of node health. Node health attributes may have one of the
 following values:
 
 .. table:: **Allowed Values for Node Health Attributes**
+   :widths: 1 4
 
    +------------+--------------------------------------------------------------+
    | Value      | Intended significance                                        |
    +============+==============================================================+
    | ``red``    | .. index::                                                   |
    |            |    single: red; node health attribute value                  |
    |            |    single: node attribute; health (red)                      |
    |            |                                                              |
    |            | This indicator is unhealthy                                  |
    +------------+--------------------------------------------------------------+
    | ``yellow`` | .. index::                                                   |
    |            |    single: yellow; node health attribute value               |
    |            |    single: node attribute; health (yellow)                   |
    |            |                                                              |
    |            | This indicator is becoming unhealthy                         |
    +------------+--------------------------------------------------------------+
    | ``green``  | .. index::                                                   |
    |            |    single: green; node health attribute value                |
    |            |    single: node attribute; health (green)                    |
    |            |                                                              |
    |            | This indicator is healthy                                    |
    +------------+--------------------------------------------------------------+
    | *integer*  | .. index::                                                   |
    |            |    single: score; node health attribute value                |
    |            |    single: node attribute; health (score)                    |
    |            |                                                              |
    |            | A numeric score to apply to all resources on this node (0 or |
    |            | positive is healthy, negative is unhealthy)                  |
    +------------+--------------------------------------------------------------+
 
 
 .. index::
    pair: cluster option; node-health-strategy
 
 Node Health Strategy
 ####################
 
 Pacemaker assigns a node health score to each node, as the sum of the values of
 all its node health attributes. This score will be used as a location
 constraint applied to this node for all resources.
 
 The ``node-health-strategy`` cluster option controls how Pacemaker responds to
 changes in node health attributes, and how it translates ``red``, ``yellow``,
 and ``green`` to scores.
 
 Allowed values are:
 
 .. table:: **Node Health Strategies**
+   :widths: 1 4
 
    +----------------+----------------------------------------------------------+
    | Value          | Effect                                                   |
    +================+==========================================================+
    | none           | .. index::                                               |
    |                |    single: node-health-strategy; none                    |
    |                |    single: none; node-health-strategy value              |
    |                |                                                          |
    |                | Do not track node health attributes at all.              |
    +----------------+----------------------------------------------------------+
    | migrate-on-red | .. index::                                               |
    |                |    single: node-health-strategy; migrate-on-red          |
    |                |    single: migrate-on-red; node-health-strategy value    |
    |                |                                                          |
    |                | Assign the value of ``-INFINITY`` to ``red``, and 0 to   |
    |                | ``yellow`` and ``green``. This will cause all resources  |
    |                | to move off the node if any attribute is ``red``.        |
    +----------------+----------------------------------------------------------+
    | only-green     | .. index::                                               |
    |                |    single: node-health-strategy; only-green              |
    |                |    single: only-green; node-health-strategy value        |
    |                |                                                          |
    |                | Assign the value of ``-INFINITY`` to ``red`` and         |
    |                | ``yellow``, and 0 to ``green``. This will cause all      |
    |                | resources to move off the node if any attribute is       |
    |                | ``red`` or ``yellow``.                                   |
    +----------------+----------------------------------------------------------+
    | progressive    | .. index::                                               |
    |                |    single: node-health-strategy; progressive             |
    |                |    single: progressive; node-health-strategy value       |
    |                |                                                          |
    |                | Assign the value of the ``node-health-red`` cluster      |
    |                | option to ``red``, the value of ``node-health-yellow``   |
    |                | to ``yellow``, and the value of ``node-health-green`` to |
    |                | ``green``. Each node is additionally assigned a score of |
    |                | ``node-health-base`` (this allows resources to start     |
    |                | even if some attributes are ``yellow``). This strategy   |
    |                | gives the administrator finer control over how important |
    |                | each value is.                                           |
    +----------------+----------------------------------------------------------+
    | custom         | .. index::                                               |
    |                |    single: node-health-strategy; custom                  |
    |                |    single: custom; node-health-strategy value            |
    |                |                                                          |
    |                | Track node health attributes using the same values as    |
    |                | ``progressive`` for ``red``, ``yellow``, and ``green``,  |
    |                | but do not take them into account. The administrator is  |
    |                | expected to implement a policy by defining :ref:`rules`  |
    |                | referencing node health attributes.                      |
    +----------------+----------------------------------------------------------+
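 
 For instance, a ``progressive`` setup might look like the following sketch,
 placed in ``crm_config`` (the IDs and option values are illustrative, not
 defaults):
 
 .. code-block:: xml
 
    <cluster_property_set id="cib-bootstrap-options">
      <nvpair id="opt-health-strategy" name="node-health-strategy" value="progressive"/>
      <nvpair id="opt-health-base" name="node-health-base" value="10"/>
      <nvpair id="opt-health-green" name="node-health-green" value="0"/>
      <nvpair id="opt-health-yellow" name="node-health-yellow" value="-5"/>
      <nvpair id="opt-health-red" name="node-health-red" value="-INFINITY"/>
    </cluster_property_set>
 
 With these values, a node with two ``yellow`` health attributes would have a
 health score of 10 - 5 - 5 = 0, so resources could still start there, while a
 single ``red`` attribute would drive the score to ``-INFINITY`` and move all
 resources off the node.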
 
 
 Exempting a Resource from Health Restrictions
 #############################################
 
 If you want a resource to be able to run on a node even if its health score
 would otherwise prevent it, set the resource's ``allow-unhealthy-nodes``
 meta-attribute to ``true`` *(available since 2.1.3)*.
 
 This is particularly useful for node health agents, to allow them to detect
 when the node becomes healthy again. If a health agent is configured without
 this setting, it will be banned from an unhealthy node like any other
 resource; you will then have to investigate, and manually clear the health
 attribute once the node is healthy again, before resources can return to it.
 
 If you want the meta-attribute to apply to a clone, it must be set on the clone
 itself, not on the resource being cloned.
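 
 For example, the ``HealthIOWait`` clone shown later in this chapter could be
 exempted with a sketch along these lines (the IDs are illustrative):
 
 .. code-block:: xml
 
    <clone id="resHealthIOWait-clone">
      <meta_attributes id="resHealthIOWait-clone-meta_attributes">
        <nvpair id="resHealthIOWait-clone-meta-allow-unhealthy-nodes"
                name="allow-unhealthy-nodes" value="true"/>
      </meta_attributes>
      ...
    </clone>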
 
 
 Configuring Node Health Agents
 ##############################
 
 Since Pacemaker calculates node health based on node attributes, any method
 that sets node attributes may be used to measure node health. The most common
 are resource agents and custom daemons.
 
 Pacemaker provides examples that can be used directly or as a basis for custom
 code. The ``ocf:pacemaker:HealthCPU``, ``ocf:pacemaker:HealthIOWait``, and
 ``ocf:pacemaker:HealthSMART`` resource agents set node health attributes based
 on CPU and disk status.
 
 To take advantage of this feature, add the resource to your cluster (generally
 as a cloned resource with a recurring monitor action, to continually check the
 health of all nodes). For example:
 
 .. topic:: Example HealthIOWait resource configuration
 
    .. code-block:: xml
 
       <clone id="resHealthIOWait-clone">
         <primitive class="ocf" id="HealthIOWait" provider="pacemaker" type="HealthIOWait">
           <instance_attributes id="resHealthIOWait-instance_attributes">
             <nvpair id="resHealthIOWait-instance_attributes-red_limit" name="red_limit" value="30"/>
             <nvpair id="resHealthIOWait-instance_attributes-yellow_limit" name="yellow_limit" value="10"/>
           </instance_attributes>
           <operations>
             <op id="resHealthIOWait-monitor-interval-5" interval="5" name="monitor" timeout="5"/>
             <op id="resHealthIOWait-start-interval-0s" interval="0s" name="start" timeout="10s"/>
             <op id="resHealthIOWait-stop-interval-0s" interval="0s" name="stop" timeout="10s"/>
           </operations>
         </primitive>
       </clone>
 
 The resource agents use ``attrd_updater`` to set proper status for each node
 running this resource, as a node attribute whose name starts with ``#health``
 (for ``HealthIOWait``, the node attribute is named ``#health-iowait``).
 
 When a node is no longer faulty, you can force the cluster to make it available
 to take resources without waiting for the next monitor, by setting the node
 health attribute to green. For example:
 
 .. topic:: **Force node1 to be marked as healthy**
 
    .. code-block:: none
 
       # attrd_updater --name "#health-iowait" --update "green" --node "node1"
diff --git a/doc/sphinx/Pacemaker_Explained/options.rst b/doc/sphinx/Pacemaker_Explained/options.rst
index c83be50819..ee0511c58e 100644
--- a/doc/sphinx/Pacemaker_Explained/options.rst
+++ b/doc/sphinx/Pacemaker_Explained/options.rst
@@ -1,618 +1,622 @@
 Cluster-Wide Configuration
 --------------------------
 
 .. index::
    pair: XML element; cib
    pair: XML element; configuration
 
 Configuration Layout
 ####################
 
 The cluster is defined by the Cluster Information Base (CIB), which uses XML
 notation. The simplest CIB, an empty one, looks like this:
 
 .. topic:: An empty configuration
 
    .. code-block:: xml
 
       <cib crm_feature_set="3.6.0" validate-with="pacemaker-3.5" epoch="1" num_updates="0" admin_epoch="0">
         <configuration>
           <crm_config/>
           <nodes/>
           <resources/>
           <constraints/>
         </configuration>
         <status/>
       </cib>
 
 The empty configuration above contains the major sections that make up a CIB:
 
 * ``cib``: The entire CIB is enclosed with a ``cib`` element. Certain
   fundamental settings are defined as attributes of this element.
 
   * ``configuration``: This section -- the primary focus of this document --
     contains traditional configuration information such as what resources the
     cluster serves and the relationships among them.
 
     * ``crm_config``: cluster-wide configuration options
 
     * ``nodes``: the machines that host the cluster
 
     * ``resources``: the services run by the cluster
 
     * ``constraints``: indications of how resources should be placed
 
   * ``status``: This section contains the history of each resource on each
     node. Based on this data, the cluster can construct the complete current
     state of the cluster. The authoritative source for this section is the
     local executor (pacemaker-execd process) on each cluster node, and the
     cluster will occasionally repopulate the entire section. For this reason,
     it is never written to disk, and administrators are advised against
     modifying it in any way.
 
 In this document, configuration settings will be described as properties or
 options based on how they are defined in the CIB:
 
 * Properties are XML attributes of an XML element.
 
 * Options are name-value pairs expressed as ``nvpair`` child elements of an XML
   element.
 
 Normally, you will use command-line tools that abstract the XML, so the
 distinction will be unimportant; both properties and options are cluster
 settings you can tweak.
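 
 To make the distinction concrete, in the following sketch ``validate-with`` is
 a *property* (an XML attribute of the ``cib`` element), while ``cluster-name``
 is an *option* (an ``nvpair`` child element; the value shown is illustrative):
 
 .. code-block:: xml
 
    <cib validate-with="pacemaker-3.5" epoch="1" num_updates="0" admin_epoch="0">
      <configuration>
        <crm_config>
          <cluster_property_set id="cib-bootstrap-options">
            <nvpair id="option-cluster-name" name="cluster-name" value="mycluster"/>
          </cluster_property_set>
        </crm_config>
        ...
      </configuration>
      <status/>
    </cib>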
 
 CIB Properties
 ##############
 
 Certain settings are defined by CIB properties (that is, attributes of the
 ``cib`` tag) rather than with the rest of the cluster configuration in the
 ``configuration`` section.
 
 The reason is simply a matter of parsing. These options are used by the
 configuration database, which is, by design, mostly ignorant of the content it
 holds. So the decision was made to place them in an easy-to-find location.
 
 .. table:: **CIB Properties**
+   :class: longtable
+   :widths: 1 3
 
    +------------------+-----------------------------------------------------------+
    | Attribute        | Description                                               |
    +==================+===========================================================+
    | admin_epoch      | .. index::                                                |
    |                  |    pair: admin_epoch; cib                                 |
    |                  |                                                           |
    |                  | When a node joins the cluster, the cluster performs a     |
    |                  | check to see which node has the best configuration. It    |
    |                  | asks the node with the highest (``admin_epoch``,          |
    |                  | ``epoch``, ``num_updates``) tuple to replace the          |
    |                  | configuration on all the nodes -- which makes setting     |
    |                  | them, and setting them correctly, very important.         |
    |                  | ``admin_epoch`` is never modified by the cluster; you can |
    |                  | use this to make the configurations on any inactive nodes |
    |                  | obsolete.                                                 |
    |                  |                                                           |
    |                  | **Warning:** Never set this value to zero. In such cases, |
    |                  | the cluster cannot tell the difference between your       |
    |                  | configuration and the "empty" one used when nothing is    |
    |                  | found on disk.                                            |
    +------------------+-----------------------------------------------------------+
    | epoch            | .. index::                                                |
    |                  |    pair: epoch; cib                                       |
    |                  |                                                           |
    |                  | The cluster increments this every time the configuration  |
    |                  | is updated (usually by the administrator).                |
    +------------------+-----------------------------------------------------------+
    | num_updates      | .. index::                                                |
    |                  |    pair: num_updates; cib                                 |
    |                  |                                                           |
    |                  | The cluster increments this every time the configuration  |
    |                  | or status is updated (usually by the cluster) and resets  |
    |                  | it to 0 when epoch changes.                               |
    +------------------+-----------------------------------------------------------+
    | validate-with    | .. index::                                                |
    |                  |    pair: validate-with; cib                               |
    |                  |                                                           |
    |                  | Determines the type of XML validation that will be done   |
    |                  | on the configuration. If set to ``none``, the cluster    |
    |                  | will not verify that updates conform to the DTD (nor      |
    |                  | reject ones that don't).                                  |
    +------------------+-----------------------------------------------------------+
    | cib-last-written | .. index::                                                |
    |                  |    pair: cib-last-written; cib                            |
    |                  |                                                           |
    |                  | Indicates when the configuration was last written to      |
    |                  | disk. Maintained by the cluster; for informational        |
    |                  | purposes only.                                            |
    +------------------+-----------------------------------------------------------+
    | have-quorum      | .. index::                                                |
    |                  |    pair: have-quorum; cib                                 |
    |                  |                                                           |
    |                  | Indicates if the cluster has quorum. If false, this may   |
    |                  | mean that the cluster cannot start resources or fence     |
    |                  | other nodes (see ``no-quorum-policy`` below). Maintained  |
    |                  | by the cluster.                                           |
    +------------------+-----------------------------------------------------------+
    | dc-uuid          | .. index::                                                |
    |                  |    pair: dc-uuid; cib                                     |
    |                  |                                                           |
    |                  | Indicates which cluster node is the current leader. Used  |
    |                  | by the cluster when placing resources and determining the |
    |                  | order of some events. Maintained by the cluster.          |
    +------------------+-----------------------------------------------------------+
 
 .. _cluster_options:
 
 Cluster Options
 ###############
 
 Cluster options, as you might expect, control how the cluster behaves when
 confronted with various situations.
 
 They are grouped into sets within the ``crm_config`` section. In advanced
 configurations, there may be more than one set. (This will be described later
 in the chapter on :ref:`rules` where we will show how to have the cluster use
 different sets of options during working hours than during weekends.) For now,
 we will describe the simple case where each option is present at most once.
 
 You can obtain an up-to-date list of cluster options, including their default
 values, by running the ``man pacemaker-schedulerd`` and
 ``man pacemaker-controld`` commands.
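 
 For example, since ``crm_attribute`` operates on the ``crm_config`` section by
 default, an option such as ``no-quorum-policy`` could be set with a command
 along these lines (a sketch):
 
 .. code-block:: none
 
    # crm_attribute --name no-quorum-policy --update freeze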
 
 .. table:: **Cluster Options**
+   :class: longtable
+   :widths: 2 1 4
 
    +---------------------------+---------+----------------------------------------------------+
    | Option                    | Default | Description                                        |
    +===========================+=========+====================================================+
    | cluster-name              |         | .. index::                                         |
    |                           |         |    pair: cluster option; cluster-name              |
    |                           |         |                                                    |
    |                           |         | An (optional) name for the cluster as a whole.     |
    |                           |         | This is mostly for users' convenience for use      |
    |                           |         | as desired in administration, but this can be      |
    |                           |         | used in the Pacemaker configuration in             |
    |                           |         | :ref:`rules` (as the ``#cluster-name``             |
    |                           |         | :ref:`node attribute                               |
    |                           |         | <node-attribute-expressions-special>`). It may     |
    |                           |         | also be used by higher-level tools when            |
    |                           |         | displaying cluster information, and by             |
    |                           |         | certain resource agents (for example, the          |
    |                           |         | ``ocf:heartbeat:GFS2`` agent stores the            |
    |                           |         | cluster name in filesystem meta-data).             |
    +---------------------------+---------+----------------------------------------------------+
    | dc-version                |         | .. index::                                         |
    |                           |         |    pair: cluster option; dc-version                |
    |                           |         |                                                    |
    |                           |         | Version of Pacemaker on the cluster's DC.          |
    |                           |         | Determined automatically by the cluster. Often     |
    |                           |         | includes the hash which identifies the exact       |
    |                           |         | Git changeset it was built from. Used for          |
    |                           |         | diagnostic purposes.                               |
    +---------------------------+---------+----------------------------------------------------+
    | cluster-infrastructure    |         | .. index::                                         |
    |                           |         |    pair: cluster option; cluster-infrastructure    |
    |                           |         |                                                    |
    |                           |         | The messaging stack on which Pacemaker is          |
    |                           |         | currently running. Determined automatically by     |
    |                           |         | the cluster. Used for informational and            |
    |                           |         | diagnostic purposes.                               |
    +---------------------------+---------+----------------------------------------------------+
    | no-quorum-policy          | stop    | .. index::                                         |
    |                           |         |    pair: cluster option; no-quorum-policy          |
    |                           |         |                                                    |
    |                           |         | What to do when the cluster does not have          |
    |                           |         | quorum. Allowed values:                            |
    |                           |         |                                                    |
    |                           |         | * ``ignore:`` continue all resource management     |
    |                           |         | * ``freeze:`` continue resource management, but    |
    |                           |         |   don't recover resources from nodes not in the    |
    |                           |         |   affected partition                               |
    |                           |         | * ``stop:`` stop all resources in the affected     |
    |                           |         |   cluster partition                                |
    |                           |         | * ``demote:`` demote promotable resources and      |
    |                           |         |   stop all other resources in the affected         |
    |                           |         |   cluster partition *(since 2.0.5)*                |
    |                           |         | * ``suicide:`` fence all nodes in the affected     |
    |                           |         |   cluster partition                                |
    +---------------------------+---------+----------------------------------------------------+
    | batch-limit               | 0       | .. index::                                         |
    |                           |         |    pair: cluster option; batch-limit               |
    |                           |         |                                                    |
    |                           |         | The maximum number of actions that the cluster     |
    |                           |         | may execute in parallel across all nodes. The      |
    |                           |         | "correct" value will depend on the speed and       |
    |                           |         | load of your network and cluster nodes. If zero,   |
    |                           |         | the cluster will impose a dynamically calculated   |
    |                           |         | limit only when any node has high load. If -1, the |
    |                           |         | cluster will not impose any limit.                 |
    +---------------------------+---------+----------------------------------------------------+
    | migration-limit           | -1      | .. index::                                         |
    |                           |         |    pair: cluster option; migration-limit           |
    |                           |         |                                                    |
    |                           |         | The number of                                      |
    |                           |         | :ref:`live migration <live-migration>` actions     |
    |                           |         | that the cluster is allowed to execute in          |
    |                           |         | parallel on a node. A value of -1 means            |
    |                           |         | unlimited.                                         |
    +---------------------------+---------+----------------------------------------------------+
    | symmetric-cluster         | true    | .. index::                                         |
    |                           |         |    pair: cluster option; symmetric-cluster         |
    |                           |         |                                                    |
    |                           |         | Whether resources can run on any node by default   |
    |                           |         | (if false, a resource is allowed to run on a       |
    |                           |         | node only if a                                     |
    |                           |         | :ref:`location constraint <location-constraint>`   |
    |                           |         | enables it)                                        |
    +---------------------------+---------+----------------------------------------------------+
    | stop-all-resources        | false   | .. index::                                         |
    |                           |         |    pair: cluster option; stop-all-resources        |
    |                           |         |                                                    |
    |                           |         | Whether all resources should be disallowed from    |
    |                           |         | running (can be useful during maintenance)         |
    +---------------------------+---------+----------------------------------------------------+
    | stop-orphan-resources     | true    | .. index::                                         |
    |                           |         |    pair: cluster option; stop-orphan-resources     |
    |                           |         |                                                    |
    |                           |         | Whether resources that have been deleted from      |
    |                           |         | the configuration should be stopped. This value    |
    |                           |         | takes precedence over ``is-managed`` (that is,     |
    |                           |         | even unmanaged resources will be stopped when      |
    |                           |         | orphaned if this value is ``true``).               |
    +---------------------------+---------+----------------------------------------------------+
    | stop-orphan-actions       | true    | .. index::                                         |
    |                           |         |    pair: cluster option; stop-orphan-actions       |
    |                           |         |                                                    |
    |                           |         | Whether recurring :ref:`operations <operation>`    |
    |                           |         | that have been deleted from the configuration      |
    |                           |         | should be cancelled                                |
    +---------------------------+---------+----------------------------------------------------+
    | start-failure-is-fatal    | true    | .. index::                                         |
    |                           |         |    pair: cluster option; start-failure-is-fatal    |
    |                           |         |                                                    |
    |                           |         | Whether a failure to start a resource on a         |
    |                           |         | particular node prevents further start attempts    |
    |                           |         | on that node. If ``false``, the cluster will       |
    |                           |         | decide whether the node is still eligible based    |
    |                           |         | on the resource's current failure count and        |
    |                           |         | :ref:`migration-threshold <failure-handling>`.     |
    +---------------------------+---------+----------------------------------------------------+
    | enable-startup-probes     | true    | .. index::                                         |
    |                           |         |    pair: cluster option; enable-startup-probes     |
    |                           |         |                                                    |
    |                           |         | Whether the cluster should check the               |
    |                           |         | pre-existing state of resources when the cluster   |
    |                           |         | starts                                             |
    +---------------------------+---------+----------------------------------------------------+
    | maintenance-mode          | false   | .. index::                                         |
    |                           |         |    pair: cluster option; maintenance-mode          |
    |                           |         |                                                    |
    |                           |         | Whether the cluster should refrain from            |
    |                           |         | monitoring, starting and stopping resources        |
    +---------------------------+---------+----------------------------------------------------+
    | stonith-enabled           | true    | .. index::                                         |
    |                           |         |    pair: cluster option; stonith-enabled           |
    |                           |         |                                                    |
    |                           |         | Whether the cluster is allowed to fence nodes      |
    |                           |         | (for example, failed nodes and nodes with          |
    |                           |         | resources that can't be stopped).                  |
    |                           |         |                                                    |
    |                           |         | If true, at least one fence device must be         |
    |                           |         | configured before resources are allowed to run.    |
    |                           |         |                                                    |
    |                           |         | If false, unresponsive nodes are immediately       |
    |                           |         | assumed to be running no resources, and resource   |
    |                           |         | recovery on online nodes starts without any        |
    |                           |         | further protection (which can mean *data loss*     |
    |                           |         | if the unresponsive node still accesses shared     |
    |                           |         | storage, for example). See also the                |
    |                           |         | :ref:`requires <requires>` resource                |
    |                           |         | meta-attribute.                                    |
    +---------------------------+---------+----------------------------------------------------+
    | stonith-action            | reboot  | .. index::                                         |
    |                           |         |    pair: cluster option; stonith-action            |
    |                           |         |                                                    |
    |                           |         | Action the cluster should send to the fence agent  |
    |                           |         | when a node must be fenced. Allowed values are     |
    |                           |         | ``reboot``, ``off``, and (for legacy agents only)  |
    |                           |         | ``poweroff``.                                      |
    +---------------------------+---------+----------------------------------------------------+
    | stonith-timeout           | 60s     | .. index::                                         |
    |                           |         |    pair: cluster option; stonith-timeout           |
    |                           |         |                                                    |
    |                           |         | How long to wait for ``on``, ``off``, and          |
    |                           |         | ``reboot`` fence actions to complete by default.   |
    +---------------------------+---------+----------------------------------------------------+
    | stonith-max-attempts      | 10      | .. index::                                         |
    |                           |         |    pair: cluster option; stonith-max-attempts      |
    |                           |         |                                                    |
    |                           |         | How many times fencing can fail for a target       |
    |                           |         | before the cluster will no longer immediately      |
    |                           |         | re-attempt it.                                     |
    +---------------------------+---------+----------------------------------------------------+
    | stonith-watchdog-timeout  | 0       | .. index::                                         |
    |                           |         |    pair: cluster option; stonith-watchdog-timeout  |
    |                           |         |                                                    |
    |                           |         | If nonzero, and the cluster detects                |
    |                           |         | ``have-watchdog`` as ``true``, then watchdog-based |
    |                           |         | self-fencing will be performed via SBD when        |
    |                           |         | fencing is required, without requiring a fencing   |
    |                           |         | resource explicitly configured.                    |
    |                           |         |                                                    |
    |                           |         | If this is set to a positive value, unseen nodes   |
    |                           |         | are assumed to self-fence within this much time.   |
    |                           |         |                                                    |
    |                           |         | **Warning:** It must be ensured that this value is |
    |                           |         | larger than the ``SBD_WATCHDOG_TIMEOUT``           |
    |                           |         | environment variable on all nodes. Pacemaker       |
    |                           |         | verifies the setting individually on each node     |
    |                           |         | and prevents startup (or shuts down, if the        |
    |                           |         | value is changed at runtime) wherever it is        |
    |                           |         | configured wrongly. It is strongly recommended     |
    |                           |         | that ``SBD_WATCHDOG_TIMEOUT`` be set to the same   |
    |                           |         | value on all nodes.                                |
    |                           |         |                                                    |
    |                           |         | If this is set to a negative value, and            |
    |                           |         | ``SBD_WATCHDOG_TIMEOUT`` is set, twice that value  |
    |                           |         | will be used.                                      |
    |                           |         |                                                    |
    |                           |         | **Warning:** In this case, it is essential (and    |
    |                           |         | currently not verified by pacemaker) that          |
    |                           |         | ``SBD_WATCHDOG_TIMEOUT`` is set to the same        |
    |                           |         | value on all nodes.                                |
    +---------------------------+---------+----------------------------------------------------+
    | concurrent-fencing        | false   | .. index::                                         |
    |                           |         |    pair: cluster option; concurrent-fencing        |
    |                           |         |                                                    |
    |                           |         | Whether the cluster is allowed to initiate         |
    |                           |         | multiple fence actions concurrently. Fence actions |
    |                           |         | initiated externally, such as via the              |
    |                           |         | ``stonith_admin`` tool or an application such as   |
    |                           |         | DLM, or by the fencer itself such as recurring     |
    |                           |         | device monitors and ``status`` and ``list``        |
    |                           |         | commands, are not limited by this option.          |
    +---------------------------+---------+----------------------------------------------------+
    | fence-reaction            | stop    | .. index::                                         |
    |                           |         |    pair: cluster option; fence-reaction            |
    |                           |         |                                                    |
    |                           |         | How should a cluster node react if notified of its |
    |                           |         | own fencing? A cluster node may receive            |
    |                           |         | notification of its own fencing if fencing is      |
    |                           |         | misconfigured, or if fabric fencing is in use that |
    |                           |         | doesn't cut cluster communication. Allowed values  |
    |                           |         | are ``stop`` to attempt to immediately stop        |
    |                           |         | pacemaker and stay stopped, or ``panic`` to        |
    |                           |         | attempt to immediately reboot the local node,      |
    |                           |         | falling back to stop on failure. The default is    |
    |                           |         | likely to be changed to ``panic`` in a future      |
    |                           |         | release. *(since 2.0.3)*                           |
    +---------------------------+---------+----------------------------------------------------+
    | priority-fencing-delay    | 0       | .. index::                                         |
    |                           |         |    pair: cluster option; priority-fencing-delay    |
    |                           |         |                                                    |
    |                           |         | Apply this delay to any fencing targeting the      |
    |                           |         | lost nodes with the highest total resource         |
    |                           |         | priority, in case our cluster partition does not   |
    |                           |         | have a majority of the nodes, so that the more     |
    |                           |         | significant nodes are more likely to win any       |
    |                           |         | fencing match (especially meaningful in a          |
    |                           |         | split-brain of a 2-node cluster). A promoted       |
    |                           |         | resource instance                                  |
    |                           |         | takes the resource's priority plus 1 if the        |
    |                           |         | resource's priority is not 0. Any static or random |
    |                           |         | delays introduced by ``pcmk_delay_base`` and       |
    |                           |         | ``pcmk_delay_max`` configured for the              |
    |                           |         | corresponding fencing resources will be added to   |
    |                           |         | this delay. This delay should be significantly     |
    |                           |         | greater than (safely twice) the maximum delay from |
    |                           |         | those parameters. *(since 2.0.4)*                  |
    +---------------------------+---------+----------------------------------------------------+
    | cluster-delay             | 60s     | .. index::                                         |
    |                           |         |    pair: cluster option; cluster-delay             |
    |                           |         |                                                    |
    |                           |         | Estimated maximum round-trip delay over the        |
    |                           |         | network (excluding action execution). If the DC    |
    |                           |         | requires an action to be executed on another node, |
    |                           |         | it will consider the action failed if it does not  |
    |                           |         | get a response from the other node in this time    |
    |                           |         | (after considering the action's own timeout). The  |
    |                           |         | "correct" value will depend on the speed and load  |
    |                           |         | of your network and cluster nodes.                 |
    +---------------------------+---------+----------------------------------------------------+
    | dc-deadtime               | 20s     | .. index::                                         |
    |                           |         |    pair: cluster option; dc-deadtime               |
    |                           |         |                                                    |
    |                           |         | How long to wait for a response from other nodes   |
    |                           |         | during startup. The "correct" value will depend on |
    |                           |         | the speed/load of your network and the type of     |
    |                           |         | switches used.                                     |
    +---------------------------+---------+----------------------------------------------------+
    | cluster-ipc-limit         | 500     | .. index::                                         |
    |                           |         |    pair: cluster option; cluster-ipc-limit         |
    |                           |         |                                                    |
    |                           |         | The maximum IPC message backlog before one cluster |
    |                           |         | daemon will disconnect another. This is of use in  |
    |                           |         | large clusters, for which a good value is the      |
    |                           |         | number of resources in the cluster multiplied by   |
    |                           |         | the number of nodes. The default of 500 is also    |
    |                           |         | the minimum. Raise this if you see                 |
    |                           |         | "Evicting client" messages for cluster daemon PIDs |
    |                           |         | in the logs.                                       |
    +---------------------------+---------+----------------------------------------------------+
    | pe-error-series-max       | -1      | .. index::                                         |
    |                           |         |    pair: cluster option; pe-error-series-max       |
    |                           |         |                                                    |
    |                           |         | The number of scheduler inputs resulting in errors |
    |                           |         | to save. Used when reporting problems. A value of  |
    |                           |         | -1 means unlimited (report all), and 0 means none. |
    +---------------------------+---------+----------------------------------------------------+
    | pe-warn-series-max        | 5000    | .. index::                                         |
    |                           |         |    pair: cluster option; pe-warn-series-max        |
    |                           |         |                                                    |
    |                           |         | The number of scheduler inputs resulting in        |
    |                           |         | warnings to save. Used when reporting problems. A  |
    |                           |         | value of -1 means unlimited (report all), and 0    |
    |                           |         | means none.                                        |
    +---------------------------+---------+----------------------------------------------------+
    | pe-input-series-max       | 4000    | .. index::                                         |
    |                           |         |    pair: cluster option; pe-input-series-max       |
    |                           |         |                                                    |
    |                           |         | The number of "normal" scheduler inputs to save.   |
    |                           |         | Used when reporting problems. A value of -1 means  |
    |                           |         | unlimited (report all), and 0 means none.          |
    +---------------------------+---------+----------------------------------------------------+
    | enable-acl                | false   | .. index::                                         |
    |                           |         |    pair: cluster option; enable-acl                |
    |                           |         |                                                    |
    |                           |         | Whether :ref:`acl` should be used to authorize     |
    |                           |         | modifications to the CIB                           |
    +---------------------------+---------+----------------------------------------------------+
    | placement-strategy        | default | .. index::                                         |
    |                           |         |    pair: cluster option; placement-strategy        |
    |                           |         |                                                    |
    |                           |         | How the cluster should allocate resources to nodes |
    |                           |         | (see :ref:`utilization`). Allowed values are       |
    |                           |         | ``default``, ``utilization``, ``balanced``, and    |
    |                           |         | ``minimal``.                                       |
    +---------------------------+---------+----------------------------------------------------+
    | node-health-strategy      | none    | .. index::                                         |
    |                           |         |    pair: cluster option; node-health-strategy      |
    |                           |         |                                                    |
    |                           |         | How the cluster should react to node health        |
    |                           |         | attributes (see :ref:`node-health`). Allowed values|
    |                           |         | are ``none``, ``migrate-on-red``, ``only-green``,  |
    |                           |         | ``progressive``, and ``custom``.                   |
    +---------------------------+---------+----------------------------------------------------+
    | node-health-base          | 0       | .. index::                                         |
    |                           |         |    pair: cluster option; node-health-base          |
    |                           |         |                                                    |
    |                           |         | The base health score assigned to a node. Only     |
    |                           |         | used when ``node-health-strategy`` is              |
    |                           |         | ``progressive``.                                   |
    +---------------------------+---------+----------------------------------------------------+
    | node-health-green         | 0       | .. index::                                         |
    |                           |         |    pair: cluster option; node-health-green         |
    |                           |         |                                                    |
    |                           |         | The score to use for a node health attribute whose |
    |                           |         | value is ``green``. Only used when                 |
    |                           |         | ``node-health-strategy`` is ``progressive`` or     |
    |                           |         | ``custom``.                                        |
    +---------------------------+---------+----------------------------------------------------+
    | node-health-yellow        | 0       | .. index::                                         |
    |                           |         |    pair: cluster option; node-health-yellow        |
    |                           |         |                                                    |
    |                           |         | The score to use for a node health attribute whose |
    |                           |         | value is ``yellow``. Only used when                |
    |                           |         | ``node-health-strategy`` is ``progressive`` or     |
    |                           |         | ``custom``.                                        |
    +---------------------------+---------+----------------------------------------------------+
    | node-health-red           | 0       | .. index::                                         |
    |                           |         |    pair: cluster option; node-health-red           |
    |                           |         |                                                    |
    |                           |         | The score to use for a node health attribute whose |
    |                           |         | value is ``red``. Only used when                   |
    |                           |         | ``node-health-strategy`` is ``progressive`` or     |
    |                           |         | ``custom``.                                        |
    +---------------------------+---------+----------------------------------------------------+
    | cluster-recheck-interval  | 15min   | .. index::                                         |
    |                           |         |    pair: cluster option; cluster-recheck-interval  |
    |                           |         |                                                    |
    |                           |         | Pacemaker is primarily event-driven, and looks     |
    |                           |         | ahead to know when to recheck the cluster for      |
    |                           |         | failure timeouts and most time-based rules         |
    |                           |         | *(since 2.0.3)*. However, it will also recheck the |
    |                           |         | cluster after this amount of inactivity. This has  |
    |                           |         | two goals: rules with ``date_spec`` are only       |
    |                           |         | guaranteed to be checked this often, and it also   |
    |                           |         | serves as a fail-safe for some kinds of scheduler  |
    |                           |         | bugs. A value of 0 disables this polling; positive |
    |                           |         | values are a time interval.                        |
    +---------------------------+---------+----------------------------------------------------+
    | shutdown-lock             | false   | .. index::                                         |
    |                           |         |    pair: cluster option; shutdown-lock             |
    |                           |         |                                                    |
    |                           |         | The default of false allows active resources to be |
    |                           |         | recovered elsewhere when their node is cleanly     |
    |                           |         | shut down, which is what the vast majority of      |
    |                           |         | users will want. However, some users prefer to     |
    |                           |         | make resources highly available only for failures, |
    |                           |         | with no recovery for clean shutdowns. If this      |
    |                           |         | option is true, resources active on a node when it |
    |                           |         | is cleanly shut down are kept "locked" to that     |
    |                           |         | node (not allowed to run elsewhere) until they     |
    |                           |         | start again on that node after it rejoins (or for  |
    |                           |         | at most ``shutdown-lock-limit``, if set). Stonith  |
    |                           |         | resources and Pacemaker Remote connections are     |
    |                           |         | never locked. Clone and bundle instances and the   |
    |                           |         | promoted role of promotable clones are currently   |
    |                           |         | never locked, though support could be added in a   |
    |                           |         | future release. Locks may be manually cleared      |
    |                           |         | using the ``--refresh`` option of ``crm_resource`` |
    |                           |         | (both the resource and node must be specified;     |
    |                           |         | this works with remote nodes if their connection   |
    |                           |         | resource's ``target-role`` is set to ``Stopped``,  |
    |                           |         | but not if Pacemaker Remote is stopped on the      |
    |                           |         | remote node without disabling the connection       |
    |                           |         | resource).  *(since 2.0.4)*                        |
    +---------------------------+---------+----------------------------------------------------+
    | shutdown-lock-limit       | 0       | .. index::                                         |
    |                           |         |    pair: cluster option; shutdown-lock-limit       |
    |                           |         |                                                    |
    |                           |         | If ``shutdown-lock`` is true, and this is set to a |
    |                           |         | nonzero time duration, locked resources will be    |
    |                           |         | allowed to start after this much time has passed   |
    |                           |         | since the node shutdown was initiated, even if the |
    |                           |         | node has not rejoined. (This works with remote     |
    |                           |         | nodes only if their connection resource's          |
    |                           |         | ``target-role`` is set to ``Stopped``.)            |
    |                           |         | *(since 2.0.4)*                                    |
    +---------------------------+---------+----------------------------------------------------+
    | remove-after-stop         | false   | .. index::                                         |
    |                           |         |    pair: cluster option; remove-after-stop         |
    |                           |         |                                                    |
    |                           |         | *Deprecated* Should the cluster remove             |
    |                           |         | resources from Pacemaker's executor after they are |
    |                           |         | stopped? Values other than the default are, at     |
    |                           |         | best, poorly tested and potentially dangerous.     |
    |                           |         | This option is deprecated and will be removed in a |
    |                           |         | future release.                                    |
    +---------------------------+---------+----------------------------------------------------+
    | startup-fencing           | true    | .. index::                                         |
    |                           |         |    pair: cluster option; startup-fencing           |
    |                           |         |                                                    |
    |                           |         | *Advanced Use Only:* Should the cluster fence      |
    |                           |         | unseen nodes at start-up? Setting this to false is |
    |                           |         | unsafe, because the unseen nodes could be active   |
    |                           |         | and running resources but unreachable.             |
    +---------------------------+---------+----------------------------------------------------+
    | election-timeout          | 2min    | .. index::                                         |
    |                           |         |    pair: cluster option; election-timeout          |
    |                           |         |                                                    |
    |                           |         | *Advanced Use Only:* If you need to adjust this    |
    |                           |         | value, it probably indicates the presence of a bug.|
    +---------------------------+---------+----------------------------------------------------+
    | shutdown-escalation       | 20min   | .. index::                                         |
    |                           |         |    pair: cluster option; shutdown-escalation       |
    |                           |         |                                                    |
    |                           |         | *Advanced Use Only:* If you need to adjust this    |
    |                           |         | value, it probably indicates the presence of a bug.|
    +---------------------------+---------+----------------------------------------------------+
    | join-integration-timeout  | 3min    | .. index::                                         |
    |                           |         |    pair: cluster option; join-integration-timeout  |
    |                           |         |                                                    |
    |                           |         | *Advanced Use Only:* If you need to adjust this    |
    |                           |         | value, it probably indicates the presence of a bug.|
    +---------------------------+---------+----------------------------------------------------+
    | join-finalization-timeout | 30min   | .. index::                                         |
    |                           |         |    pair: cluster option; join-finalization-timeout |
    |                           |         |                                                    |
    |                           |         | *Advanced Use Only:* If you need to adjust this    |
    |                           |         | value, it probably indicates the presence of a bug.|
    +---------------------------+---------+----------------------------------------------------+
    | transition-delay          | 0s      | .. index::                                         |
    |                           |         |    pair: cluster option; transition-delay          |
    |                           |         |                                                    |
    |                           |         | *Advanced Use Only:* Delay cluster recovery for    |
    |                           |         | the configured interval to allow for additional or |
    |                           |         | related events to occur. This can be useful if     |
    |                           |         | your configuration is sensitive to the order in    |
    |                           |         | which ping updates arrive. Enabling this option    |
    |                           |         | will slow down cluster recovery under all          |
    |                           |         | conditions.                                        |
    +---------------------------+---------+----------------------------------------------------+
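 
 Cluster options are stored in the CIB as ``nvpair`` elements within a
 ``cluster_property_set`` in the ``crm_config`` section, as in the minimal
 sketch below (the ``id`` values are arbitrary examples). In practice, such
 values are usually set with a higher-level tool, for example
 ``crm_attribute --name stonith-timeout --update 90s``.
 
 .. code-block:: xml
 
    <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
          <!-- Allow the cluster to fence nodes (the default) -->
          <nvpair id="opts-stonith-enabled" name="stonith-enabled" value="true"/>
          <!-- Wait up to 90 seconds for fence actions by default -->
          <nvpair id="opts-stonith-timeout" name="stonith-timeout" value="90s"/>
       </cluster_property_set>
    </crm_config>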
diff --git a/doc/sphinx/Pacemaker_Explained/resources.rst b/doc/sphinx/Pacemaker_Explained/resources.rst
index 853874bf90..0da286b3ec 100644
--- a/doc/sphinx/Pacemaker_Explained/resources.rst
+++ b/doc/sphinx/Pacemaker_Explained/resources.rst
@@ -1,1075 +1,1080 @@
 .. _resource:
 
 Cluster Resources
 -----------------
 
 .. _s-resource-primitive:
 
 What is a Cluster Resource?
 ###########################
 
 .. index::
    single: resource
 
 A resource is a service made highly available by a cluster.
 The simplest type of resource, a *primitive* resource, is described
 in this chapter. More complex forms, such as groups and clones,
 are described in later chapters.
 
 Every primitive resource has a *resource agent*. A resource agent is an
 external program that abstracts the service it provides and presents a
 consistent view to the cluster.
 
 This allows the cluster to be agnostic about the resources it manages.
 The cluster doesn't need to understand how the resource works because
 it relies on the resource agent to do the right thing when given a
 **start**, **stop** or **monitor** command. For this reason, it is crucial
 that resource agents are well-tested.
 
 Typically, resource agents come in the form of shell scripts. However,
 they can be written using any technology (such as C, Python or Perl)
 that the author is comfortable with.
 
 .. _s-resource-supported:
 
 .. index::
    single: resource; class
  
 Resource Classes
 ################
 
 Pacemaker supports several classes of agents:
 
 * OCF
 * LSB
 * Systemd
 * Upstart (deprecated)
 * Service
 * Fencing
 * Nagios Plugins
 
 .. index::
    single: resource; OCF
    single: OCF; resources
    single: Open Cluster Framework; resources
 
 Open Cluster Framework
 ______________________
 
 The OCF standard [#]_ is basically an extension of the Linux Standard
 Base conventions for init scripts to:
 
 * support parameters,
 * make them self-describing, and
 * make them extensible
 
 OCF specs have strict definitions of the exit codes that actions must return [#]_.
 
 The cluster follows these specifications exactly, and giving the wrong
 exit code will cause the cluster to behave in ways you will likely
 find puzzling and annoying.  In particular, the cluster needs to
 distinguish a completely stopped resource from one which is in some
 erroneous and indeterminate state.
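 
 The distinction that matters most in practice is between "success" and
 "cleanly stopped": a ``monitor`` action on a stopped resource must return
 ``OCF_NOT_RUNNING``, not a generic error. A non-exhaustive summary of the
 most commonly relevant codes (see the footnoted specification for the full
 list):
 
 .. code-block:: none
 
    0  OCF_SUCCESS      action succeeded / resource is running correctly
    1  OCF_ERR_GENERIC  generic or unspecified error
    7  OCF_NOT_RUNNING  resource is cleanly stopped (monitor actions only)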
 
 Parameters are passed to the resource agent as environment variables, with the
 special prefix ``OCF_RESKEY_``.  So, a parameter which the user thinks
 of as ``ip`` will be passed to the resource agent as ``OCF_RESKEY_ip``.  The
 number and purpose of the parameters is left to the resource agent; however,
 the resource agent should use the **meta-data** command to advertise any that it
 supports.
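 
 For instance, a user-configured ``ip`` parameter would appear in the CIB and
 reach the agent as follows (a minimal sketch; the resource name and address
 are illustrative):
 
 .. code-block:: xml
 
    <primitive id="sketch-IP" class="ocf" type="IPaddr" provider="heartbeat">
       <instance_attributes id="sketch-IP-params">
          <!-- The agent will see this as OCF_RESKEY_ip="192.0.2.10" -->
          <nvpair id="sketch-IP-ip" name="ip" value="192.0.2.10"/>
       </instance_attributes>
    </primitive>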
 
 The OCF class is the most preferred as it is an industry standard,
 highly flexible (allowing parameters to be passed to agents in a
 non-positional manner) and self-describing.
 
 For more information, see the
 `reference <http://www.linux-ha.org/wiki/OCF_Resource_Agents>`_ and
 the *Resource Agents* chapter of *Pacemaker Administration*.
 
 .. index::
    single: resource; LSB
    single: LSB; resources
    single: Linux Standard Base; resources
 
 Linux Standard Base
 ___________________
 
 *LSB* resource agents are more commonly known as *init scripts*. If a full path
 is not given, they are assumed to be located in ``/etc/init.d``.
 
 Commonly, they are provided by the OS distribution. In order to be used
 with a Pacemaker cluster, they must conform to the LSB specification [#]_.
 
 .. warning::
 
    Many distributions or particular software packages claim LSB compliance
    but ship with broken init scripts.  For details on how to check whether
    your init script is LSB-compatible, see the `Resource Agents` chapter of
    `Pacemaker Administration`. Common problematic violations of the LSB
    standard include:
 
    * Not implementing the ``status`` operation at all
    * Not observing the correct exit status codes for
      ``start``/``stop``/``status`` actions
    * Starting a started resource returns an error
    * Stopping a stopped resource returns an error
 
 .. important::
 
    Remember to make sure the computer is `not` configured to start any
    services at boot time -- that should be controlled by the cluster.
 
 .. _s-resource-supported-systemd:
 
 .. index::
    single: Resource; Systemd
    single: Systemd; resources
 
 Systemd
 _______
 
 Most Linux distributions have replaced the old
 `SysV <http://en.wikipedia.org/wiki/Init#SysV-style>`_ style of
 initialization daemons and scripts with
 `Systemd <http://www.freedesktop.org/wiki/Software/systemd>`_.
 
 Pacemaker is able to manage these services `if they are present`.
 
 Instead of init scripts, systemd has `unit files`.  Generally, the
 services (unit files) are provided by the OS distribution, but there
 are online guides for converting from init scripts [#]_.
 
 .. important::
 
    Remember to make sure the computer is `not` configured to start any
    services at boot time -- that should be controlled by the cluster.
 
 .. index::
    single: Resource; Upstart
    single: Upstart; resources
 
 Upstart
 _______
 
 Some distributions replaced the old
 `SysV <http://en.wikipedia.org/wiki/Init#SysV-style>`_ style of
 initialization daemons (and scripts) with
 `Upstart <http://upstart.ubuntu.com/>`_.
 
 Pacemaker is able to manage these services `if they are present`.
 
 Instead of init scripts, Upstart has `jobs`.  Generally, the
 services (jobs) are provided by the OS distribution.
 
 .. important::
 
    Remember to make sure the computer is `not` configured to start any
    services at boot time -- that should be controlled by the cluster.
 
 .. warning::
 
    Upstart support is deprecated in Pacemaker. Upstart is no longer an actively
    maintained project, and test platforms for it are no longer readily usable.
    Support will likely be dropped entirely at the next major release of
    Pacemaker.
 
 
 .. index::
    single: Resource; System Services
    single: System Service; resources
 
 System Services
 _______________
 
 Since there are various types of system services (``systemd``,
 ``upstart``, and ``lsb``), Pacemaker supports a special ``service`` alias which
 intelligently figures out which one applies to a given cluster node.
 
 This is particularly useful when the cluster contains a mix of
 ``systemd``, ``upstart``, and ``lsb``.
 
 In order, Pacemaker will try to find the named service as:
 
 * an LSB init script
 
 * a Systemd unit file
 
 * an Upstart job
 
 .. index::
    single: Resource; STONITH
    single: STONITH; resources
 
 STONITH
 _______
 
 The STONITH class is used exclusively for fencing-related resources.  This is
 discussed later in :ref:`fencing`.
 
 .. index::
    single: Resource; Nagios Plugins
    single: Nagios Plugins; resources
 
 Nagios Plugins
 ______________
 
 Nagios Plugins [#]_ are a way to monitor services. Pacemaker can use these as
 resources, to react to a change in the service's status.
 
 To use plugins as resources, Pacemaker must have been built with support, and
 OCF-style meta-data for the plugins must be installed on nodes that can run
 them. Meta-data for several common plugins is provided by the
 `nagios-agents-metadata <https://github.com/ClusterLabs/nagios-agents-metadata>`_
 project.
 
 The supported parameters for such a resource are the same as the long options
 of the plugin.
 
 Start and monitor actions for plugin resources are implemented as invoking the
 plugin. A plugin result of "OK" (0) is treated as success, a result of "WARN"
 (1) is treated as a successful but degraded service, and any other result is
 considered a failure.
 
 A plugin resource's status will not change after recovery by restarting the
 plugin, so using such resources on their own does not make sense with
 ``on-fail`` set to ``restart`` (the default). Another value could make sense,
 for example, if you want to fence or standby nodes that cannot reach some
 external service.
 
 A more common use case for plugin resources is to configure them with a
 ``container`` meta-attribute set to the name of another resource that actually
 makes the service available, such as a virtual machine or container.
 
 With ``container`` set, the plugin resource will automatically be colocated
 with the containing resource and ordered after it, and the containing resource
 will be considered failed if the plugin resource fails. This allows monitoring
 of a service inside a virtual machine or container, with recovery of the
 virtual machine or container if the service fails.
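 
 For example, a ``check_http`` plugin resource could monitor a web service
 inside a virtual machine managed by the cluster (a minimal sketch; the
 resource names, agent choice, and addresses are illustrative):
 
 .. code-block:: xml
 
    <primitive id="vm-web" class="ocf" provider="heartbeat" type="VirtualDomain">
       <instance_attributes id="vm-web-params">
          <nvpair id="vm-web-config" name="config" value="/etc/libvirt/qemu/vm-web.xml"/>
       </instance_attributes>
    </primitive>
    <primitive id="check-web" class="nagios" type="check_http">
       <instance_attributes id="check-web-params">
          <nvpair id="check-web-hostname" name="hostname" value="192.0.2.20"/>
       </instance_attributes>
       <meta_attributes id="check-web-meta">
          <!-- Colocate with and order after vm-web, and treat a failure of
               this monitor as a failure of vm-web itself -->
          <nvpair id="check-web-container" name="container" value="vm-web"/>
       </meta_attributes>
    </primitive>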
 
 Configuring a virtual machine as a guest node, or a container as a
 :ref:`bundle <s-resource-bundle>`, is the preferred way of monitoring a service
 inside, but plugin resources can be useful when it is not practical to modify
 the virtual machine or container image for this purpose.
 
 
 .. _primitive-resource:
 
 Resource Properties
 ###################
 
 These values tell the cluster which resource agent to use for the resource,
 where to find that resource agent and what standards it conforms to.
 
 .. table:: **Properties of a Primitive Resource**
+   :widths: 1 4
 
    +----------+------------------------------------------------------------------+
    | Field    | Description                                                      |
    +==========+==================================================================+
    | id       | .. index::                                                       |
    |          |    single: id; resource                                          |
    |          |    single: resource; property, id                                |
    |          |                                                                  |
    |          | Your name for the resource                                       |
    +----------+------------------------------------------------------------------+
    | class    | .. index::                                                       |
    |          |    single: class; resource                                       |
    |          |    single: resource; property, class                             |
    |          |                                                                  |
    |          | The standard the resource agent conforms to. Allowed values:     |
    |          | ``lsb``, ``nagios``, ``ocf``, ``service``, ``stonith``,          |
    |          | ``systemd``, ``upstart``                                         |
    +----------+------------------------------------------------------------------+
    | type     | .. index::                                                       |
    |          |    single: type; resource                                        |
    |          |    single: resource; property, type                              |
    |          |                                                                  |
    |          | The name of the Resource Agent you wish to use, e.g.             |
    |          | ``IPaddr`` or ``Filesystem``                                     |
    +----------+------------------------------------------------------------------+
    | provider | .. index::                                                       |
    |          |    single: provider; resource                                    |
    |          |    single: resource; property, provider                          |
    |          |                                                                  |
    |          | The OCF spec allows multiple vendors to supply the same resource |
    |          | agent. To use the OCF resource agents supplied by the Heartbeat  |
    |          | project, you would specify ``heartbeat`` here.                   |
    +----------+------------------------------------------------------------------+
 
 The XML definition of a resource can be queried with the **crm_resource** tool.
 For example:
 
 .. code-block:: none
 
    # crm_resource --resource Email --query-xml
 
 might produce:
 
 .. topic:: A system resource definition
 
    .. code-block:: xml
 
       <primitive id="Email" class="service" type="exim"/>
 
 .. note::
 
    One of the main drawbacks of system service (LSB, systemd or
    Upstart) resources is that they do not allow any parameters!
 
 .. topic:: An OCF resource definition
 
    .. code-block:: xml
 
       <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
          <instance_attributes id="Public-IP-params">
             <nvpair id="Public-IP-ip" name="ip" value="192.0.2.2"/>
          </instance_attributes>
       </primitive>
 
 .. _resource_options:
 
 Resource Options
 ################
 
 Resources have two types of options: *meta-attributes* and *instance attributes*.
 Meta-attributes apply to any type of resource, while instance attributes
 are specific to each resource agent.
 
 Resource Meta-Attributes
 ________________________
 
 Meta-attributes are used by the cluster to decide how a resource should
 behave and can be easily set using the ``--meta`` option of the
 **crm_resource** command.
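 
 For example, the following minimal sketch keeps a hypothetical ``Public-IP``
 resource stopped by setting its ``target-role`` meta-attribute; the same
 value could be set from the command line with something like
 ``crm_resource --meta --resource Public-IP --set-parameter target-role --parameter-value Stopped``:
 
 .. code-block:: xml
 
    <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
       <meta_attributes id="Public-IP-meta">
          <nvpair id="Public-IP-meta-target-role" name="target-role" value="Stopped"/>
       </meta_attributes>
    </primitive>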
 
 .. table:: **Meta-attributes of a Primitive Resource**
+   :class: longtable
+   :widths: 2 2 3
 
    +----------------------------+----------------------------------+------------------------------------------------------+
    | Field                      | Default                          | Description                                          |
    +============================+==================================+======================================================+
    | priority                   | 0                                | .. index::                                           |
    |                            |                                  |    single: priority; resource option                 |
    |                            |                                  |    single: resource; option, priority                |
    |                            |                                  |                                                      |
    |                            |                                  | If not all resources can be active, the cluster      |
    |                            |                                  | will stop lower priority resources in order to       |
    |                            |                                  | keep higher priority ones active.                    |
    +----------------------------+----------------------------------+------------------------------------------------------+
    | critical                   | true                             | .. index::                                           |
    |                            |                                  |    single: critical; resource option                 |
    |                            |                                  |    single: resource; option, critical                |
    |                            |                                  |                                                      |
    |                            |                                  | Use this value as the default for ``influence`` in   |
    |                            |                                  | all :ref:`colocation constraints                     |
    |                            |                                  | <s-resource-colocation>` involving this resource,    |
    |                            |                                  | as well as the implicit colocation constraints       |
    |                            |                                  | created if this resource is in a :ref:`group         |
    |                            |                                  | <group-resources>`. For details, see                 |
    |                            |                                  | :ref:`s-coloc-influence`. *(since 2.1.0)*            |
    +----------------------------+----------------------------------+------------------------------------------------------+
    | target-role                | Started                          | .. index::                                           |
    |                            |                                  |    single: target-role; resource option              |
    |                            |                                  |    single: resource; option, target-role             |
    |                            |                                  |                                                      |
    |                            |                                  | What state should the cluster attempt to keep this   |
    |                            |                                  | resource in? Allowed values:                         |
    |                            |                                  |                                                      |
    |                            |                                  | * ``Stopped:`` Force the resource to be stopped      |
    |                            |                                  | * ``Started:`` Allow the resource to be started      |
    |                            |                                  |   (and in the case of :ref:`promotable clone         |
    |                            |                                  |   resources <s-resource-promotable>`, promoted       |
    |                            |                                  |   if appropriate)                                    |
    |                            |                                  | * ``Unpromoted:`` Allow the resource to be started,  |
    |                            |                                  |   but only in the unpromoted role if the resource is |
    |                            |                                  |   :ref:`promotable <s-resource-promotable>`          |
    |                            |                                  | * ``Promoted:`` Equivalent to ``Started``            |
    +----------------------------+----------------------------------+------------------------------------------------------+
    | is-managed                 | TRUE                             | .. index::                                           |
    |                            |                                  |    single: is-managed; resource option               |
    |                            |                                  |    single: resource; option, is-managed              |
    |                            |                                  |                                                      |
    |                            |                                  | Is the cluster allowed to start and stop             |
    |                            |                                  | the resource?  Allowed values: ``true``, ``false``   |
    +----------------------------+----------------------------------+------------------------------------------------------+
    | maintenance                | FALSE                            | .. index::                                           |
    |                            |                                  |    single: maintenance; resource option              |
    |                            |                                  |    single: resource; option, maintenance             |
    |                            |                                  |                                                      |
    |                            |                                  | Similar to the ``maintenance-mode``                  |
    |                            |                                  | :ref:`cluster option <cluster_options>`, but for     |
    |                            |                                  | a single resource. If true, the resource will not    |
    |                            |                                  | be started, stopped, or monitored on any node. This  |
    |                            |                                  | differs from ``is-managed`` in that monitors will    |
    |                            |                                  | not be run. Allowed values: ``true``, ``false``      |
    +----------------------------+----------------------------------+------------------------------------------------------+
    | resource-stickiness        | 1 for individual clone           | .. _resource-stickiness:                             |
    |                            | instances, 0 for all             |                                                      |
    |                            | other resources                  | .. index::                                           |
    |                            |                                  |    single: resource-stickiness; resource option      |
    |                            |                                  |    single: resource; option, resource-stickiness     |
    |                            |                                  |                                                      |
    |                            |                                  | A score that will be added to the current node when  |
    |                            |                                  | a resource is already active. This allows running    |
    |                            |                                  | resources to stay where they are, even if they       |
    |                            |                                  | would be placed elsewhere if they were being         |
    |                            |                                  | started from a stopped state.                        |
    +----------------------------+----------------------------------+------------------------------------------------------+
    | requires                   | ``quorum`` for resources         | .. _requires:                                        |
    |                            | with a ``class`` of ``stonith``, |                                                      |
    |                            | otherwise ``unfencing`` if       | .. index::                                           |
    |                            | unfencing is active in the       |    single: requires; resource option                 |
    |                            | cluster, otherwise ``fencing``   |    single: resource; option, requires                |
    |                            | if ``stonith-enabled`` is true,  |                                                      |
    |                            | otherwise ``quorum``             | Conditions under which the resource can be           |
    |                            |                                  | started. Allowed values:                             |
    |                            |                                  |                                                      |
    |                            |                                  | * ``nothing:`` can always be started                 |
    |                            |                                  | * ``quorum:`` The cluster can only start this        |
    |                            |                                  |   resource if a majority of the configured nodes     |
    |                            |                                  |   are active                                         |
    |                            |                                  | * ``fencing:`` The cluster can only start this       |
    |                            |                                  |   resource if a majority of the configured nodes     |
    |                            |                                  |   are active *and* any failed or unknown nodes       |
    |                            |                                  |   have been :ref:`fenced <fencing>`                  |
    |                            |                                  | * ``unfencing:`` The cluster can only start this     |
    |                            |                                  |   resource if a majority of the configured nodes     |
    |                            |                                  |   are active *and* any failed or unknown nodes have  |
    |                            |                                  |   been fenced *and* only on nodes that have been     |
    |                            |                                  |   :ref:`unfenced <unfencing>`                        |
    +----------------------------+----------------------------------+------------------------------------------------------+
    | migration-threshold        | INFINITY                         | .. index::                                           |
    |                            |                                  |    single: migration-threshold; resource option      |
    |                            |                                  |    single: resource; option, migration-threshold     |
    |                            |                                  |                                                      |
    |                            |                                  | How many failures may occur for this resource on     |
    |                            |                                  | a node, before this node is marked ineligible to     |
    |                            |                                  | host this resource. A value of 0 indicates that this |
    |                            |                                  | feature is disabled (the node will never be marked   |
    |                            |                                  | ineligible); by contrast, the cluster treats         |
    |                            |                                  | INFINITY (the default) as a very large but finite    |
    |                            |                                  | number. This option has an effect only if the        |
    |                            |                                  | failed operation specifies ``on-fail`` as            |
    |                            |                                  | ``restart`` (the default), and additionally for      |
    |                            |                                  | failed ``start`` operations, if the cluster          |
    |                            |                                  | property ``start-failure-is-fatal`` is ``false``.    |
    +----------------------------+----------------------------------+------------------------------------------------------+
    | failure-timeout            | 0                                | .. index::                                           |
    |                            |                                  |    single: failure-timeout; resource option          |
    |                            |                                  |    single: resource; option, failure-timeout         |
    |                            |                                  |                                                      |
    |                            |                                  | How many seconds to wait before acting as if the     |
    |                            |                                  | failure had not occurred, and potentially allowing   |
    |                            |                                  | the resource back to the node on which it failed.    |
    |                            |                                  | A value of 0 indicates that this feature is          |
    |                            |                                  | disabled.                                            |
    +----------------------------+----------------------------------+------------------------------------------------------+
    | multiple-active            | stop_start                       | .. index::                                           |
    |                            |                                  |    single: multiple-active; resource option          |
    |                            |                                  |    single: resource; option, multiple-active         |
    |                            |                                  |                                                      |
    |                            |                                  | What should the cluster do if it ever finds the      |
    |                            |                                  | resource active on more than one node? Allowed       |
    |                            |                                  | values:                                              |
    |                            |                                  |                                                      |
    |                            |                                  | * ``block``: mark the resource as unmanaged          |
    |                            |                                  | * ``stop_only``: stop all active instances and       |
    |                            |                                  |   leave them that way                                |
    |                            |                                  | * ``stop_start``: stop all active instances and      |
    |                            |                                  |   start the resource in one location only            |
    |                            |                                  | * ``stop_unexpected``: stop all active instances     |
    |                            |                                  |   except where the resource should be active (this   |
    |                            |                                  |   should be used only when extra instances are not   |
    |                            |                                  |   expected to disrupt existing instances, and the    |
    |                            |                                  |   resource agent's monitor of an existing instance   |
    |                            |                                  |   is capable of detecting any problems that could be |
    |                            |                                  |   caused; note that any resources ordered after this |
    |                            |                                  |   will still need to be restarted)                   |
    +----------------------------+----------------------------------+------------------------------------------------------+
    | allow-migrate              | TRUE for ocf:pacemaker:remote    | Whether the cluster should try to "live migrate"     |
    |                            | resources, FALSE otherwise       | this resource when it needs to be moved (see         |
    |                            |                                  | :ref:`live-migration`)                               |
    +----------------------------+----------------------------------+------------------------------------------------------+
    | allow-unhealthy-nodes      | FALSE                            | Whether the resource should be able to run on a node |
    |                            |                                  | even if the node's health score would otherwise      |
    |                            |                                  | prevent it (see :ref:`node-health`) *(since 2.1.3)*  |
    +----------------------------+----------------------------------+------------------------------------------------------+
    | container-attribute-target |                                  | Specific to bundle resources; see                    |
    |                            |                                  | :ref:`s-bundle-attributes`                           |
    +----------------------------+----------------------------------+------------------------------------------------------+
    | remote-node                |                                  | The name of the Pacemaker Remote guest node this     |
    |                            |                                  | resource is associated with, if any. If              |
    |                            |                                  | specified, this both enables the resource as a       |
    |                            |                                  | guest node and defines the unique name used to       |
    |                            |                                  | identify the guest node. The guest must be           |
    |                            |                                  | configured to run the Pacemaker Remote daemon        |
    |                            |                                  | when it is started. **WARNING:** This value          |
    |                            |                                  | cannot overlap with any resource or node IDs.        |
    +----------------------------+----------------------------------+------------------------------------------------------+
    | remote-port                | 3121                             | If ``remote-node`` is specified, the port on the     |
    |                            |                                  | guest used for its Pacemaker Remote connection.      |
    |                            |                                  | The Pacemaker Remote daemon on the guest must        |
    |                            |                                  | be configured to listen on this port.                |
    +----------------------------+----------------------------------+------------------------------------------------------+
    | remote-addr                | value of ``remote-node``         | If ``remote-node`` is specified, the IP              |
    |                            |                                  | address or hostname used to connect to the           |
    |                            |                                  | guest via Pacemaker Remote. The Pacemaker Remote     |
    |                            |                                  | daemon on the guest must be configured to accept     |
    |                            |                                  | connections on this address.                         |
    +----------------------------+----------------------------------+------------------------------------------------------+
    | remote-connect-timeout     | 60s                              | If ``remote-node`` is specified, how long before     |
    |                            |                                  | a pending guest connection will time out.            |
    +----------------------------+----------------------------------+------------------------------------------------------+
 
 As an example of setting resource options, if you performed the following
 commands on an LSB Email resource:
 
 .. code-block:: none
 
    # crm_resource --meta --resource Email --set-parameter priority --parameter-value 100
    # crm_resource -m -r Email -p multiple-active -v block
 
 the resulting resource definition might be:
 
 .. topic:: An LSB resource with cluster options
 
    .. code-block:: xml
 
       <primitive id="Email" class="lsb" type="exim">
         <meta_attributes id="Email-meta_attributes">
           <nvpair id="Email-meta_attributes-priority" name="priority" value="100"/>
           <nvpair id="Email-meta_attributes-multiple-active" name="multiple-active" value="block"/>
         </meta_attributes>
       </primitive>
 
 In addition to the cluster-defined meta-attributes described above, you may
 also configure arbitrary meta-attributes of your own choosing. Most commonly,
 this would be done for use in :ref:`rules <rules>`. For example, an IT department
 might define a custom meta-attribute to indicate which company department each
 resource is intended for. To reduce the chance of name collisions with
 cluster-defined meta-attributes added in the future, it is recommended to use
 a unique, organization-specific prefix for such attributes.
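 
 For instance, a custom meta-attribute with a hypothetical
 organization-specific prefix could be added to the ``Email`` resource from
 the earlier example like this:
 
 .. code-block:: none
 
    # crm_resource --meta --resource Email --set-parameter mycompany-department --parameter-value accounting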
 
 .. _s-resource-defaults:
 
 Setting Global Defaults for Resource Meta-Attributes
 ____________________________________________________
 
 To set a default value for a resource option, add it to the
 ``rsc_defaults`` section with ``crm_attribute``. For example,
 
 .. code-block:: none
 
    # crm_attribute --type rsc_defaults --name is-managed --update false
 
 would prevent the cluster from starting or stopping any of the
 resources in the configuration (unless of course the individual
 resources were specifically enabled by having their ``is-managed`` set to
 ``true``).
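 
 The resulting entry in the CIB's ``rsc_defaults`` section might look like the
 following sketch (the ``id`` values here are illustrative):
 
 .. code-block:: xml
 
    <rsc_defaults>
      <meta_attributes id="rsc_defaults-options">
        <nvpair id="rsc_defaults-options-is-managed" name="is-managed" value="false"/>
      </meta_attributes>
    </rsc_defaults>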
 
 Resource Instance Attributes
 ____________________________
 
 The resource agents of some resource classes (lsb, systemd, and upstart *not*
 among them) can be given parameters that determine how they behave and which
 instance of a service they control.
 
 If your resource agent supports parameters, you can add them with the
 ``crm_resource`` command. For example,
 
 .. code-block:: none
 
    # crm_resource --resource Public-IP --set-parameter ip --parameter-value 192.0.2.2
 
 would create an entry in the resource like this:
 
 .. topic:: An example OCF resource with instance attributes
 
    .. code-block:: xml
 
       <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
          <instance_attributes id="params-public-ip">
             <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
          </instance_attributes>
       </primitive>
 
 For an OCF resource, the result would be an environment variable
 called ``OCF_RESKEY_ip`` with a value of ``192.0.2.2``.
 
 The list of instance attributes supported by an OCF resource agent can be
 found by calling the resource agent with the ``meta-data`` command.
 The output contains an XML description of all the supported
 attributes, their purpose and default values.
 
 .. topic:: Displaying the metadata for the Dummy resource agent template
 
    .. code-block:: none
 
       # export OCF_ROOT=/usr/lib/ocf
       # $OCF_ROOT/resource.d/pacemaker/Dummy meta-data
 
    .. code-block:: xml
 
       <?xml version="1.0"?>
       <!DOCTYPE resource-agent SYSTEM "ra-api-1.dtd">
       <resource-agent name="Dummy" version="2.0">
       <version>1.1</version>
 
       <longdesc lang="en">
       This is a dummy OCF resource agent. It does absolutely nothing except keep track
       of whether it is running or not, and can be configured so that actions fail or
       take a long time. Its purpose is primarily for testing, and to serve as a
       template for resource agent writers.
       </longdesc>
       <shortdesc lang="en">Example stateless resource agent</shortdesc>
 
       <parameters>
       <parameter name="state" unique-group="state">
       <longdesc lang="en">
       Location to store the resource state in.
       </longdesc>
       <shortdesc lang="en">State file</shortdesc>
       <content type="string" default="/var/run/Dummy-RESOURCE_ID.state" />
       </parameter>
 
       <parameter name="passwd" reloadable="1">
       <longdesc lang="en">
       Fake password field
       </longdesc>
       <shortdesc lang="en">Password</shortdesc>
       <content type="string" default="" />
       </parameter>
 
       <parameter name="fake" reloadable="1">
       <longdesc lang="en">
       Fake attribute that can be changed to cause a reload
       </longdesc>
       <shortdesc lang="en">Fake attribute that can be changed to cause a reload</shortdesc>
       <content type="string" default="dummy" />
       </parameter>
 
       <parameter name="op_sleep" reloadable="1">
       <longdesc lang="en">
       Number of seconds to sleep during operations.  This can be used to test how
       the cluster reacts to operation timeouts.
       </longdesc>
       <shortdesc lang="en">Operation sleep duration in seconds.</shortdesc>
       <content type="string" default="0" />
       </parameter>
 
       <parameter name="fail_start_on" reloadable="1">
       <longdesc lang="en">
       Start, migrate_from, and reload-agent actions will return failure if running on
       the host specified here, but the resource will run successfully anyway (future
       monitor calls will find it running). This can be used to test on-fail=ignore.
       </longdesc>
       <shortdesc lang="en">Report bogus start failure on specified host</shortdesc>
       <content type="string" default="" />
       </parameter>
       <parameter name="envfile" reloadable="1">
       <longdesc lang="en">
       If this is set, the environment will be dumped to this file for every call.
       </longdesc>
       <shortdesc lang="en">Environment dump file</shortdesc>
       <content type="string" default="" />
       </parameter>
 
       </parameters>
 
       <actions>
       <action name="start"        timeout="20s" />
       <action name="stop"         timeout="20s" />
       <action name="monitor"      timeout="20s" interval="10s" depth="0"/>
       <action name="reload"       timeout="20s" />
       <action name="reload-agent" timeout="20s" />
       <action name="migrate_to"   timeout="20s" />
       <action name="migrate_from" timeout="20s" />
       <action name="validate-all" timeout="20s" />
       <action name="meta-data"    timeout="5s" />
       </actions>
       </resource-agent>
 
 .. index::
    single: resource; action
    single: resource; operation
 
 .. _operation:
 
 Resource Operations
 ###################
 
 *Operations* are actions the cluster can perform on a resource by calling the
 resource agent. Resource agents must support certain common operations such as
 start, stop, and monitor, and may implement any others.
 
 Operations may be explicitly configured for two purposes: to override defaults
 for options (such as timeout) that the cluster will use whenever it initiates
 the operation, and to run an operation on a recurring basis (for example, to
 monitor the resource for failure).
 
 .. topic:: An OCF resource with a non-default start timeout
 
    .. code-block:: xml
 
       <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
         <operations>
            <op id="Public-IP-start" name="start" timeout="60s"/>
         </operations>
         <instance_attributes id="params-public-ip">
            <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
         </instance_attributes>
       </primitive>
 
 Pacemaker identifies operations by a combination of name and interval, so this
 combination must be unique for each resource. That is, you should not configure
 two operations for the same resource with the same name and interval.
 
 .. _operation_properties:
 
 Operation Properties
 ____________________
 
 Operation properties may be specified directly in the ``op`` element as
 XML attributes, or in a separate ``meta_attributes`` block as ``nvpair`` elements.
 XML attributes take precedence over ``nvpair`` elements if both are specified.
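 
 As a sketch (reusing the ``Public-IP`` resource from earlier examples), the
 following two definitions of a monitor operation specify the same ``timeout``,
 first as an XML attribute and then as an ``nvpair`` element:
 
 .. code-block:: xml
 
    <op id="Public-IP-monitor" name="monitor" interval="60s" timeout="30s"/>
 
    <op id="Public-IP-monitor" name="monitor" interval="60s">
      <meta_attributes id="Public-IP-monitor-meta">
        <nvpair id="Public-IP-monitor-timeout" name="timeout" value="30s"/>
      </meta_attributes>
    </op>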
 
 .. table:: **Properties of an Operation**
+   :class: longtable
+   :widths: 1 2 3
 
    +----------------+-----------------------------------+-----------------------------------------------------+
    | Field          | Default                           | Description                                         |
    +================+===================================+=====================================================+
    | id             |                                   | .. index::                                          |
    |                |                                   |    single: id; action property                      |
    |                |                                   |    single: action; property, id                     |
    |                |                                   |                                                     |
    |                |                                   | A unique name for the operation.                    |
    +----------------+-----------------------------------+-----------------------------------------------------+
    | name           |                                   | .. index::                                          |
    |                |                                   |    single: name; action property                    |
    |                |                                   |    single: action; property, name                   |
    |                |                                   |                                                     |
    |                |                                   | The action to perform. This can be any action       |
    |                |                                   | supported by the agent; common values include       |
    |                |                                   | ``monitor``, ``start``, and ``stop``.               |
    +----------------+-----------------------------------+-----------------------------------------------------+
    | interval       | 0                                 | .. index::                                          |
    |                |                                   |    single: interval; action property                |
    |                |                                   |    single: action; property, interval               |
    |                |                                   |                                                     |
    |                |                                   | How frequently (in seconds) to perform the          |
    |                |                                   | operation. A value of 0 means "when needed".        |
    |                |                                   | A positive value defines a *recurring action*,      |
    |                |                                   | which is typically used with                        |
    |                |                                   | :ref:`monitor <s-resource-monitoring>`.             |
    +----------------+-----------------------------------+-----------------------------------------------------+
    | timeout        |                                   | .. index::                                          |
    |                |                                   |    single: timeout; action property                 |
    |                |                                   |    single: action; property, timeout                |
    |                |                                   |                                                     |
    |                |                                   | How long to wait before declaring the action        |
    |                |                                   | has failed                                          |
    +----------------+-----------------------------------+-----------------------------------------------------+
    | on-fail        | Varies by action:                 | .. index::                                          |
    |                |                                   |    single: on-fail; action property                 |
    |                | * ``stop``: ``fence`` if          |    single: action; property, on-fail                |
    |                |   ``stonith-enabled`` is true     |                                                     |
    |                |   or ``block`` otherwise          | The action to take if this action ever fails.       |
    |                | * ``demote``: ``on-fail`` of the  | Allowed values:                                     |
    |                |   ``monitor`` action with         |                                                     |
    |                |   ``role`` set to ``Promoted``,   | * ``ignore:`` Pretend the resource did not fail.    |
    |                |   if present, enabled, and        | * ``block:`` Don't perform any further operations   |
    |                |   configured to a value other     |   on the resource.                                  |
    |                |   than ``demote``, or ``restart`` | * ``stop:`` Stop the resource and do not start      |
    |                |   otherwise                       |   it elsewhere.                                     |
    |                | * all other actions: ``restart``  | * ``demote:`` Demote the resource, without a        |
    |                |                                   |   full restart. This is valid only for ``promote``  |
    |                |                                   |   actions, and for ``monitor`` actions with both    |
    |                |                                   |   a nonzero ``interval`` and ``role`` set to        |
    |                |                                   |   ``Promoted``; for any other action, a             |
    |                |                                   |   configuration error will be logged, and the       |
    |                |                                   |   default behavior will be used. *(since 2.0.5)*    |
    |                |                                   | * ``restart:`` Stop the resource and start it       |
    |                |                                   |   again (possibly on a different node).             |
    |                |                                   | * ``fence:`` STONITH the node on which the          |
    |                |                                   |   resource failed.                                  |
    |                |                                   | * ``standby:`` Move *all* resources away from the   |
    |                |                                   |   node on which the resource failed.                |
    +----------------+-----------------------------------+-----------------------------------------------------+
    | enabled        | TRUE                              | .. index::                                          |
    |                |                                   |    single: enabled; action property                 |
    |                |                                   |    single: action; property, enabled                |
    |                |                                   |                                                     |
    |                |                                   | If ``false``, ignore this operation definition.     |
    |                |                                   | This is typically used to pause a particular        |
    |                |                                   | recurring ``monitor`` operation. For instance, it   |
    |                |                                   | can complement setting a resource to unmanaged      |
    |                |                                   | (``is-managed=false``), since that alone will       |
    |                |                                   | :ref:`not block any configured monitoring           |
    |                |                                   | <s-monitoring-unmanaged>`. Disabling the operation  |
    |                |                                   | does not suppress all actions of the given type.    |
    |                |                                   | Allowed values: ``true``, ``false``.                |
    +----------------+-----------------------------------+-----------------------------------------------------+
    | record-pending | TRUE                              | .. index::                                          |
    |                |                                   |    single: record-pending; action property          |
    |                |                                   |    single: action; property, record-pending         |
    |                |                                   |                                                     |
    |                |                                   | If ``true``, the intention to perform the operation |
    |                |                                   | is recorded so that GUIs and CLI tools can indicate |
    |                |                                   | that an operation is in progress.  This is best set |
    |                |                                   | as an *operation default*                           |
    |                |                                   | (see :ref:`s-operation-defaults`).  Allowed values: |
    |                |                                   | ``true``, ``false``.                                |
    +----------------+-----------------------------------+-----------------------------------------------------+
    | role           |                                   | .. index::                                          |
    |                |                                   |    single: role; action property                    |
    |                |                                   |    single: action; property, role                   |
    |                |                                   |                                                     |
    |                |                                   | Run the operation only on node(s) that the cluster  |
    |                |                                   | thinks should be in the specified role. This only   |
    |                |                                   | makes sense for recurring ``monitor`` operations.   |
    |                |                                   | Allowed (case-sensitive) values: ``Stopped``,       |
    |                |                                   | ``Started``, and in the case of :ref:`promotable    |
    |                |                                   | clone resources <s-resource-promotable>`,           |
    |                |                                   | ``Unpromoted`` and ``Promoted``.                    |
    +----------------+-----------------------------------+-----------------------------------------------------+
 
 .. note::
 
    When ``on-fail`` is set to ``demote``, recovery from failure by a successful
    demote causes the cluster to recalculate whether and where a new instance
    should be promoted. The node with the failure is eligible, so if promotion
    scores have not changed, it will be promoted again.
 
    There is no direct equivalent of ``migration-threshold`` for the promoted
    role, but the same effect can be achieved with a location constraint using a
    :ref:`rule <rules>` with a node attribute expression for the resource's fail
    count.
 
    For example, to immediately ban the promoted role from a node with any
    failed promote or promoted instance monitor:
 
    .. code-block:: xml
 
       <rsc_location id="loc1" rsc="my_primitive">
           <rule id="rule1" score="-INFINITY" role="Promoted" boolean-op="or">
             <expression id="expr1" attribute="fail-count-my_primitive#promote_0"
               operation="gte" value="1"/>
             <expression id="expr2" attribute="fail-count-my_primitive#monitor_10000"
               operation="gte" value="1"/>
           </rule>
       </rsc_location>
 
    This example assumes that there is a promotable clone of the ``my_primitive``
    resource (note that the primitive name, not the clone name, is used in the
    rule), and that there is a recurring 10-second-interval monitor configured for
    the promoted role (fail count attributes specify the interval in
    milliseconds).
 
 .. _s-resource-monitoring:
 
 Monitoring Resources for Failure
 ________________________________
 
 When Pacemaker first starts a resource, it runs one-time ``monitor`` operations
 (referred to as *probes*) to ensure the resource is running where it's
 supposed to be, and not running where it's not supposed to be. (This behavior
 can be affected by the ``resource-discovery`` location constraint property.)
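 
 For instance, probes for a resource can be disabled on a particular node with
 a location constraint along the lines of this sketch (the node name is
 hypothetical):
 
 .. code-block:: xml
 
    <rsc_location id="Public-IP-no-probe" rsc="Public-IP" node="node3"
      score="-INFINITY" resource-discovery="never"/>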
 
 Other than those initial probes, Pacemaker will *not* (by default) check that
 the resource continues to stay healthy [#]_.  You must configure ``monitor``
 operations explicitly to perform these checks.
 
 .. topic:: An OCF resource with a recurring health check
 
    .. code-block:: xml
 
       <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
         <operations>
            <op id="Public-IP-start" name="start" timeout="60s"/>
            <op id="Public-IP-monitor" name="monitor" interval="60s"/>
         </operations>
         <instance_attributes id="params-public-ip">
            <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
         </instance_attributes>
       </primitive>
 
 By default, a ``monitor`` operation will ensure that the resource is running
 where it is supposed to. The operation's ``role`` property can be used for
 further checking.
 
 For example, if a resource has one ``monitor`` operation with
 ``interval=10 role=Started`` and a second ``monitor`` operation with
 ``interval=11 role=Stopped``, the cluster will run the first monitor on any nodes
 it thinks *should* be running the resource, and the second monitor on any nodes
 that it thinks *should not* be running the resource (for the truly paranoid,
 who want to know when an administrator manually starts a service by mistake).
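 
 Such a configuration might look like the following sketch:
 
 .. code-block:: xml
 
    <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
      <operations>
         <op id="Public-IP-monitor-started" name="monitor" interval="10s" role="Started"/>
         <op id="Public-IP-monitor-stopped" name="monitor" interval="11s" role="Stopped"/>
      </operations>
      <instance_attributes id="params-public-ip">
         <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
      </instance_attributes>
    </primitive>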
 
 .. note::
 
    Currently, monitors with ``role=Stopped`` are not implemented for
    :ref:`clone <s-resource-clone>` resources.
 
 .. _s-monitoring-unmanaged:
 
 Monitoring Resources When Administration is Disabled
 ____________________________________________________
 
 Recurring ``monitor`` operations behave differently under various administrative
 settings:
 
 * When a resource is unmanaged (by setting ``is-managed=false``; see the
   example command after this list): No monitors will be stopped.
 
   If the unmanaged resource is stopped on a node where the cluster thinks it
   should be running, the cluster will detect and report that it is not, but it
   will not consider the monitor failed, and will not try to start the resource
   until it is managed again.
 
   Starting the unmanaged resource on a different node is strongly discouraged.
   It will at least cause the cluster to consider the resource failed, and may
   require the resource's ``target-role`` to be set to ``Stopped`` then
   ``Started`` in order for the resource to be recovered.
 
 * When a node is put into standby: All resources will be moved away from the
   node, and all ``monitor`` operations will be stopped on the node, except those
   specifying ``role`` as ``Stopped`` (which will be newly initiated if
   appropriate).
 
 * When the cluster is put into maintenance mode: All resources will be marked
   as unmanaged. All monitor operations will be stopped, except those
   specifying ``role`` as ``Stopped`` (which will be newly initiated if
   appropriate). As with single unmanaged resources, starting
   a resource on a node other than where the cluster expects it to be will
   cause problems.
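 
 As an example of the unmanaged case, a single resource can be unmanaged, and
 later managed again, with commands like the following (reusing the ``Email``
 resource from an earlier example):
 
 .. code-block:: none
 
    # crm_resource --meta --resource Email --set-parameter is-managed --parameter-value false
    # crm_resource --meta --resource Email --set-parameter is-managed --parameter-value true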
 
 .. _s-operation-defaults:
 
 Setting Global Defaults for Operations
 ______________________________________
 
 You can change the global default values for operation properties
 in a given cluster. These are defined in an ``op_defaults`` section
 of the CIB's ``configuration`` section, and can be set with
 ``crm_attribute``.  For example,
 
 .. code-block:: none
 
    # crm_attribute --type op_defaults --name timeout --update 20s
 
 would default each operation's ``timeout`` to 20 seconds.  If an
 operation's definition also includes a value for ``timeout``, then that
 value would be used for that operation instead.
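 
 The resulting CIB entry might look like the following sketch (the ``id``
 values here are illustrative):
 
 .. code-block:: xml
 
    <op_defaults>
      <meta_attributes id="op_defaults-options">
        <nvpair id="op_defaults-options-timeout" name="timeout" value="20s"/>
      </meta_attributes>
    </op_defaults>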
 
 When Implicit Operations Take a Long Time
 _________________________________________
 
 The cluster will always perform a number of implicit operations: ``start``,
 ``stop`` and a non-recurring ``monitor`` operation used at startup to check
 whether the resource is already active.  If one of these is taking too long,
 then you can create an entry for them and specify a longer timeout.
 
 .. topic:: An OCF resource with custom timeouts for its implicit actions
 
    .. code-block:: xml
 
       <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
         <operations>
            <op id="public-ip-startup" name="monitor" interval="0" timeout="90s"/>
            <op id="public-ip-start" name="start" interval="0" timeout="180s"/>
            <op id="public-ip-stop" name="stop" interval="0" timeout="15min"/>
         </operations>
         <instance_attributes id="params-public-ip">
            <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
         </instance_attributes>
       </primitive>
 
 Multiple Monitor Operations
 ___________________________
 
 Provided no two operations (for a single resource) have the same name
 and interval, you can have as many ``monitor`` operations as you like.
 In this way, you can do a superficial health check every minute and
 progressively more intensive ones at longer intervals.
 
 To tell the resource agent what kind of check to perform, you need to
 provide each monitor with a different value for a common parameter.
 The OCF standard creates a special parameter called ``OCF_CHECK_LEVEL``
 for this purpose and dictates that it is "made available to the
 resource agent without the normal ``OCF_RESKEY`` prefix".
 
 Whatever name you choose, you can specify it by adding an
 ``instance_attributes`` block to the ``op`` tag. It is up to each
 resource agent to look for the parameter and decide how to use it.
 
 .. topic:: An OCF resource with two recurring health checks, performing
            different levels of checks specified via ``OCF_CHECK_LEVEL``.
 
    .. code-block:: xml
 
       <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
          <operations>
             <op id="public-ip-health-60" name="monitor" interval="60">
                <instance_attributes id="params-public-ip-depth-60">
                   <nvpair id="public-ip-depth-60" name="OCF_CHECK_LEVEL" value="10"/>
                </instance_attributes>
             </op>
             <op id="public-ip-health-300" name="monitor" interval="300">
                <instance_attributes id="params-public-ip-depth-300">
                   <nvpair id="public-ip-depth-300" name="OCF_CHECK_LEVEL" value="20"/>
                </instance_attributes>
            </op>
          </operations>
          <instance_attributes id="params-public-ip">
              <nvpair id="public-ip-level" name="ip" value="192.0.2.2"/>
          </instance_attributes>
       </primitive>
 
 Disabling a Monitor Operation
 _____________________________
 
 The easiest way to stop a recurring monitor is to just delete it.
 However, there can be times when you only want to disable it
 temporarily.  In such cases, simply add ``enabled=false`` to the
 operation's definition.
 
 .. topic:: Example of an OCF resource with a disabled health check
 
    .. code-block:: xml
 
       <primitive id="Public-IP" class="ocf" type="IPaddr" provider="heartbeat">
          <operations>
             <op id="public-ip-check" name="monitor" interval="60s" enabled="false"/>
          </operations>
          <instance_attributes id="params-public-ip">
             <nvpair id="public-ip-addr" name="ip" value="192.0.2.2"/>
          </instance_attributes>
       </primitive>
 
 This can be achieved from the command line by executing:
 
 .. code-block:: none
 
    # cibadmin --modify --xml-text '<op id="public-ip-check" enabled="false"/>'
 
 Once you've done whatever you needed to do, you can then re-enable it with
 
 .. code-block:: none
 
    # cibadmin --modify --xml-text '<op id="public-ip-check" enabled="true"/>'
 
 .. [#] See https://github.com/ClusterLabs/OCF-spec/tree/main/ra. The
        Pacemaker implementation has been somewhat extended from the OCF specs.
 
 .. [#] The resource-agents source code includes the **ocf-tester** script,
        which can be useful in this regard.
 
 .. [#] See http://refspecs.linux-foundation.org/LSB_3.0.0/LSB-Core-generic/LSB-Core-generic/iniscrptact.html
        for the LSB Spec as it relates to init scripts.
 
 .. [#] For example, http://0pointer.de/blog/projects/systemd-for-admins-3.html
 
 .. [#] The project has two independent forks, hosted at
        https://www.nagios-plugins.org/ and https://www.monitoring-plugins.org/. Output
        from both projects' plugins is similar, so plugins from either project can be
        used with pacemaker.
 
 .. [#] Currently, anyway. Automatic monitoring operations may be added in a future
        version of Pacemaker.
diff --git a/doc/sphinx/Pacemaker_Explained/rules.rst b/doc/sphinx/Pacemaker_Explained/rules.rst
index 86a404ac89..296438c11e 100644
--- a/doc/sphinx/Pacemaker_Explained/rules.rst
+++ b/doc/sphinx/Pacemaker_Explained/rules.rst
@@ -1,954 +1,963 @@
 .. index::
    single: rule
 
 .. _rules:
 
 Rules
 -----
 
 Rules can be used to make your configuration more dynamic, allowing values to
 change depending on the time or the value of a node attribute. Examples of
 things rules are useful for:
 
 * Set a higher value for :ref:`resource-stickiness <resource-stickiness>`
   during working hours, to minimize downtime, and a lower value on weekends, to
   allow resources to move to their most preferred locations when people aren't
   around to notice.
 
 * Automatically place the cluster into maintenance mode during a scheduled
   maintenance window.
 
 * Assign certain nodes and resources to a particular department via custom
   node attributes and meta-attributes, and add a single location constraint
   that restricts the department's resources to run only on those nodes.
 
 Each constraint type or property set that supports rules may contain one or more
 ``rule`` elements specifying conditions under which the constraint or properties
 take effect. Examples later in this chapter will make this clearer.
 
 .. index::
    pair: XML element; rule
 
 Rule Properties
 ###############
 
 .. table:: **Attributes of a rule Element**
+   :widths: 1 1 3
 
    +-----------------+-------------+-------------------------------------------+
    | Attribute       | Default     | Description                               |
    +=================+=============+===========================================+
    | id              |             | .. index::                                |
    |                 |             |    pair: rule; id                         |
    |                 |             |                                           |
    |                 |             | A unique name for this element (required) |
    +-----------------+-------------+-------------------------------------------+
    | role            | ``Started`` | .. index::                                |
    |                 |             |    pair: rule; role                       |
    |                 |             |                                           |
    |                 |             | The rule is in effect only when the       |
    |                 |             | resource is in the specified role.        |
    |                 |             | Allowed values are ``Started``,           |
    |                 |             | ``Unpromoted``, and ``Promoted``. A rule  |
    |                 |             | with a ``role`` of ``Promoted`` cannot    |
    |                 |             | determine the initial location of a clone |
    |                 |             | instance and will only affect which of    |
    |                 |             | the active instances will be promoted.    |
    +-----------------+-------------+-------------------------------------------+
    | score           |             | .. index::                                |
    |                 |             |    pair: rule; score                      |
    |                 |             |                                           |
    |                 |             | If this rule is used in a location        |
    |                 |             | constraint and evaluates to true, apply   |
    |                 |             | this score to the constraint. Only one of |
    |                 |             | ``score`` and ``score-attribute`` may be  |
    |                 |             | used.                                     |
    +-----------------+-------------+-------------------------------------------+
    | score-attribute |             | .. index::                                |
    |                 |             |    pair: rule; score-attribute            |
    |                 |             |                                           |
    |                 |             | If this rule is used in a location        |
    |                 |             | constraint and evaluates to true, use the |
    |                 |             | value of this node attribute as the score |
    |                 |             | to apply to the constraint. Only one of   |
    |                 |             | ``score`` and ``score-attribute`` may be  |
    |                 |             | used.                                     |
    +-----------------+-------------+-------------------------------------------+
    | boolean-op      | ``and``     | .. index::                                |
    |                 |             |    pair: rule; boolean-op                 |
    |                 |             |                                           |
    |                 |             | If this rule contains more than one       |
    |                 |             | condition, a value of ``and`` specifies   |
    |                 |             | that the rule evaluates to true only if   |
    |                 |             | all conditions are true, and a value of   |
    |                 |             | ``or`` specifies that the rule evaluates  |
    |                 |             | to true if any condition is true.         |
    +-----------------+-------------+-------------------------------------------+
 
 A ``rule`` element must contain one or more conditions. A condition may be an
 ``expression`` element, a ``date_expression`` element, or another ``rule`` element.
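 
 For example, the following sketch shows a location constraint containing a
 rule with a single node attribute expression (the resource and attribute
 names are hypothetical):
 
 .. code-block:: xml
 
    <rsc_location id="loc-dept" rsc="my-resource">
        <rule id="loc-dept-rule" score="INFINITY">
          <expression id="loc-dept-expr" attribute="mycompany-department"
            operation="eq" value="accounting"/>
        </rule>
    </rsc_location>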
 
 
 .. index::
    single: rule; node attribute expression
    single: node attribute; rule expression
    pair: XML element; expression
 
 .. _node_attribute_expressions:
 
 Node Attribute Expressions
 ##########################
 
 Expressions are rule conditions based on the values of node attributes.
 
 .. table:: **Attributes of an expression Element**
+   :class: longtable
+   :widths: 1 2 3
 
    +--------------+---------------------------------+-------------------------------------------+
    | Attribute    | Default                         | Description                               |
    +==============+=================================+===========================================+
    | id           |                                 | .. index::                                |
    |              |                                 |    pair: expression; id                   |
    |              |                                 |                                           |
    |              |                                 | A unique name for this element (required) |
    +--------------+---------------------------------+-------------------------------------------+
    | attribute    |                                 | .. index::                                |
    |              |                                 |    pair: expression; attribute            |
    |              |                                 |                                           |
    |              |                                 | The node attribute to test (required)     |
    +--------------+---------------------------------+-------------------------------------------+
    | type         | The default type for            | .. index::                                |
    |              | ``lt``, ``gt``, ``lte``, and    |    pair: expression; type                 |
    |              | ``gte`` operations is ``number``|                                           |
    |              | if either value contains a      | How the node attributes should be         |
    |              | decimal point character, or     | compared. Allowed values are ``string``,  |
    |              | ``integer`` otherwise. The      | ``integer`` *(since 2.0.5)*, ``number``,  |
    |              | default type for all other      | and ``version``. ``integer`` truncates    |
    |              | operations is ``string``. If a  | floating-point values if necessary before |
    |              | numeric parse fails for either  | performing a 64-bit integer comparison.   |
    |              | value, then the values are      | ``number`` performs a double-precision    |
    |              | compared as type ``string``.    | floating-point comparison                 |
    |              |                                 | *(32-bit integer before 2.0.5)*.          |
    +--------------+---------------------------------+-------------------------------------------+
    | operation    |                                 | .. index::                                |
    |              |                                 |    pair: expression; operation            |
    |              |                                 |                                           |
    |              |                                 | The comparison to perform (required).     |
    |              |                                 | Allowed values:                           |
    |              |                                 |                                           |
    |              |                                 | * ``lt:`` True if the node attribute value|
    |              |                                 |   is less than the comparison value       |
    |              |                                 | * ``gt:`` True if the node attribute value|
    |              |                                 |   is greater than the comparison value    |
    |              |                                 | * ``lte:`` True if the node attribute     |
    |              |                                 |   value is less than or equal to the      |
    |              |                                 |   comparison value                        |
    |              |                                 | * ``gte:`` True if the node attribute     |
    |              |                                 |   value is greater than or equal to the   |
    |              |                                 |   comparison value                        |
    |              |                                 | * ``eq:`` True if the node attribute value|
    |              |                                 |   is equal to the comparison value        |
    |              |                                 | * ``ne:`` True if the node attribute value|
    |              |                                 |   is not equal to the comparison value    |
    |              |                                 | * ``defined:`` True if the node has the   |
    |              |                                 |   named attribute                         |
    |              |                                 | * ``not_defined:`` True if the node does  |
    |              |                                 |   not have the named attribute            |
    +--------------+---------------------------------+-------------------------------------------+
    | value        |                                 | .. index::                                |
    |              |                                 |    pair: expression; value                |
    |              |                                 |                                           |
    |              |                                 | User-supplied value for comparison        |
    |              |                                 | (required for operations other than       |
    |              |                                 | ``defined`` and ``not_defined``)          |
    +--------------+---------------------------------+-------------------------------------------+
    | value-source | ``literal``                     | .. index::                                |
    |              |                                 |    pair: expression; value-source         |
    |              |                                 |                                           |
    |              |                                 | How the ``value`` is derived. Allowed     |
    |              |                                 | values:                                   |
    |              |                                 |                                           |
    |              |                                 | * ``literal``: ``value`` is a literal     |
    |              |                                 |   string to compare against               |
    |              |                                 | * ``param``: ``value`` is the name of a   |
    |              |                                 |   resource parameter to compare against   |
    |              |                                 |   (only valid in location constraints)    |
    |              |                                 | * ``meta``: ``value`` is the name of a    |
    |              |                                 |   resource meta-attribute to compare      |
    |              |                                 |   against (only valid in location         |
    |              |                                 |   constraints)                            |
    +--------------+---------------------------------+-------------------------------------------+
 
 .. _node-attribute-expressions-special:
 
 In addition to custom node attributes defined by the administrator, the cluster
 defines special, built-in node attributes for each node that can also be used
 in rule expressions.
 
 .. table:: **Built-in Node Attributes**
+   :widths: 1 4
 
    +---------------+-----------------------------------------------------------+
    | Name          | Value                                                     |
    +===============+===========================================================+
    | #uname        | :ref:`Node name <node_name>`                              |
    +---------------+-----------------------------------------------------------+
    | #id           | Node ID                                                   |
    +---------------+-----------------------------------------------------------+
    | #kind         | Node type. Possible values are ``cluster``, ``remote``,   |
    |               | and ``container``. Kind is ``remote`` for Pacemaker Remote|
    |               | nodes created with the ``ocf:pacemaker:remote`` resource, |
    |               | and ``container`` for Pacemaker Remote guest nodes and    |
    |               | bundle nodes                                              |
    +---------------+-----------------------------------------------------------+
    | #is_dc        | ``true`` if this node is the cluster's Designated         |
    |               | Controller (DC), ``false`` otherwise                      |
    +---------------+-----------------------------------------------------------+
    | #cluster-name | The value of the ``cluster-name`` cluster property, if set|
    +---------------+-----------------------------------------------------------+
    | #site-name    | The value of the ``site-name`` node attribute, if set,    |
    |               | otherwise identical to ``#cluster-name``                  |
    +---------------+-----------------------------------------------------------+
    | #role         | The role the relevant promotable clone resource has on    |
    |               | this node. Valid only within a rule for a location        |
    |               | constraint for a promotable clone resource.               |
    +---------------+-----------------------------------------------------------+
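 
 Built-in attributes can be used in rules like any other node attribute. As a
 sketch, the following location constraint would keep a (hypothetical) resource
 off of Pacemaker Remote nodes:
 
 .. code-block:: xml
 
    <rsc_location id="ban-remote" rsc="my-resource">
        <rule id="ban-remote-rule" score="-INFINITY">
          <expression id="ban-remote-expr" attribute="#kind"
            operation="eq" value="remote"/>
        </rule>
    </rsc_location>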
 
 .. Add_to_above_table_if_released:
 
    +---------------+-----------------------------------------------------------+
    | #ra-version   | The installed version of the resource agent on the node,  |
    |               | as defined by the ``version`` attribute of the            |
    |               | ``resource-agent`` tag in the agent's metadata. Valid only|
    |               | within rules controlling resource options. This can be    |
    |               | useful during rolling upgrades of a backward-incompatible |
    |               | resource agent. *(since x.x.x)*                           |
 
 
 .. index::
    single: rule; date/time expression
    pair: XML element; date_expression
 
 Date/Time Expressions
 #####################
 
 Date/time expressions are rule conditions based (as the name suggests) on the
 current date and time.
 
 A ``date_expression`` element may optionally contain a ``date_spec`` or
 ``duration`` element depending on the context.
 
 .. table:: **Attributes of a date_expression Element**
+   :widths: 1 4
 
    +---------------+-----------------------------------------------------------+
    | Attribute     | Description                                               |
    +===============+===========================================================+
    | id            | .. index::                                                |
    |               |    pair: id; date_expression                              |
    |               |                                                           |
    |               | A unique name for this element (required)                 |
    +---------------+-----------------------------------------------------------+
    | start         | .. index::                                                |
    |               |    pair: start; date_expression                           |
    |               |                                                           |
    |               | A date/time conforming to the                             |
    |               | `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_       |
    |               | specification. May be used when ``operation`` is          |
    |               | ``in_range`` (in which case at least one of ``start`` or  |
    |               | ``end`` must be specified) or ``gt`` (in which case       |
    |               | ``start`` is required).                                   |
    +---------------+-----------------------------------------------------------+
    | end           | .. index::                                                |
    |               |    pair: end; date_expression                             |
    |               |                                                           |
    |               | A date/time conforming to the                             |
    |               | `ISO8601 <https://en.wikipedia.org/wiki/ISO_8601>`_       |
    |               | specification. May be used when ``operation`` is          |
    |               | ``in_range`` (in which case at least one of ``start`` or  |
    |               | ``end`` must be specified) or ``lt`` (in which case       |
    |               | ``end`` is required).                                     |
    +---------------+-----------------------------------------------------------+
    | operation     | .. index::                                                |
    |               |    pair: operation; date_expression                       |
    |               |                                                           |
    |               | Compares the current date/time with the start and/or end  |
    |               | date, depending on the context. Allowed values:           |
    |               |                                                           |
    |               | * ``gt:`` True if the current date/time is after ``start``|
    |               | * ``lt:`` True if the current date/time is before ``end`` |
    |               | * ``in_range:`` True if the current date/time is after    |
    |               |   ``start`` (if specified) and before either ``end`` (if  |
    |               |   specified) or ``start`` plus the value of the           |
    |               |   ``duration`` element (if one is contained in the        |
    |               |   ``date_expression``)                                    |
    |               | * ``date_spec:`` True if the current date/time matches    |
    |               |   the specification given in the contained ``date_spec``  |
    |               |   element (described below)                               |
    +---------------+-----------------------------------------------------------+
 
 
 .. note:: There is no ``eq``, ``neq``, ``gte``, or ``lte`` operation, since
           they would be valid only for a single second.
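 
 For instance, the effect of an ``eq`` comparison against a whole calendar day
 can be obtained with a ``date_spec`` element (described in the next section).
 A minimal sketch, with hypothetical ``id`` values:
 
 .. topic:: True for all of 2005-03-01, emulating ``eq`` for a single day
 
    .. code-block:: xml
 
       <rule id="rule-one-day" score="INFINITY">
          <date_expression id="date_expr-one-day" operation="date_spec">
           <date_spec id="date_spec-one-day" years="2005" months="3" monthdays="1"/>
          </date_expression>
       </rule>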
 
 
 .. index::
    single: date specification
    pair: XML element; date_spec
 
 Date Specifications
 ___________________
 
 A ``date_spec`` element is used to create a cron-like expression relating
 to time. Each field can contain a single number or range. Any field not
 supplied is ignored.
 
 .. table:: **Attributes of a date_spec Element**
+   :widths: 1 3
 
    +---------------+-----------------------------------------------------------+
    | Attribute     | Description                                               |
    +===============+===========================================================+
    | id            | .. index::                                                |
    |               |    pair: id; date_spec                                    |
    |               |                                                           |
    |               | A unique name for this element (required)                 |
    +---------------+-----------------------------------------------------------+
    | hours         | .. index::                                                |
    |               |    pair: hours; date_spec                                 |
    |               |                                                           |
    |               | Allowed values: 0-23 (where 0 is midnight and 23 is       |
    |               | 11 p.m.)                                                  |
    +---------------+-----------------------------------------------------------+
    | monthdays     | .. index::                                                |
    |               |    pair: monthdays; date_spec                             |
    |               |                                                           |
    |               | Allowed values: 1-31 (depending on month and year)        |
    +---------------+-----------------------------------------------------------+
    | weekdays      | .. index::                                                |
    |               |    pair: weekdays; date_spec                              |
    |               |                                                           |
    |               | Allowed values: 1-7 (where 1 is Monday and 7 is Sunday)   |
    +---------------+-----------------------------------------------------------+
    | yeardays      | .. index::                                                |
    |               |    pair: yeardays; date_spec                              |
    |               |                                                           |
    |               | Allowed values: 1-366 (depending on the year)             |
    +---------------+-----------------------------------------------------------+
    | months        | .. index::                                                |
    |               |    pair: months; date_spec                                |
    |               |                                                           |
    |               | Allowed values: 1-12                                      |
    +---------------+-----------------------------------------------------------+
    | weeks         | .. index::                                                |
    |               |    pair: weeks; date_spec                                 |
    |               |                                                           |
    |               | Allowed values: 1-53 (depending on weekyear)              |
    +---------------+-----------------------------------------------------------+
    | years         | .. index::                                                |
    |               |    pair: years; date_spec                                 |
    |               |                                                           |
    |               | Year according to the Gregorian calendar                  |
    +---------------+-----------------------------------------------------------+
    | weekyears     | .. index::                                                |
    |               |    pair: weekyears; date_spec                             |
    |               |                                                           |
    |               | Year in which the week started; for example, 1 January    |
    |               | 2005 can be specified in ISO 8601 as "2005-001 Ordinal",  |
    |               | "2005-01-01 Gregorian" or "2004-W53-6 Weekly" and thus    |
    |               | would match ``years="2005"`` or ``weekyears="2004"``      |
    +---------------+-----------------------------------------------------------+
    | moon          | .. index::                                                |
    |               |    pair: moon; date_spec                                  |
    |               |                                                           |
    |               | Allowed values: 0-7 (where 0 is the new moon and 4 is the |
    |               | full moon). Seriously, you can use this. This was         |
    |               | implemented to demonstrate the ease with which new        |
    |               | comparisons could be added.                               |
    +---------------+-----------------------------------------------------------+
 
 For example, ``monthdays="1"`` matches the first day of every month, and
 ``hours="09-17"`` matches the hours between 9 a.m. and 5 p.m. (inclusive).
 
 At this time, multiple ranges (e.g. ``weekdays="1,2"`` or ``weekdays="1-2,5-6"``)
 are not supported.
 
 .. note:: Pacemaker can calculate when evaluation of a ``date_expression`` with
           an ``operation`` of ``gt``, ``lt``, or ``in_range`` will next change,
           and schedule a cluster re-check for that time. However, it does not
           do this for ``date_spec``.  Instead, it evaluates the ``date_spec``
           whenever a cluster re-check naturally happens via a cluster event or
           the ``cluster-recheck-interval`` cluster option.
 
           For example, if you have a ``date_spec`` enabling a resource from 9
           a.m. to 5 p.m., and ``cluster-recheck-interval`` has been set to 5
           minutes, then sometime between 9 a.m. and 9:05 a.m. the cluster would
           notice that it needs to start the resource, and sometime between 5
           p.m. and 5:05 p.m. it would realize that it needs to stop the
           resource. The timing of the actual start and stop actions will
           further depend on factors such as any other actions the cluster may
           need to perform first, and the load of the machine.
 
 
 .. index::
    single: duration
    pair: XML element; duration
 
 Durations
 _________
 
 A ``duration`` is used to calculate a value for ``end`` when one is not
 supplied to ``in_range`` operations. It contains one or more attributes, each
 containing a single number. Any attribute not supplied is ignored.
 
 .. table:: **Attributes of a duration Element**
+   :widths: 1 3
 
    +---------------+-----------------------------------------------------------+
    | Attribute     | Description                                               |
    +===============+===========================================================+
    | id            | .. index::                                                |
    |               |    pair: id; duration                                     |
    |               |                                                           |
    |               | A unique name for this element (required)                 |
    +---------------+-----------------------------------------------------------+
    | seconds       | .. index::                                                |
    |               |    pair: seconds; duration                                |
    |               |                                                           |
    |               | This many seconds will be added to the total duration     |
    +---------------+-----------------------------------------------------------+
    | minutes       | .. index::                                                |
    |               |    pair: minutes; duration                                |
    |               |                                                           |
    |               | This many minutes will be added to the total duration     |
    +---------------+-----------------------------------------------------------+
    | hours         | .. index::                                                |
    |               |    pair: hours; duration                                  |
    |               |                                                           |
    |               | This many hours will be added to the total duration       |
    +---------------+-----------------------------------------------------------+
    | weeks         | .. index::                                                |
    |               |    pair: weeks; duration                                  |
    |               |                                                           |
    |               | This many weeks will be added to the total duration       |
    +---------------+-----------------------------------------------------------+
    | months        | .. index::                                                |
    |               |    pair: months; duration                                 |
    |               |                                                           |
    |               | This many months will be added to the total duration      |
    +---------------+-----------------------------------------------------------+
    | years         | .. index::                                                |
    |               |    pair: years; duration                                  |
    |               |                                                           |
    |               | This many years will be added to the total duration       |
    +---------------+-----------------------------------------------------------+
 
 
 Example Time-Based Expressions
 ______________________________
 
 A small sample of how time-based expressions can be used:
 
 .. topic:: True if now is any time in the year 2005
 
    .. code-block:: xml
 
       <rule id="rule1" score="INFINITY">
          <date_expression id="date_expr1" start="2005-001" operation="in_range">
           <duration id="duration1" years="1"/>
          </date_expression>
       </rule>
 
    or equivalently:
 
    .. code-block:: xml
 
       <rule id="rule2" score="INFINITY">
          <date_expression id="date_expr2" operation="date_spec">
           <date_spec id="date_spec2" years="2005"/>
          </date_expression>
       </rule>
 
 .. topic:: 9 a.m. to 5 p.m. Monday through Friday
 
    .. code-block:: xml
 
       <rule id="rule3" score="INFINITY">
          <date_expression id="date_expr3" operation="date_spec">
           <date_spec id="date_spec3" hours="9-16" weekdays="1-5"/>
          </date_expression>
       </rule>
 
    Note that the ``16`` matches all the way through ``16:59:59``, because the
    numeric value of the hour still matches.
 
 .. topic:: 9 a.m. to 5 p.m. Monday through Friday or anytime Saturday
 
    .. code-block:: xml
 
       <rule id="rule4" score="INFINITY" boolean-op="or">
          <date_expression id="date_expr4-1" operation="date_spec">
           <date_spec id="date_spec4-1" hours="9-16" weekdays="1-5"/>
          </date_expression>
          <date_expression id="date_expr4-2" operation="date_spec">
           <date_spec id="date_spec4-2" weekdays="6"/>
          </date_expression>
       </rule>
 
 .. topic:: 9 a.m. to 5 p.m. or 9 p.m. to 12 a.m. Monday through Friday
 
    .. code-block:: xml
 
       <rule id="rule5" score="INFINITY" boolean-op="and">
          <rule id="rule5-nested1" score="INFINITY" boolean-op="or">
           <date_expression id="date_expr5-1" operation="date_spec">
            <date_spec id="date_spec5-1" hours="9-16"/>
           </date_expression>
           <date_expression id="date_expr5-2" operation="date_spec">
            <date_spec id="date_spec5-2" hours="21-23"/>
           </date_expression>
          </rule>
          <date_expression id="date_expr5-3" operation="date_spec">
           <date_spec id="date_spec5-3" weekdays="1-5"/>
          </date_expression>
       </rule>
 
 .. topic:: Mondays in March 2005
 
    .. code-block:: xml
 
       <rule id="rule6" score="INFINITY" boolean-op="and">
          <date_expression id="date_expr6-1" operation="date_spec">
           <date_spec id="date_spec6" weekdays="1"/>
          </date_expression>
          <date_expression id="date_expr6-2" operation="in_range"
            start="2005-03-01" end="2005-04-01"/>
       </rule>
 
    .. note:: Because no time is specified with the above dates, 00:00:00 is
              implied. This means that the range includes all of 2005-03-01 but
              none of 2005-04-01. You may wish to write ``end`` as
              ``"2005-03-31T23:59:59"`` to avoid confusion.
 
 .. topic:: A full moon on Friday the 13th
 
    .. code-block:: xml
 
       <rule id="rule7" score="INFINITY" boolean-op="and">
          <date_expression id="date_expr7" operation="date_spec">
           <date_spec id="date_spec7" weekdays="5" monthdays="13" moon="4"/>
          </date_expression>
       </rule>
 
 
 .. index::
    single: rule; resource expression
    single: resource; rule expression
    pair: XML element; rsc_expression
 
 Resource Expressions
 ####################
 
 An ``rsc_expression`` *(since 2.0.5)* is a rule condition based on a resource
 agent's properties. This rule is only valid within an ``rsc_defaults`` or
 ``op_defaults`` context. None of the matching attributes of ``class``,
 ``provider``, and ``type`` are required. If one is omitted, all values of that
 attribute will match.  For instance, omitting ``type`` means every type will
 match.
 
 .. table:: **Attributes of a rsc_expression Element**
+   :widths: 1 3
 
    +---------------+-----------------------------------------------------------+
    | Attribute     | Description                                               |
    +===============+===========================================================+
    | id            | .. index::                                                |
    |               |    pair: id; rsc_expression                               |
    |               |                                                           |
    |               | A unique name for this element (required)                 |
    +---------------+-----------------------------------------------------------+
    | class         | .. index::                                                |
    |               |    pair: class; rsc_expression                            |
    |               |                                                           |
    |               | The standard name to be matched against resource agents   |
    +---------------+-----------------------------------------------------------+
    | provider      | .. index::                                                |
    |               |    pair: provider; rsc_expression                         |
    |               |                                                           |
    |               | If given, the vendor to be matched against resource       |
    |               | agents (only relevant when ``class`` is ``ocf``)          |
    +---------------+-----------------------------------------------------------+
    | type          | .. index::                                                |
    |               |    pair: type; rsc_expression                             |
    |               |                                                           |
    |               | The name of the resource agent to be matched              |
    +---------------+-----------------------------------------------------------+
 
 Example Resource-Based Expressions
 __________________________________
 
 A small sample of how resource-based expressions can be used:
 
 .. topic:: True for all ``ocf:heartbeat:IPaddr2`` resources
 
    .. code-block:: xml
 
       <rule id="rule1" score="INFINITY">
           <rsc_expression id="rule_expr1" class="ocf" provider="heartbeat" type="IPaddr2"/>
       </rule>
 
 .. topic:: Provider doesn't apply to non-OCF resources
 
    .. code-block:: xml
 
       <rule id="rule2" score="INFINITY">
           <rsc_expression id="rule_expr2" class="stonith" type="fence_xvm"/>
       </rule>
 
 
 .. index::
    single: rule; operation expression
    single: operation; rule expression
    pair: XML element; op_expression
 
 Operation Expressions
 #####################
 
 An ``op_expression`` *(since 2.0.5)* is a rule condition based on an action of
 some resource agent. This rule is only valid within an ``op_defaults`` context.
 
 .. table:: **Attributes of an op_expression Element**
+   :widths: 1 3
 
    +---------------+-----------------------------------------------------------+
    | Attribute     | Description                                               |
    +===============+===========================================================+
    | id            | .. index::                                                |
    |               |    pair: id; op_expression                                |
    |               |                                                           |
    |               | A unique name for this element (required)                 |
    +---------------+-----------------------------------------------------------+
    | name          | .. index::                                                |
    |               |    pair: name; op_expression                              |
    |               |                                                           |
    |               | The action name to match against (required). This can be  |
    |               | any action supported by the resource agent; common values |
    |               | include ``monitor``, ``start``, and ``stop``.              |
    +---------------+-----------------------------------------------------------+
    | interval      | .. index::                                                |
    |               |    pair: interval; op_expression                          |
    |               |                                                           |
    |               | The interval of the action to match against. If not given,|
    |               | only the name attribute will be used to match.            |
    +---------------+-----------------------------------------------------------+
 
 Example Operation-Based Expressions
 ___________________________________
 
 A small sample of how operation-based expressions can be used:
 
 .. topic:: True for all monitor actions
 
    .. code-block:: xml
 
       <rule id="rule1" score="INFINITY">
           <op_expression id="rule_expr1" name="monitor"/>
       </rule>
 
 .. topic:: True for all monitor actions with a 10 second interval
 
    .. code-block:: xml
 
       <rule id="rule2" score="INFINITY">
           <op_expression id="rule_expr2" name="monitor" interval="10s"/>
       </rule>
 
 
 .. index::
    pair: location constraint; rule
 
 Using Rules to Determine Resource Location
 ##########################################
 
 A location constraint may contain one or more top-level rules. The cluster will
 act as if there is a separate location constraint for each rule that evaluates
 to true.
 
 Consider the following simple location constraint:
 
 .. topic:: Prevent resource ``webserver`` from running on node ``node3``
 
    .. code-block:: xml
 
       <rsc_location id="ban-apache-on-node3" rsc="webserver"
                     score="-INFINITY" node="node3"/>
 
 The same constraint can be more verbosely written using a rule:
 
 .. topic:: Prevent resource ``webserver`` from running on node ``node3`` using a rule
 
    .. code-block:: xml
 
       <rsc_location id="ban-apache-on-node3" rsc="webserver">
           <rule id="ban-apache-rule" score="-INFINITY">
             <expression id="ban-apache-expr" attribute="#uname"
               operation="eq" value="node3"/>
           </rule>
       </rsc_location>
 
 The advantage of using the expanded form is that one could add more expressions
 (for example, limiting the constraint to certain days of the week), or activate
 the constraint by some node attribute other than node name.
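 
 For instance, the following sketch (with hypothetical ``id`` values) would ban
 ``webserver`` from ``node3`` on weekdays only, by combining the node-name
 expression with a ``date_expression``:
 
 .. topic:: Prevent resource ``webserver`` from running on node ``node3`` on weekdays
 
    .. code-block:: xml
 
       <rsc_location id="ban-apache-on-node3-weekdays" rsc="webserver">
           <rule id="ban-apache-weekday-rule" score="-INFINITY" boolean-op="and">
             <expression id="ban-apache-weekday-expr" attribute="#uname"
               operation="eq" value="node3"/>
             <date_expression id="ban-apache-weekday-date" operation="date_spec">
               <date_spec id="ban-apache-weekday-spec" weekdays="1-5"/>
             </date_expression>
           </rule>
       </rsc_location>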
 
 Location Rules Based on Other Node Properties
 _____________________________________________
 
 The expanded form allows us to match on node properties other than the node's
 name.
 If we rated each machine's CPU power such that the cluster had the following
 nodes section:
 
 .. topic:: Sample node section with node attributes
 
    .. code-block:: xml
 
       <nodes>
          <node id="uuid1" uname="c001n01" type="normal">
             <instance_attributes id="uuid1-custom_attrs">
               <nvpair id="uuid1-cpu_mips" name="cpu_mips" value="1234"/>
             </instance_attributes>
          </node>
          <node id="uuid2" uname="c001n02" type="normal">
             <instance_attributes id="uuid2-custom_attrs">
               <nvpair id="uuid2-cpu_mips" name="cpu_mips" value="5678"/>
             </instance_attributes>
          </node>
       </nodes>
 
 then we could prevent resources from running on underpowered machines with this
 rule:
 
 .. topic:: Rule using a node attribute (to be used inside a location constraint)
 
    .. code-block:: xml
 
       <rule id="need-more-power-rule" score="-INFINITY">
          <expression id="need-more-power-expr" attribute="cpu_mips"
                      operation="lt" value="3000"/>
       </rule>
 
 Using ``score-attribute`` Instead of ``score``
 ______________________________________________
 
 When using ``score-attribute`` instead of ``score``, each node matched by the
 rule has its score adjusted differently, according to its value for the named
 node attribute. Thus, in the previous example, if a rule inside a location
 constraint for a resource used ``score-attribute="cpu_mips"``, ``c001n01``
 would have its preference to run the resource increased by ``1234`` whereas
 ``c001n02`` would have its preference increased by ``5678``.
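 
 For instance, the following sketch (with hypothetical ``id`` values, and
 assuming the ``cpu_mips`` node attributes from the example above) would make
 each node's preference for running a resource proportional to its CPU power:
 
 .. topic:: Rule using ``score-attribute`` (to be used inside a location constraint)
 
    .. code-block:: xml
 
       <rule id="prefer-fast-nodes-rule" score-attribute="cpu_mips">
          <expression id="prefer-fast-nodes-expr" attribute="cpu_mips"
                      operation="defined"/>
       </rule>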
 
 
 .. index::
    pair: cluster option; rule
    pair: instance attribute; rule
    pair: meta-attribute; rule
    pair: resource defaults; rule
    pair: operation defaults; rule
    pair: node attribute; rule
 
 Using Rules to Define Options
 #############################
 
 Rules may be used to control a variety of options:
 
 * :ref:`Cluster options <cluster_options>` (``cluster_property_set`` elements)
 * :ref:`Node attributes <node_attributes>` (``instance_attributes`` or
   ``utilization`` elements inside a ``node`` element)
 * :ref:`Resource options <resource_options>` (``utilization``,
   ``meta_attributes``, or ``instance_attributes`` elements inside a resource
   definition element or ``op``, ``rsc_defaults``, ``op_defaults``, or
   ``template`` element)
 * :ref:`Operation properties <operation_properties>` (``meta_attributes``
   elements inside an ``op`` or ``op_defaults`` element)
 
 .. note::
 
    Attribute-based expressions for meta-attributes can only be used within
    ``operations`` and ``op_defaults``.  They will not work with resource
    configuration or ``rsc_defaults``.  Additionally, attribute-based
    expressions cannot be used with cluster options.
 
 Using Rules to Control Resource Options
 _______________________________________
 
 Often some cluster nodes will be different from their peers. Sometimes,
 these differences -- e.g. the location of a binary or the names of network
 interfaces -- require resources to be configured differently depending
 on the machine they're hosted on.
 
 By defining multiple ``instance_attributes`` objects for the resource and
 adding a rule to each, we can easily handle these special cases.
 
 In the example below, ``mySpecialRsc`` will use eth1 and port 9999 when run on
 ``node1``, eth2 and port 8888 on ``node2`` and default to eth0 and port 9999
 for all other nodes.
 
 .. topic:: Defining different resource options based on the node name
 
    .. code-block:: xml
 
       <primitive id="mySpecialRsc" class="ocf" type="Special" provider="me">
          <instance_attributes id="special-node1" score="3">
           <rule id="node1-special-case" score="INFINITY" >
            <expression id="node1-special-case-expr" attribute="#uname"
              operation="eq" value="node1"/>
           </rule>
           <nvpair id="node1-interface" name="interface" value="eth1"/>
          </instance_attributes>
          <instance_attributes id="special-node2" score="2" >
           <rule id="node2-special-case" score="INFINITY">
            <expression id="node2-special-case-expr" attribute="#uname"
              operation="eq" value="node2"/>
           </rule>
           <nvpair id="node2-interface" name="interface" value="eth2"/>
           <nvpair id="node2-port" name="port" value="8888"/>
          </instance_attributes>
          <instance_attributes id="defaults" score="1" >
           <nvpair id="default-interface" name="interface" value="eth0"/>
           <nvpair id="default-port" name="port" value="9999"/>
          </instance_attributes>
       </primitive>
 
 The order in which ``instance_attributes`` objects are evaluated is determined
 by their score (highest to lowest). If not supplied, the score defaults to
 zero. Objects with an equal score are processed in their listed order. If the
 ``instance_attributes`` object has no rule, or a ``rule`` that evaluates to
 ``true``, then for any parameter the resource does not yet have a value for,
 it will use the parameter values defined by the ``instance_attributes``.
 
 For example, given the configuration above, if the resource is placed on
 ``node1``:
 
 * ``special-node1`` has the highest score (3) and so is evaluated first; its
   rule evaluates to ``true``, so ``interface`` is set to ``eth1``.
 * ``special-node2`` is evaluated next with score 2, but its rule evaluates to
   ``false``, so it is ignored.
 * ``defaults`` is evaluated last with score 1, and has no rule, so its values
   are examined; ``interface`` is already defined, so the value here is not
   used, but ``port`` is not yet defined, so ``port`` is set to ``9999``.
 
 Using Rules to Control Resource Defaults
 ________________________________________
 
 Rules can be used for resource and operation defaults. The following example
 illustrates how to set a different ``resource-stickiness`` value during and
 outside work hours. This allows resources to automatically move back to their
 most preferred hosts, but at a time that (in theory) does not interfere with
 business activities.
 
 .. topic:: Change ``resource-stickiness`` during working hours
 
    .. code-block:: xml
 
       <rsc_defaults>
          <meta_attributes id="core-hours" score="2">
             <rule id="core-hour-rule" score="0">
               <date_expression id="nine-to-five-Mon-to-Fri" operation="date_spec">
                 <date_spec id="nine-to-five-Mon-to-Fri-spec" hours="9-16" weekdays="1-5"/>
               </date_expression>
             </rule>
             <nvpair id="core-stickiness" name="resource-stickiness" value="INFINITY"/>
          </meta_attributes>
          <meta_attributes id="after-hours" score="1" >
             <nvpair id="after-stickiness" name="resource-stickiness" value="0"/>
          </meta_attributes>
       </rsc_defaults>
 
 Rules may be used similarly in ``instance_attributes`` or ``utilization``
 blocks.
 
 Any single block may directly contain only a single rule, but that rule may
 itself contain any number of rules.
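 
 For instance, here is a sketch (``id`` values hypothetical) of a
 ``meta_attributes`` block in ``rsc_defaults`` whose single top-level rule
 contains two nested rules, making resources sticky during weekday core hours
 or at any time on the weekend:
 
 .. topic:: A block whose single top-level rule contains nested rules
 
    .. code-block:: xml
 
       <rsc_defaults>
           <meta_attributes id="nested-example">
               <rule id="nested-outer" score="INFINITY" boolean-op="or">
                   <rule id="nested-weekday-hours" score="INFINITY" boolean-op="and">
                       <date_expression id="nested-date-1" operation="date_spec">
                           <date_spec id="nested-spec-1" hours="9-16"/>
                       </date_expression>
                       <date_expression id="nested-date-2" operation="date_spec">
                           <date_spec id="nested-spec-2" weekdays="1-5"/>
                       </date_expression>
                   </rule>
                   <rule id="nested-weekend" score="INFINITY">
                       <date_expression id="nested-date-3" operation="date_spec">
                           <date_spec id="nested-spec-3" weekdays="6-7"/>
                       </date_expression>
                   </rule>
               </rule>
               <nvpair id="nested-stickiness" name="resource-stickiness" value="INFINITY"/>
           </meta_attributes>
       </rsc_defaults>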
 
 ``rsc_expression`` and ``op_expression`` blocks may additionally be used to
 set defaults on either a single resource or across an entire class of resources
 with a single rule. ``rsc_expression`` may be used to select resource agents
 within both ``rsc_defaults`` and ``op_defaults``, while ``op_expression`` may
 only be used within ``op_defaults``. If multiple rules succeed for a given
 resource agent, the last one specified will be the one that takes effect. As
 with any other rule, boolean operations may be used to make more complicated
 expressions.
 
 .. topic:: Default all IPaddr2 resources to stopped
 
    .. code-block:: xml
 
       <rsc_defaults>
           <meta_attributes id="op-target-role">
               <rule id="op-target-role-rule" score="INFINITY">
                   <rsc_expression id="op-target-role-expr" class="ocf" provider="heartbeat"
                     type="IPaddr2"/>
               </rule>
               <nvpair id="op-target-role-nvpair" name="target-role" value="Stopped"/>
           </meta_attributes>
       </rsc_defaults>
 
 .. topic:: Default all monitor action timeouts to 7 seconds
 
    .. code-block:: xml
 
       <op_defaults>
           <meta_attributes id="op-monitor-defaults">
               <rule id="op-monitor-default-rule" score="INFINITY">
                   <op_expression id="op-monitor-default-expr" name="monitor"/>
               </rule>
               <nvpair id="op-monitor-timeout" name="timeout" value="7s"/>
           </meta_attributes>
       </op_defaults>
 
 .. topic:: Default the timeout on all 10-second-interval monitor actions on ``IPaddr2`` resources to 8 seconds
 
    .. code-block:: xml
 
       <op_defaults>
           <meta_attributes id="op-monitor-and">
               <rule id="op-monitor-and-rule" score="INFINITY">
                   <rsc_expression id="op-monitor-and-rsc-expr" class="ocf" provider="heartbeat"
                     type="IPaddr2"/>
                   <op_expression id="op-monitor-and-op-expr" name="monitor" interval="10s"/>
               </rule>
               <nvpair id="op-monitor-and-timeout" name="timeout" value="8s"/>
           </meta_attributes>
       </op_defaults>
 
 
 .. index::
    pair: rule; cluster option
 
 Using Rules to Control Cluster Options
 ______________________________________
 
 Controlling cluster options is achieved in much the same manner as specifying
 different resource options on different nodes.
 
 The following example illustrates how to set ``maintenance-mode`` during a
 scheduled maintenance window. This will keep the cluster running but not
 monitor, start, or stop resources during this time.
 
 .. topic:: Schedule a maintenance window for 9 to 11 p.m. CDT Sept. 20, 2019
 
    .. code-block:: xml
 
       <crm_config>
          <cluster_property_set id="cib-bootstrap-options">
            <nvpair id="bootstrap-stonith-enabled" name="stonith-enabled" value="1"/>
          </cluster_property_set>
          <cluster_property_set id="normal-set" score="10">
            <nvpair id="normal-maintenance-mode" name="maintenance-mode" value="false"/>
          </cluster_property_set>
          <cluster_property_set id="maintenance-window-set" score="1000">
            <nvpair id="maintenance-nvpair1" name="maintenance-mode" value="true"/>
            <rule id="maintenance-rule1" score="INFINITY">
              <date_expression id="maintenance-date1" operation="in_range"
                start="2019-09-20 21:00:00 -05:00" end="2019-09-20 23:00:00 -05:00"/>
            </rule>
          </cluster_property_set>
       </crm_config>
 
 .. important:: The ``cluster_property_set`` with an ``id`` set to
                "cib-bootstrap-options" will *always* have the highest priority,
                regardless of any scores. Therefore, rules in another
                ``cluster_property_set`` can never take effect for any
                properties listed in the bootstrap set.
diff --git a/doc/sphinx/Pacemaker_Explained/status.rst b/doc/sphinx/Pacemaker_Explained/status.rst
index 51a59b9f11..2d7dd7e81c 100644
--- a/doc/sphinx/Pacemaker_Explained/status.rst
+++ b/doc/sphinx/Pacemaker_Explained/status.rst
@@ -1,368 +1,372 @@
 .. index::
    single: status
    single: XML element, status
 
 Status -- Here be dragons
 -------------------------
 
 Most users never need to understand the contents of the status section
 and can be happy with the output from ``crm_mon``.
 
 However, for those with a curious inclination, this section attempts to
 provide an overview of its contents.
 
 .. index::
    single: node; status
        
 Node Status
 ###########
    
 In addition to the cluster's configuration, the CIB holds an
 up-to-date representation of each cluster node in the ``status`` section.
 
 .. topic:: A bare-bones status entry for a healthy node **cl-virt-1**
 
    .. code-block:: xml
 
       <node_state id="1" uname="cl-virt-1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
        <transient_attributes id="1"/>
        <lrm id="1"/>
       </node_state>
    
 Users are strongly advised *not* to modify any part of a node's
 state *directly*.  The cluster will periodically regenerate the entire
 section from authoritative sources, so any changes should be done
 with the tools appropriate to those sources.
          
 .. table:: **Authoritative Sources for State Information**
+   :widths: 1 1
 
    +----------------------+----------------------+
    | CIB Object           | Authoritative Source |
    +======================+======================+
    | node_state           | pacemaker-controld   |
    +----------------------+----------------------+
    | transient_attributes | pacemaker-attrd      |
    +----------------------+----------------------+
    | lrm                  | pacemaker-execd      |
    +----------------------+----------------------+
 
 The fields used in the ``node_state`` objects are named as they are
 largely for historical reasons, rooted in Pacemaker's origins
 as the resource manager for the older Heartbeat project. They have remained
 unchanged to preserve compatibility with older versions.
          
 .. table:: **Node Status Fields**
+   :widths: 1 3
 
    +------------------+----------------------------------------------------------+
    | Field            | Description                                              |
    +==================+==========================================================+
    | id               | .. index::                                               |
    |                  |    single: id; node status                               |
    |                  |    single: node; status, id                              |
    |                  |                                                          |
    |                  | Unique identifier for the node.  Corosync-based clusters |
    |                  | use a numeric counter.                                   |
    +------------------+----------------------------------------------------------+
    | uname            | .. index::                                               |
    |                  |    single: uname; node status                            |
    |                  |    single: node; status, uname                           |
    |                  |                                                          |
    |                  | The node's name as known by the cluster                  |
    +------------------+----------------------------------------------------------+
    | in_ccm           | .. index::                                               |
    |                  |    single: in_ccm; node status                           |
    |                  |    single: node; status, in_ccm                          |
    |                  |                                                          |
    |                  | Is the node a member at the cluster communication layer? |
    |                  | Allowed values: ``true``, ``false``.                     |
    +------------------+----------------------------------------------------------+
    | crmd             | .. index::                                               |
    |                  |    single: crmd; node status                             |
    |                  |    single: node; status, crmd                            |
    |                  |                                                          |
    |                  | Is the node a member at the pacemaker layer?  Allowed    |
    |                  | values: ``online``, ``offline``.                         |
    +------------------+----------------------------------------------------------+
    | crm-debug-origin | .. index::                                               |
    |                  |    single: crm-debug-origin; node status                 |
    |                  |    single: node; status, crm-debug-origin                |
    |                  |                                                          |
    |                  | The name of the source function that made the most       |
    |                  | recent change (for debugging purposes).                  |
    +------------------+----------------------------------------------------------+
    | join             | .. index::                                               |
    |                  |    single: join; node status                             |
    |                  |    single: node; status, join                            |
    |                  |                                                          |
    |                  | Does the node participate in hosting resources?          |
    |                  | Allowed values: ``down``, ``pending``, ``member``,       |
    |                  | ``banned``.                                              |
    +------------------+----------------------------------------------------------+
    | expected         | .. index::                                               |
    |                  |   single: expected; node status                          |
    |                  |   single: node; status, expected                         |
    |                  |                                                          |
    |                  | Expected value for ``join``.                             |
    +------------------+----------------------------------------------------------+
    
 The cluster uses these fields to determine whether, at the node level, the
 node is healthy or is in a failed state and needs to be fenced.
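 
 For example, a hypothetical entry for a node whose controller has failed
 while the node remains a member at the cluster communication layer (a
 combination the cluster treats as a failure requiring fencing) might look
 like this:
 
 .. topic:: A status entry for a failed node (hypothetical sketch)
 
    .. code-block:: xml
 
       <node_state id="2" uname="cl-virt-2" in_ccm="true" crmd="offline"
         join="down" expected="member"/>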
    
 Transient Node Attributes
 #########################
    
 Like regular :ref:`node_attributes`, the name/value
 pairs listed in the ``transient_attributes`` section help to describe the
 node.  However, they are forgotten by the cluster when the node goes offline.
 This can be useful, for instance, when you want a node to be in standby mode
 (not able to run resources) just until the next reboot.
      
 In addition to any values the administrator sets, the cluster will
 also store information about failed resources here.
          
 .. topic:: A set of transient node attributes for node **cl-virt-1**
 
    .. code-block:: xml
    
       <transient_attributes id="cl-virt-1">
         <instance_attributes id="status-cl-virt-1">
            <nvpair id="status-cl-virt-1-pingd" name="pingd" value="3"/>
            <nvpair id="status-cl-virt-1-probe_complete" name="probe_complete" value="true"/>
            <nvpair id="status-cl-virt-1-fail-count-pingd:0.monitor_30000" name="fail-count-pingd:0#monitor_30000" value="1"/>
            <nvpair id="status-cl-virt-1-last-failure-pingd:0" name="last-failure-pingd:0" value="1239009742"/>
         </instance_attributes>
       </transient_attributes>
    
 In the above example, we can see that a monitor on the ``pingd:0`` resource has
 failed once, at 09:22:22 UTC 6 April 2009 [#]_.
 
 We also see that the node is connected to three **pingd** peers and that
 all known resources have been checked for on this machine (``probe_complete``).
          
 .. index::
    single: Operation History
 
 Operation History
 #################
    
 A node's resource history is held in the ``lrm_resources`` tag (a child
 of the ``lrm`` tag). The information stored here is sufficient
 for the cluster to stop the resource safely if it is
 removed from the ``configuration`` section. Specifically, the resource's
 ``id``, ``class``, ``type`` and ``provider`` are stored.
 
 .. topic:: A record of the ``apcstonith`` resource
 
    .. code-block:: xml
 
       <lrm_resource id="apcstonith" type="fence_apc_snmp" class="stonith"/>
    
 Additionally, we store the last job for every combination of
 ``resource``, ``action`` and ``interval``.  The concatenation of the values
 in this tuple is used to create the id of the ``lrm_rsc_op`` object (for
 example, ``apcstonith_monitor_0``).
 
 .. table:: **Contents of an lrm_rsc_op job**
+   :class: longtable
+   :widths: 1 3
 
    +------------------+----------------------------------------------------------+
    | Field            | Description                                              |
    +==================+==========================================================+
    | id               | .. index::                                               |
    |                  |    single: id; action status                             |
    |                  |    single: action; status, id                            |
    |                  |                                                          |
    |                  | Identifier for the job, constructed from the resource    |
    |                  | name and the job's ``operation`` and ``interval``.       |
    +------------------+----------------------------------------------------------+
    | call-id          | .. index::                                               |
    |                  |    single: call-id; action status                        |
    |                  |    single: action; status, call-id                       |
    |                  |                                                          |
    |                  | The job's ticket number. Used as a sort key to determine |
    |                  | the order in which the jobs were executed.               |
    +------------------+----------------------------------------------------------+
    | operation        | .. index::                                               |
    |                  |    single: operation; action status                      |
    |                  |    single: action; status, operation                     |
    |                  |                                                          |
    |                  | The action the resource agent was invoked with.          |
    +------------------+----------------------------------------------------------+
    | interval         | .. index::                                               |
    |                  |    single: interval; action status                       |
    |                  |    single: action; status, interval                      |
    |                  |                                                          |
    |                  | The frequency, in milliseconds, at which the operation   |
    |                  | will be repeated. A one-off job is indicated by 0.       |
    +------------------+----------------------------------------------------------+
    | op-status        | .. index::                                               |
    |                  |    single: op-status; action status                      |
    |                  |    single: action; status, op-status                     |
    |                  |                                                          |
    |                  | The job's status. Generally this will be either 0 (done) |
    |                  | or -1 (pending); ``rc-code`` is usually used instead.    |
    +------------------+----------------------------------------------------------+
    | rc-code          | .. index::                                               |
    |                  |    single: rc-code; action status                        |
    |                  |    single: action; status, rc-code                       |
    |                  |                                                          |
    |                  | The job's result. Refer to the *Resource Agents* chapter |
    |                  | of *Pacemaker Administration* for details on what the    |
    |                  | values here mean and how they are interpreted.           |
    +------------------+----------------------------------------------------------+
    | last-rc-change   | .. index::                                               |
    |                  |    single: last-rc-change; action status                 |
    |                  |    single: action; status, last-rc-change                |
    |                  |                                                          |
    |                  | Machine-local date/time, in seconds since epoch, at      |
    |                  | which the job first returned the current value of        |
    |                  | ``rc-code``.  For diagnostic purposes.                   |
    +------------------+----------------------------------------------------------+
    | exec-time        | .. index::                                               |
    |                  |    single: exec-time; action status                      |
    |                  |    single: action; status, exec-time                     |
    |                  |                                                          |
    |                  | Time, in milliseconds, that the job was running for.     |
    |                  | For diagnostic purposes.                                 |
    +------------------+----------------------------------------------------------+
    | queue-time       | .. index::                                               |
    |                  |    single: queue-time; action status                     |
    |                  |    single: action; status, queue-time                    |
    |                  |                                                          |
    |                  | Time, in milliseconds, that the job was queued for in    |
    |                  | the local executor. For diagnostic purposes.             |
    +------------------+----------------------------------------------------------+
    | crm_feature_set  | .. index::                                               |
    |                  |    single: crm_feature_set; action status                |
    |                  |    single: action; status, crm_feature_set               |
    |                  |                                                          |
    |                  | The version to which this job description conforms. Used |
    |                  | when processing ``op-digest``.                           |
    +------------------+----------------------------------------------------------+
    | transition-key   | .. index::                                               |
    |                  |    single: transition-key; action status                 |
    |                  |    single: action; status, transition-key                |
    |                  |                                                          |
    |                  | A concatenation of the job's graph action number, the    |
    |                  | graph number, the expected result and the UUID of the    |
    |                  | controller instance that scheduled it. This is used to   |
    |                  | construct ``transition-magic`` (below).                  |
    +------------------+----------------------------------------------------------+
    | transition-magic | .. index::                                               |
    |                  |    single: transition-magic; action status               |
    |                  |    single: action; status, transition-magic              |
    |                  |                                                          |
    |                  | A concatenation of the job's ``op-status``, ``rc-code``  |
    |                  | and ``transition-key``. Guaranteed to be unique for the  |
    |                  | life of the cluster (which ensures it is part of CIB     |
    |                  | update notifications) and contains all the information   |
    |                  | needed for the controller to correctly analyze and       |
    |                  | process the completed job. Most importantly, the         |
    |                  | decomposed elements tell the controller if the job       |
    |                  | entry was expected and whether it failed.                |
    +------------------+----------------------------------------------------------+
    | op-digest        | .. index::                                               |
    |                  |    single: op-digest; action status                      |
    |                  |    single: action; status, op-digest                     |
    |                  |                                                          |
    |                  | An MD5 sum representing the parameters passed to the     |
    |                  | job. Used to detect changes to the configuration, to     |
    |                  | restart resources if necessary.                          |
    +------------------+----------------------------------------------------------+
    | crm-debug-origin | .. index::                                               |
    |                  |    single: crm-debug-origin; action status               |
    |                  |    single: action; status, crm-debug-origin              |
    |                  |                                                          |
    |                  | The origin of the current values.  For diagnostic        |
    |                  | purposes.                                                |
    +------------------+----------------------------------------------------------+
    
 Simple Operation History Example
 ________________________________
            
 .. topic:: A monitor operation (determines current state of the ``apcstonith`` resource)
 
    .. code-block:: xml
 
       <lrm_resource id="apcstonith" type="fence_apc_snmp" class="stonith">
         <lrm_rsc_op id="apcstonith_monitor_0" operation="monitor" call-id="2"
           rc-code="7" op-status="0" interval="0"
           crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
           op-digest="2e3da9274d3550dc6526fb24bfcbcba0"
           transition-key="22:2:7:2668bbeb-06d5-40f9-936d-24cb7f87006a"
           transition-magic="0:7;22:2:7:2668bbeb-06d5-40f9-936d-24cb7f87006a"
           last-rc-change="1239008085" exec-time="10" queue-time="0"/>
       </lrm_resource>
 
 In the above example, the job is a non-recurring monitor operation for
 the ``apcstonith`` resource, often referred to as a "probe".
 
 The cluster schedules probes for every configured resource on a node when
 the node first starts, in order to determine the resource's current state
 before it takes any further action.
        
 From the ``transition-key``, we can see that this was the 22nd action of
 the 2nd graph produced by this instance of the controller
 (2668bbeb-06d5-40f9-936d-24cb7f87006a).
 
 The third field of the ``transition-key`` contains a 7, which indicates
 that the job expects to find the resource inactive. By looking at the ``rc-code``
 property, we see that this was the case.
 
 As that is the only job recorded for this node, we can conclude that
 the cluster started the resource elsewhere.
    
 Complex Operation History Example
 _________________________________
            
 .. topic:: Resource history of a ``pingd`` clone with multiple jobs
 
    .. code-block:: xml
 
       <lrm_resource id="pingd:0" type="pingd" class="ocf" provider="pacemaker">
         <lrm_rsc_op id="pingd:0_monitor_30000" operation="monitor" call-id="34"
           rc-code="0" op-status="0" interval="30000"
           crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
           transition-key="10:11:0:2668bbeb-06d5-40f9-936d-24cb7f87006a"
           last-rc-change="1239009741" exec-time="10" queue-time="0"/>
         <lrm_rsc_op id="pingd:0_stop_0" operation="stop"
           crm-debug-origin="do_update_resource" crm_feature_set="3.0.1" call-id="32"
           rc-code="0" op-status="0" interval="0"
           transition-key="11:11:0:2668bbeb-06d5-40f9-936d-24cb7f87006a"
           last-rc-change="1239009741" exec-time="10" queue-time="0"/>
         <lrm_rsc_op id="pingd:0_start_0" operation="start" call-id="33"
           rc-code="0" op-status="0" interval="0"
           crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
           transition-key="31:11:0:2668bbeb-06d5-40f9-936d-24cb7f87006a"
           last-rc-change="1239009741" exec-time="10" queue-time="0" />
         <lrm_rsc_op id="pingd:0_monitor_0" operation="monitor" call-id="3"
           rc-code="0" op-status="0" interval="0"
           crm-debug-origin="do_update_resource" crm_feature_set="3.0.1"
           transition-key="23:2:7:2668bbeb-06d5-40f9-936d-24cb7f87006a"
           last-rc-change="1239008085" exec-time="20" queue-time="0"/>
         </lrm_resource>
    
 When more than one job record exists, it is important to sort them by
 ``call-id`` before interpreting them.
 
 Once sorted, the above example can be summarized as:
 
 #. A non-recurring monitor operation returning 7 (not running), with a ``call-id`` of 3
 #. A stop operation returning 0 (success), with a ``call-id`` of 32
 #. A start operation returning 0 (success), with a ``call-id`` of 33
 #. A recurring monitor returning 0 (success), with a ``call-id`` of 34
 
 The cluster processes each job record to build up a picture of the
 resource's state.  After the first and second entries, it is
 considered stopped, and after the third it is considered active.
 
 Based on the last operation, we can tell that the resource is
 currently active.
 
 Additionally, from the presence of a ``stop`` operation with a lower
 ``call-id`` than that of the ``start`` operation, we can conclude that the
 resource has been restarted.  Specifically, this occurred as part of
 actions 11 and 31 of transition 11 from the controller instance with the key
 ``2668bbeb...``.  This information can be helpful for locating the
 relevant section of the logs when looking for the source of a failure.
 
 .. [#] You can use the standard ``date`` command to print a human-readable version
        of any seconds-since-epoch value, for example ``date -d @1239009742``.
diff --git a/lib/common/options.c b/lib/common/options.c
index bbd9d9b986..57654b8266 100644
--- a/lib/common/options.c
+++ b/lib/common/options.c
@@ -1,671 +1,672 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef _GNU_SOURCE
 #  define _GNU_SOURCE
 #endif
 
 #include <crm_internal.h>
 
 #include <stdio.h>
 #include <string.h>
 #include <stdlib.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 
 #ifdef HAVE_GETOPT_H
 #  include <getopt.h>
 #endif
 
 #include <crm/crm.h>
 
 
 /*
  * Command-line option handling
  */
 
 static char *crm_short_options = NULL;
 static pcmk__cli_option_t *crm_long_options = NULL;
 static const char *crm_app_description = NULL;
 static const char *crm_app_usage = NULL;
 
 void
 pcmk__cli_option_cleanup()
 {
     free(crm_short_options);
     crm_short_options = NULL;
 }
 
 static struct option *
 create_long_opts(pcmk__cli_option_t *long_options)
 {
     struct option *long_opts = NULL;
 
 #ifdef HAVE_GETOPT_H
     int index = 0, lpc = 0;
 
     /*
      * A previous, possibly poor, choice of '?' as the short form of --help
      * means that getopt_long() returns '?' for both --help and for "unknown option"
      *
      * This dummy entry allows us to differentiate between the two in
      * pcmk__next_cli_option() and exit with the correct error code.
      */
     long_opts = pcmk__realloc(long_opts, (index + 1) * sizeof(struct option));
     long_opts[index].name = "__dummy__";
     long_opts[index].has_arg = 0;
     long_opts[index].flag = 0;
     long_opts[index].val = '_';
     index++;
 
     // cppcheck seems not to understand the abort-logic in pcmk__realloc
     // cppcheck-suppress memleak
     for (lpc = 0; long_options[lpc].name != NULL; lpc++) {
         if (long_options[lpc].name[0] == '-') {
             continue;
         }
 
         long_opts = pcmk__realloc(long_opts, (index + 1) * sizeof(struct option));
         /*fprintf(stderr, "Creating %d %s = %c\n", index,
          * long_options[lpc].name, long_options[lpc].val);      */
         long_opts[index].name = long_options[lpc].name;
         long_opts[index].has_arg = long_options[lpc].has_arg;
         long_opts[index].flag = long_options[lpc].flag;
         long_opts[index].val = long_options[lpc].val;
         index++;
     }
 
     /* Now create the list terminator */
     long_opts = pcmk__realloc(long_opts, (index + 1) * sizeof(struct option));
     long_opts[index].name = NULL;
     long_opts[index].has_arg = 0;
     long_opts[index].flag = 0;
     long_opts[index].val = 0;
 #endif
 
     return long_opts;
 }
 
 /*!
  * \internal
  * \brief Define the command-line options a daemon or tool accepts
  *
  * \param[in] short_options  getopt(3)-style short option list
  * \param[in] app_usage      summary of how command is invoked (for help)
  * \param[in] long_options   definition of options accepted
  * \param[in] app_desc       brief command description (for help)
  */
 void
 pcmk__set_cli_options(const char *short_options, const char *app_usage,
                       pcmk__cli_option_t *long_options, const char *app_desc)
 {
     if (short_options) {
         crm_short_options = strdup(short_options);
 
     } else if (long_options) {
         int lpc = 0;
         int opt_string_len = 0;
         char *local_short_options = NULL;
 
         for (lpc = 0; long_options[lpc].name != NULL; lpc++) {
             if (long_options[lpc].val && long_options[lpc].val != '-' && long_options[lpc].val < UCHAR_MAX) {
                 local_short_options = pcmk__realloc(local_short_options,
                                                     opt_string_len + 4);
                 local_short_options[opt_string_len++] = long_options[lpc].val;
                 /* getopt(3) says: Two colons mean an option takes an optional arg; */
                 if (long_options[lpc].has_arg == optional_argument) {
                     local_short_options[opt_string_len++] = ':';
                 }
                 if (long_options[lpc].has_arg >= required_argument) {
                     local_short_options[opt_string_len++] = ':';
                 }
                 local_short_options[opt_string_len] = 0;
             }
         }
         crm_short_options = local_short_options;
         crm_trace("Generated short option string: '%s'", local_short_options);
     }
 
     if (long_options) {
         crm_long_options = long_options;
     }
     if (app_desc) {
         crm_app_description = app_desc;
     }
     if (app_usage) {
         crm_app_usage = app_usage;
     }
 }
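 
 /* Illustrative sketch (not part of this patch): how a tool might define its
  * options with pcmk__set_cli_options(). The field names and pcmk__option_*
  * flags follow their usage elsewhere in this file; the options themselves
  * are hypothetical.
  */
 static pcmk__cli_option_t example_options[] = {
     { .name = "help", .has_arg = no_argument, .val = '?',
       .desc = "Display help and exit", .flags = pcmk__option_default },
     { .name = "file", .has_arg = required_argument, .val = 'f',
       .desc = "Read input from FILE", .flags = pcmk__option_default },
     { .name = NULL }    // list terminator
 };
 
 static void
 example_set_options(void)
 {
     /* Passing NULL short options lets the code above generate "?f:" from
      * the table
      */
     pcmk__set_cli_options(NULL, "[options]", example_options,
                           "Example tool description");
 }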
 
 int
 pcmk__next_cli_option(int argc, char **argv, int *index, const char **longname)
 {
 #ifdef HAVE_GETOPT_H
     static struct option *long_opts = NULL;
 
     if (long_opts == NULL && crm_long_options) {
         long_opts = create_long_opts(crm_long_options);
     }
 
     *index = 0;
     if (long_opts) {
         int flag = getopt_long(argc, argv, crm_short_options, long_opts, index);
 
         switch (flag) {
             case 0:
                 if (long_opts[*index].val) {
                     return long_opts[*index].val;
                 } else if (longname) {
                     *longname = long_opts[*index].name;
                 } else {
                     crm_notice("Unhandled option --%s", long_opts[*index].name);
                     return flag;
                 }
             case -1:           /* End of option processing */
                 break;
             case ':':
                 crm_trace("Missing argument");
                 pcmk__cli_help('?', CRM_EX_USAGE);
                 break;
             case '?':
                 pcmk__cli_help('?', (*index? CRM_EX_OK : CRM_EX_USAGE));
                 break;
         }
         return flag;
     }
 #endif
 
     if (crm_short_options) {
         return getopt(argc, argv, crm_short_options);
     }
 
     return -1;
 }
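 
 /* Illustrative sketch (not part of this patch): the typical option-parsing
  * loop built on pcmk__next_cli_option(). '?' and ':' are already translated
  * into help/usage exits inside that function; crm_bump_log_level() and
  * optarg behave as in any getopt(3)-based loop.
  */
 static void
 example_parse(int argc, char **argv)
 {
     int option_index = 0;
 
     while (1) {
         int flag = pcmk__next_cli_option(argc, argv, &option_index, NULL);
 
         if (flag == -1) {
             break;  // end of option processing
         }
         switch (flag) {
             case 'V':
                 crm_bump_log_level(argc, argv);
                 break;
             case 'f':
                 crm_trace("Got --file/-f argument: %s", optarg);
                 break;
             default:
                 break;
         }
     }
 }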
 
 void
 pcmk__cli_help(char cmd, crm_exit_t exit_code)
 {
     int i = 0;
     FILE *stream = (exit_code ? stderr : stdout);
 
     if (cmd == 'v' || cmd == '$') {
         fprintf(stream, "Pacemaker %s\n", PACEMAKER_VERSION);
-        fprintf(stream, "Written by Andrew Beekhof\n");
+        fprintf(stream, "Written by Andrew Beekhof and "
+                        "the Pacemaker project contributors\n");
         goto out;
     }
 
     if (cmd == '!') {
         fprintf(stream, "Pacemaker %s (Build: %s): %s\n", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
         goto out;
     }
 
     fprintf(stream, "%s - %s\n", crm_system_name, crm_app_description);
 
     if (crm_app_usage) {
         fprintf(stream, "Usage: %s %s\n", crm_system_name, crm_app_usage);
     }
 
     if (crm_long_options) {
         fprintf(stream, "Options:\n");
         for (i = 0; crm_long_options[i].name != NULL; i++) {
             if (crm_long_options[i].flags & pcmk__option_hidden) {
 
             } else if (crm_long_options[i].flags & pcmk__option_paragraph) {
                 fprintf(stream, "%s\n\n", crm_long_options[i].desc);
 
             } else if (crm_long_options[i].flags & pcmk__option_example) {
                 fprintf(stream, "\t#%s\n\n", crm_long_options[i].desc);
 
             } else if (crm_long_options[i].val == '-' && crm_long_options[i].desc) {
                 fprintf(stream, "%s\n", crm_long_options[i].desc);
 
             } else {
                 /* is val printable as char ? */
                 if (crm_long_options[i].val && crm_long_options[i].val <= UCHAR_MAX) {
                     fprintf(stream, " -%c,", crm_long_options[i].val);
                 } else {
                     fputs("    ", stream);
                 }
                 fprintf(stream, " --%s%s\t%s\n", crm_long_options[i].name,
                         crm_long_options[i].has_arg == optional_argument ? "[=value]" :
                         crm_long_options[i].has_arg == required_argument ? "=value" : "",
                         crm_long_options[i].desc ? crm_long_options[i].desc : "");
             }
         }
 
     } else if (crm_short_options) {
         fprintf(stream, "Usage: %s - %s\n", crm_system_name, crm_app_description);
         for (i = 0; crm_short_options[i] != 0; i++) {
             int has_arg = no_argument /* 0 */;
 
             if (crm_short_options[i + 1] == ':') {
                 if (crm_short_options[i + 2] == ':')
                     has_arg = optional_argument /* 2 */;
                 else
                     has_arg = required_argument /* 1 */;
             }
 
             fprintf(stream, " -%c %s\n", crm_short_options[i],
                     has_arg == optional_argument ? "[value]" :
                     has_arg == required_argument ? "{value}" : "");
             i += has_arg;
         }
     }
 
     fprintf(stream, "\nReport bugs to %s\n", PACKAGE_BUGREPORT);
 
   out:
     crm_exit(exit_code);
     while(1); // above does not return
 }
 
 
 /*
  * Environment variable option handling
  */
 
 /*!
  * \internal
  * \brief Get the value of a Pacemaker environment variable option
  *
  * If an environment variable option is set, with either a PCMK_ or (for
  * backward compatibility) HA_ prefix, log and return the value.
  *
  * \param[in] option  Environment variable name (without prefix)
  *
  * \return Value of environment variable option
  */
 const char *
 pcmk__env_option(const char *option)
 {
     char env_name[NAME_MAX];
     const char *value = NULL;
 
     snprintf(env_name, NAME_MAX, "PCMK_%s", option);
     value = getenv(env_name);
     if (value != NULL) {
         crm_trace("Found %s = %s", env_name, value);
         return value;
     }
 
     snprintf(env_name, NAME_MAX, "HA_%s", option);
     value = getenv(env_name);
     if (value != NULL) {
         crm_trace("Found %s = %s", env_name, value);
         return value;
     }
 
     crm_trace("Nothing found for %s", option);
     return NULL;
 }
 
 /*!
  * \brief Set or unset a Pacemaker environment variable option
  *
  * Set an environment variable option with both a PCMK_ and (for
  * backward compatibility) HA_ prefix.
  *
  * \param[in] option  Environment variable name (without prefix)
  * \param[in] value   New value (or NULL to unset)
  */
 void
 pcmk__set_env_option(const char *option, const char *value)
 {
     char env_name[NAME_MAX];
 
     snprintf(env_name, NAME_MAX, "PCMK_%s", option);
     if (value) {
         crm_trace("Setting %s to %s", env_name, value);
         setenv(env_name, value, 1);
     } else {
         crm_trace("Unsetting %s", env_name);
         unsetenv(env_name);
     }
 
     snprintf(env_name, NAME_MAX, "HA_%s", option);
     if (value) {
         crm_trace("Setting %s to %s", env_name, value);
         setenv(env_name, value, 1);
     } else {
         crm_trace("Unsetting %s", env_name);
         unsetenv(env_name);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether Pacemaker environment variable option is enabled
  *
  * Given a Pacemaker environment variable option that can either be boolean
  * or a list of daemon names, return true if the option is enabled for a given
  * daemon.
  *
  * \param[in] daemon   Daemon name
  * \param[in] option   Pacemaker environment variable name
  *
  * \return true if variable is enabled for daemon, otherwise false
  */
 bool
 pcmk__env_option_enabled(const char *daemon, const char *option)
 {
     const char *value = pcmk__env_option(option);
 
     return (value != NULL) && (crm_is_true(value) || strstr(value, daemon));
 }
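 
 /* Illustrative sketch (not part of this patch): typical use of the
  * environment option helpers. "logfile" and "debug" are real option names
  * (PCMK_logfile, PCMK_debug); the path shown is just an example value.
  */
 static void
 example_env_options(void)
 {
     // Checks PCMK_logfile first, then HA_logfile
     const char *logfile = pcmk__env_option("logfile");
 
     if (logfile == NULL) {
         // Sets both PCMK_logfile and HA_logfile, e.g. for child processes
         pcmk__set_env_option("logfile", "/var/log/pacemaker/pacemaker.log");
     }
 
     // True if PCMK_debug is a true value or lists this daemon's name
     if (pcmk__env_option_enabled("pacemaker-fenced", "debug")) {
         crm_trace("Debug output enabled for the fencer");
     }
 }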
 
 
 /*
  * Cluster option handling
  */
 
 bool
 pcmk__valid_interval_spec(const char *value)
 {
     (void) crm_parse_interval_spec(value);
     return errno == 0;
 }
 
 bool
 pcmk__valid_boolean(const char *value)
 {
     int tmp;
 
     return crm_str_to_boolean(value, &tmp) == 1;
 }
 
 bool
 pcmk__valid_number(const char *value)
 {
     if (value == NULL) {
         return false;
 
     } else if (pcmk_str_is_minus_infinity(value) ||
                pcmk_str_is_infinity(value)) {
         return true;
     }
 
     return pcmk__scan_ll(value, NULL, 0LL) == pcmk_rc_ok;
 }
 
 bool
 pcmk__valid_positive_number(const char *value)
 {
     long long num = 0LL;
 
     return pcmk_str_is_infinity(value)
            || ((pcmk__scan_ll(value, &num, 0LL) == pcmk_rc_ok) && (num > 0));
 }
 
 bool
 pcmk__valid_quorum(const char *value)
 {
     return pcmk__strcase_any_of(value, "stop", "freeze", "ignore", "demote", "suicide", NULL);
 }
 
 bool
 pcmk__valid_script(const char *value)
 {
     struct stat st;
 
     if (pcmk__str_eq(value, "/dev/null", pcmk__str_casei)) {
         return true;
     }
 
     if (stat(value, &st) != 0) {
         crm_err("Script %s does not exist", value);
         return false;
     }
 
     if (S_ISREG(st.st_mode) == 0) {
         crm_err("Script %s is not a regular file", value);
         return false;
     }
 
     if ((st.st_mode & (S_IXUSR | S_IXGRP)) == 0) {
         crm_err("Script %s is not executable", value);
         return false;
     }
 
     return true;
 }
 
 bool
 pcmk__valid_percentage(const char *value)
 {
     char *end = NULL;
     long number = strtol(value, &end, 10);
 
     if (end && (end[0] != '%')) {
         return false;
     }
     return number >= 0;
 }
 
 /*!
  * \internal
  * \brief Check a table of configured options for a particular option
  *
  * \param[in] options    Name/value pairs for configured options
  * \param[in] validate   If not NULL, validator function for option value
  * \param[in] name       Option name to look for
  * \param[in] old_name   Alternative option name to look for
  * \param[in] def_value  Default to use if option not configured
  *
  * \return Option value (from supplied options table or default value)
  */
 static const char *
 cluster_option_value(GHashTable *options, bool (*validate)(const char *),
                      const char *name, const char *old_name,
                      const char *def_value)
 {
     const char *value = NULL;
     char *new_value = NULL;
 
     CRM_ASSERT(name != NULL);
 
     if (options) {
         value = g_hash_table_lookup(options, name);
 
         if ((value == NULL) && old_name) {
             value = g_hash_table_lookup(options, old_name);
             if (value != NULL) {
                 pcmk__config_warn("Support for legacy name '%s' for cluster "
                                   "option '%s' is deprecated and will be "
                                   "removed in a future release",
                                   old_name, name);
 
                 // Inserting copy with current name ensures we only warn once
                 new_value = strdup(value);
                 g_hash_table_insert(options, strdup(name), new_value);
                 value = new_value;
             }
         }
 
         if (value && validate && (validate(value) == FALSE)) {
             pcmk__config_err("Using default value for cluster option '%s' "
                              "because '%s' is invalid", name, value);
             value = NULL;
         }
 
         if (value) {
             return value;
         }
     }
 
     // No value found, use default
     value = def_value;
 
     if (value == NULL) {
         crm_trace("No value or default provided for cluster option '%s'",
                   name);
         return NULL;
     }
 
     if (validate) {
         CRM_CHECK(validate(value) != FALSE,
                   crm_err("Bug: default value for cluster option '%s' is invalid", name);
                   return NULL);
     }
 
     crm_trace("Using default value '%s' for cluster option '%s'",
               value, name);
     if (options) {
         new_value = strdup(value);
         g_hash_table_insert(options, strdup(name), new_value);
         value = new_value;
     }
     return value;
 }
 
 /*!
  * \internal
  * \brief Get the value of a cluster option
  *
  * \param[in] options      Name/value pairs for configured options
  * \param[in] option_list  Possible cluster options
  * \param[in] name         (Primary) option name to look for
  *
  * \return Option value
  */
 const char *
 pcmk__cluster_option(GHashTable *options, pcmk__cluster_option_t *option_list,
                      int len, const char *name)
 {
     const char *value = NULL;
 
     for (int lpc = 0; lpc < len; lpc++) {
         if (pcmk__str_eq(name, option_list[lpc].name, pcmk__str_casei)) {
             value = cluster_option_value(options, option_list[lpc].is_valid,
                                          option_list[lpc].name,
                                          option_list[lpc].alt_name,
                                          option_list[lpc].default_value);
             return value;
         }
     }
     CRM_CHECK(FALSE, crm_err("Bug: looking for unknown option '%s'", name));
     return NULL;
 }
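 
 /* Illustrative sketch (not part of this patch): a hypothetical option table
  * and lookup. Designated initializers avoid depending on the exact field
  * order of pcmk__cluster_option_t; the field names follow their usage in
  * this file.
  */
 static pcmk__cluster_option_t example_cluster_opts[] = {
     {
         .name = "stonith-enabled",
         .alt_name = NULL,
         .type = "boolean",
         .values = NULL,
         .default_value = "true",
         .is_valid = pcmk__valid_boolean,
         .description_short = "Whether nodes may be fenced",
         .description_long = NULL,
     },
 };
 
 static const char *
 example_lookup(GHashTable *config)
 {
     // Falls back to the validated default ("true") if unset or invalid
     return pcmk__cluster_option(config, example_cluster_opts, 1,
                                 "stonith-enabled");
 }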
 
 char *
 pcmk__format_option_metadata(const char *name, const char *desc_short,
                              const char *desc_long,
                              pcmk__cluster_option_t *option_list, int len)
 {
 #ifdef ENABLE_NLS
     char *locale = NULL;
 #endif
     char *escaped_long = NULL;
     char *escaped_short = NULL;
     char *retval;
     /* big enough to hold "pacemaker-schedulerd metadata" output */
     GString *s = g_string_sized_new(13000);
     int lpc = 0;
 
     escaped_long = crm_xml_escape(desc_long);
     escaped_short = crm_xml_escape(desc_short);
 
     g_string_append_printf(s, "<?xml version=\"1.0\"?>"
                               "<!DOCTYPE resource-agent SYSTEM \"ra-api-1.dtd\">\n"
                               "<resource-agent name=\"%s\">\n"
                               "  <version>%s</version>\n"
                               "  <longdesc lang=\"en\">%s</longdesc>\n"
                               "  <shortdesc lang=\"en\">%s</shortdesc>\n"
                               "  <parameters>\n",
                               name, PCMK_OCF_VERSION, escaped_long, escaped_short);
     free(escaped_long);
     free(escaped_short);
 
     for (lpc = 0; lpc < len; lpc++) {
         if ((option_list[lpc].description_long == NULL)
             && (option_list[lpc].description_short == NULL)) {
             continue;
         }
 
         g_string_append_printf(s, "    <parameter name=\"%s\">\n",
                                   option_list[lpc].name);
 
         escaped_long = crm_xml_escape(option_list[lpc].description_long?
                                          option_list[lpc].description_long :
                                           option_list[lpc].description_short);
         escaped_short = crm_xml_escape(option_list[lpc].description_short);
 
         g_string_append_printf(s,
                                   "      <longdesc lang=\"en\">%s%s%s</longdesc>\n"
                                   "      <shortdesc lang=\"en\">%s</shortdesc>\n",
                                   escaped_long,
                                   (option_list[lpc].values? "  Allowed values: " : ""),
                                   (option_list[lpc].values? option_list[lpc].values : ""),
                                   escaped_short);
 
         free(escaped_long);
         free(escaped_short);
 #ifdef ENABLE_NLS
         escaped_long = crm_xml_escape(option_list[lpc].description_long?
                                          _(option_list[lpc].description_long) :
                                           _(option_list[lpc].description_short));
         escaped_short = crm_xml_escape(_(option_list[lpc].description_short));
 
         locale = strtok(setlocale(LC_ALL, NULL), "_");
         g_string_append_printf(s,
                                   "      <longdesc lang=\"%s\">%s%s%s</longdesc>\n"
                                   "      <shortdesc lang=\"%s\">%s</shortdesc>\n",
                                   locale,
                                   escaped_long,
                                   (option_list[lpc].values? "  Allowed values: " : ""),
                                   (option_list[lpc].values? option_list[lpc].values : ""),
                                   locale,
                                   escaped_short);
         free(escaped_long);
         free(escaped_short);
 #endif
 
         if (option_list[lpc].values && !strcmp(option_list[lpc].type, "select")) {
             char *str = strdup(option_list[lpc].values);
             char delim[] = ", ";
             char *ptr = strtok(str, delim);
 
             g_string_append_printf(s, "      <content type=\"%s\" default=\"%s\">\n",
                                    option_list[lpc].type,
                                    option_list[lpc].default_value);
 
             while (ptr != NULL) {
                 g_string_append_printf(s, "        <option value=\"%s\" />\n", ptr);
                 ptr = strtok(NULL, delim);
             }
 
             g_string_append_printf(s, "      </content>\n");
             free(str);
 
         } else {
             g_string_append_printf(s, "      <content type=\"%s\" default=\"%s\"/>\n",
                                    option_list[lpc].type,
                                    option_list[lpc].default_value
             );
         }
 
         g_string_append_printf(s, "    </parameter>\n");
     }
     g_string_append_printf(s, "  </parameters>\n</resource-agent>\n");
 
     retval = s->str;
     g_string_free(s, FALSE);
     return retval;
 }
 
 void
 pcmk__validate_cluster_options(GHashTable *options,
                                pcmk__cluster_option_t *option_list, int len)
 {
     for (int lpc = 0; lpc < len; lpc++) {
         cluster_option_value(options, option_list[lpc].is_valid,
                              option_list[lpc].name,
                              option_list[lpc].alt_name,
                              option_list[lpc].default_value);
     }
 }
diff --git a/lib/common/output_html.c b/lib/common/output_html.c
index c5427b4ece..54d3fd54ab 100644
--- a/lib/common/output_html.c
+++ b/lib/common/output_html.c
@@ -1,459 +1,461 @@
 /*
- * Copyright 2019-2021 the Pacemaker project contributors
+ * Copyright 2019-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <ctype.h>
 #include <libxml/HTMLtree.h>
 #include <stdarg.h>
 #include <stdlib.h>
 #include <stdio.h>
 
 #include <crm/common/xml.h>
 
 static const char *stylesheet_default =
     ".bold { font-weight: bold }\n"
     ".maint { color: blue }\n"
     ".offline { color: red }\n"
     ".online { color: green }\n"
     ".rsc-failed { color: red }\n"
     ".rsc-failure-ignored { color: yellow }\n"
     ".rsc-managed { color: yellow }\n"
     ".rsc-multiple { color: orange }\n"
     ".rsc-ok { color: green }\n"
     ".standby { color: orange }\n"
     ".warning { color: red, font-weight: bold }";
 
 static gboolean cgi_output = FALSE;
 static char *stylesheet_link = NULL;
 static char *title = NULL;
 static GSList *extra_headers = NULL;
 
 GOptionEntry pcmk__html_output_entries[] = {
     { "html-cgi", 0, 0, G_OPTION_ARG_NONE, &cgi_output,
       "Add CGI headers (requires --output-as=html)",
       NULL },
 
     { "html-stylesheet", 0, 0, G_OPTION_ARG_STRING, &stylesheet_link,
       "Link to an external stylesheet (requires --output-as=html)",
       "URI" },
 
     { "html-title", 0, 0, G_OPTION_ARG_STRING, &title,
       "Specify a page title (requires --output-as=html)",
       "TITLE" },
 
     { NULL }
 };
 
 /* The first several elements of this struct must be the same as the first
  * several elements of private_data_s in lib/common/output_xml.c.  This
  * struct gets passed to a bunch of the pcmk__output_xml_* functions which
  * assume an XML private_data_s.  Keeping them laid out the same means this
  * still works.
  */
 typedef struct private_data_s {
     /* Begin members that must match the XML version */
     xmlNode *root;
     GQueue *parent_q;
     GSList *errors;
     /* End members that must match the XML version */
 } private_data_t;
 
 static void
 html_free_priv(pcmk__output_t *out) {
     private_data_t *priv = out->priv;
 
     if (priv == NULL) {
         return;
     }
 
     xmlFreeNode(priv->root);
     g_queue_free(priv->parent_q);
     g_slist_free(priv->errors);
     free(priv);
     out->priv = NULL;
 }
 
 static bool
 html_init(pcmk__output_t *out) {
     private_data_t *priv = NULL;
 
     /* If html_init was previously called on this output struct, just return. */
     if (out->priv != NULL) {
         return true;
     } else {
         out->priv = calloc(1, sizeof(private_data_t));
         if (out->priv == NULL) {
             return false;
         }
 
         priv = out->priv;
     }
 
     priv->parent_q = g_queue_new();
 
     priv->root = create_xml_node(NULL, "html");
     xmlCreateIntSubset(priv->root->doc, (pcmkXmlStr) "html", NULL, NULL);
 
     crm_xml_add(priv->root, "lang", "en");
     g_queue_push_tail(priv->parent_q, priv->root);
     priv->errors = NULL;
 
     pcmk__output_xml_create_parent(out, "body", NULL);
 
     return true;
 }
 
 static void
 add_error_node(gpointer data, gpointer user_data) {
     char *str = (char *) data;
     pcmk__output_t *out = (pcmk__output_t *) user_data;
     out->list_item(out, NULL, "%s", str);
 }
 
 static void
 html_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_dest) {
     private_data_t *priv = out->priv;
     htmlNodePtr head_node = NULL;
     htmlNodePtr charset_node = NULL;
 
     /* If root is NULL, html_init failed and we are being called from pcmk__output_free
      * in the pcmk__output_new path.
      */
     if (priv == NULL || priv->root == NULL) {
         return;
     }
 
     if (cgi_output && print) {
         fprintf(out->dest, "Content-Type: text/html\n\n");
     }
 
     /* Add the head node last - it's not needed earlier because it doesn't contain
      * anything else that the user could add, and we want it done last to pick up
      * any options that may have been given.
      */
     head_node = xmlNewNode(NULL, (pcmkXmlStr) "head");
 
     if (title != NULL ) {
         pcmk_create_xml_text_node(head_node, "title", title);
     } else if (out->request != NULL) {
         pcmk_create_xml_text_node(head_node, "title", out->request);
     }
 
     charset_node = create_xml_node(head_node, "meta");
     crm_xml_add(charset_node, "charset", "utf-8");
 
     /* Add any extra header nodes the caller might have created. */
     for (int i = 0; i < g_slist_length(extra_headers); i++) {
         xmlAddChild(head_node, xmlCopyNode(g_slist_nth_data(extra_headers, i), 1));
     }
 
     /* Stylesheets are included two different ways.  The first is via a built-in
      * default (see the stylesheet_default const above).  The second is via the
      * html-stylesheet option, and this should obviously be a link to a
      * stylesheet.  The second can override the first.  At least one should be
      * given.
      */
     pcmk_create_xml_text_node(head_node, "style", stylesheet_default);
 
     if (stylesheet_link != NULL) {
         htmlNodePtr link_node = create_xml_node(head_node, "link");
         pcmk__xe_set_props(link_node, "rel", "stylesheet",
                            "href", stylesheet_link,
                            NULL);
     }
 
     xmlAddPrevSibling(priv->root->children, head_node);
 
     if (g_slist_length(priv->errors) > 0) {
         out->begin_list(out, "Errors", NULL, NULL);
         g_slist_foreach(priv->errors, add_error_node, (gpointer) out);
         out->end_list(out);
     }
 
     if (print) {
         htmlDocDump(out->dest, priv->root->doc);
     }
 
     if (copy_dest != NULL) {
         *copy_dest = copy_xml(priv->root);
     }
 
     g_slist_free_full(extra_headers, (GDestroyNotify) xmlFreeNode);
     extra_headers = NULL;
 }
 
 static void
 html_reset(pcmk__output_t *out) {
     CRM_ASSERT(out != NULL);
 
     out->dest = freopen(NULL, "w", out->dest);
     CRM_ASSERT(out->dest != NULL);
 
     html_free_priv(out);
     html_init(out);
 }
 
 static void
 html_subprocess_output(pcmk__output_t *out, int exit_status,
                       const char *proc_stdout, const char *proc_stderr) {
     char *rc_buf = NULL;
 
     CRM_ASSERT(out != NULL);
 
     rc_buf = crm_strdup_printf("Return code: %d", exit_status);
 
     pcmk__output_create_xml_text_node(out, "h2", "Command Output");
     pcmk__output_create_html_node(out, "div", NULL, NULL, rc_buf);
 
     if (proc_stdout != NULL) {
         pcmk__output_create_html_node(out, "div", NULL, NULL, "Stdout");
         pcmk__output_create_html_node(out, "div", NULL, "output", proc_stdout);
     }
     if (proc_stderr != NULL) {
         pcmk__output_create_html_node(out, "div", NULL, NULL, "Stderr");
         pcmk__output_create_html_node(out, "div", NULL, "output", proc_stderr);
     }
 
     free(rc_buf);
 }
 
 static void
 html_version(pcmk__output_t *out, bool extended) {
     CRM_ASSERT(out != NULL);
 
     pcmk__output_create_xml_text_node(out, "h2", "Version Information");
     pcmk__output_create_html_node(out, "div", NULL, NULL, "Program: Pacemaker");
     pcmk__output_create_html_node(out, "div", NULL, NULL, crm_strdup_printf("Version: %s", PACEMAKER_VERSION));
-    pcmk__output_create_html_node(out, "div", NULL, NULL, "Author: Andrew Beekhof");
+    pcmk__output_create_html_node(out, "div", NULL, NULL,
+                                  "Author: Andrew Beekhof and "
+                                  "the Pacemaker project contributors");
     pcmk__output_create_html_node(out, "div", NULL, NULL, crm_strdup_printf("Build: %s", BUILD_VERSION));
     pcmk__output_create_html_node(out, "div", NULL, NULL, crm_strdup_printf("Features: %s", CRM_FEATURES));
 }
 
 G_GNUC_PRINTF(2, 3)
 static void
 html_err(pcmk__output_t *out, const char *format, ...) {
     private_data_t *priv = NULL;
     int len = 0;
     char *buf = NULL;
     va_list ap;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     va_start(ap, format);
     len = vasprintf(&buf, format, ap);
     CRM_ASSERT(len >= 0);
     va_end(ap);
 
     priv->errors = g_slist_append(priv->errors, buf);
 }
 
 G_GNUC_PRINTF(2, 3)
 static int
 html_info(pcmk__output_t *out, const char *format, ...) {
     return pcmk_rc_no_output;
 }
 
 static void
 html_output_xml(pcmk__output_t *out, const char *name, const char *buf) {
     htmlNodePtr node = NULL;
 
     CRM_ASSERT(out != NULL);
 
     node = pcmk__output_create_html_node(out, "pre", NULL, NULL, buf);
     crm_xml_add(node, "lang", "xml");
 }
 
 G_GNUC_PRINTF(4, 5)
 static void
 html_begin_list(pcmk__output_t *out, const char *singular_noun,
                 const char *plural_noun, const char *format, ...) {
     int q_len = 0;
     private_data_t *priv = NULL;
     xmlNodePtr node = NULL;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     /* If we are already in a list (the queue depth is always at least
      * one because of the <html> element), first create a <li> element
      * to hold the <h2> and the new list.
      */
     q_len = g_queue_get_length(priv->parent_q);
     if (q_len > 2) {
         pcmk__output_xml_create_parent(out, "li", NULL);
     }
 
     if (format != NULL) {
         va_list ap;
         char *buf = NULL;
         int len;
 
         va_start(ap, format);
         len = vasprintf(&buf, format, ap);
         va_end(ap);
         CRM_ASSERT(len >= 0);
 
         if (q_len > 2) {
             pcmk__output_create_xml_text_node(out, "h3", buf);
         } else {
             pcmk__output_create_xml_text_node(out, "h2", buf);
         }
 
         free(buf);
     }
 
     node = pcmk__output_xml_create_parent(out, "ul", NULL);
     g_queue_push_tail(priv->parent_q, node);
 }
 
 G_GNUC_PRINTF(3, 4)
 static void
 html_list_item(pcmk__output_t *out, const char *name, const char *format, ...) {
     htmlNodePtr item_node = NULL;
     va_list ap;
     char *buf = NULL;
     int len;
 
     CRM_ASSERT(out != NULL);
 
     va_start(ap, format);
     len = vasprintf(&buf, format, ap);
     CRM_ASSERT(len >= 0);
     va_end(ap);
 
     item_node = pcmk__output_create_xml_text_node(out, "li", buf);
     free(buf);
 
     if (name != NULL) {
         crm_xml_add(item_node, "class", name);
     }
 }
 
 static void
 html_increment_list(pcmk__output_t *out) {
     /* This function intentionally left blank */
 }
 
 static void
 html_end_list(pcmk__output_t *out) {
     private_data_t *priv = NULL;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     /* Remove the <ul> tag. */
     g_queue_pop_tail(priv->parent_q);
     pcmk__output_xml_pop_parent(out);
 
     /* Remove the <li> created for nested lists. */
     if (g_queue_get_length(priv->parent_q) > 2) {
         pcmk__output_xml_pop_parent(out);
     }
 }
 
 static bool
 html_is_quiet(pcmk__output_t *out) {
     return false;
 }
 
 static void
 html_spacer(pcmk__output_t *out) {
     CRM_ASSERT(out != NULL);
     pcmk__output_create_xml_node(out, "br", NULL);
 }
 
 static void
 html_progress(pcmk__output_t *out, bool end) {
     /* This function intentionally left blank */
 }
 
 pcmk__output_t *
 pcmk__mk_html_output(char **argv) {
     pcmk__output_t *retval = calloc(1, sizeof(pcmk__output_t));
 
     if (retval == NULL) {
         return NULL;
     }
 
     retval->fmt_name = "html";
     retval->request = argv == NULL ? NULL : g_strjoinv(" ", argv);
 
     retval->init = html_init;
     retval->free_priv = html_free_priv;
     retval->finish = html_finish;
     retval->reset = html_reset;
 
     retval->register_message = pcmk__register_message;
     retval->message = pcmk__call_message;
 
     retval->subprocess_output = html_subprocess_output;
     retval->version = html_version;
     retval->info = html_info;
     retval->err = html_err;
     retval->output_xml = html_output_xml;
 
     retval->begin_list = html_begin_list;
     retval->list_item = html_list_item;
     retval->increment_list = html_increment_list;
     retval->end_list = html_end_list;
 
     retval->is_quiet = html_is_quiet;
     retval->spacer = html_spacer;
     retval->progress = html_progress;
     retval->prompt = pcmk__text_prompt;
 
     return retval;
 }
 
 xmlNodePtr
 pcmk__output_create_html_node(pcmk__output_t *out, const char *element_name, const char *id,
                        const char *class_name, const char *text) {
     htmlNodePtr node = NULL;
 
     CRM_ASSERT(out != NULL);
 
     node = pcmk__output_create_xml_text_node(out, element_name, text);
 
     if (class_name != NULL) {
         crm_xml_add(node, "class", class_name);
     }
 
     if (id != NULL) {
         crm_xml_add(node, "id", id);
     }
 
     return node;
 }
 
 void
 pcmk__html_add_header(const char *name, ...) {
     htmlNodePtr header_node;
     va_list ap;
 
     va_start(ap, name);
 
     header_node = xmlNewNode(NULL, (pcmkXmlStr) name);
     while (1) {
         char *key = va_arg(ap, char *);
         char *value;
 
         if (key == NULL) {
             break;
         }
 
         value = va_arg(ap, char *);
         crm_xml_add(header_node, key, value);
     }
 
     extra_headers = g_slist_append(extra_headers, header_node);
 
     va_end(ap);
 }
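 
 /* Illustrative sketch (not part of this patch): queueing an extra <head>
  * element before html_finish() runs, using the NULL-terminated key/value
  * varargs convention above. This is the kind of header crm_mon's HTML mode
  * adds to make a page self-refresh.
  */
 static void
 example_add_refresh_header(void)
 {
     // Produces <meta http-equiv="refresh" content="5"/> in the <head>
     pcmk__html_add_header("meta",
                           "http-equiv", "refresh",
                           "content", "5",
                           NULL);
 }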
diff --git a/lib/common/output_log.c b/lib/common/output_log.c
index b2b61a7448..cae7844792 100644
--- a/lib/common/output_log.c
+++ b/lib/common/output_log.c
@@ -1,314 +1,315 @@
 /*
- * Copyright 2019-2021 the Pacemaker project contributors
+ * Copyright 2019-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <ctype.h>
 #include <stdarg.h>
 #include <stdlib.h>
 #include <stdio.h>
 
 GOptionEntry pcmk__log_output_entries[] = {
     { NULL }
 };
 
 typedef struct private_data_s {
     /* gathered in log_begin_list */
     GQueue/*<char*>*/ *prefixes;
     int log_level;
 } private_data_t;
 
 static void
 log_subprocess_output(pcmk__output_t *out, int exit_status,
                       const char *proc_stdout, const char *proc_stderr) {
     /* This function intentionally left blank */
 }
 
 static void
 log_free_priv(pcmk__output_t *out) {
     private_data_t *priv = out->priv;
 
     if (priv == NULL) {
         return;
     }
 
     g_queue_free(priv->prefixes);
     free(priv);
     out->priv = NULL;
 }
 
 static bool
 log_init(pcmk__output_t *out) {
     private_data_t *priv = NULL;
 
     /* If log_init was previously called on this output struct, just return. */
     if (out->priv != NULL) {
         return true;
     }
 
     out->priv = calloc(1, sizeof(private_data_t));
     if (out->priv == NULL) {
         return false;
     }
 
     priv = out->priv;
 
     priv->prefixes = g_queue_new();
     priv->log_level = LOG_INFO;
 
     return true;
 }
 
 static void
 log_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_dest) {
     /* This function intentionally left blank */
 }
 
 static void
 log_reset(pcmk__output_t *out) {
     CRM_ASSERT(out != NULL);
 
     out->dest = freopen(NULL, "w", out->dest);
     CRM_ASSERT(out->dest != NULL);
 
     log_free_priv(out);
     log_init(out);
 }
 
 static void
 log_version(pcmk__output_t *out, bool extended) {
     private_data_t *priv = NULL;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     if (extended) {
         do_crm_log(priv->log_level, "Pacemaker %s (Build: %s): %s",
                    PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
     } else {
         do_crm_log(priv->log_level, "Pacemaker %s", PACEMAKER_VERSION);
-        do_crm_log(priv->log_level, "Written by Andrew Beekhof");
+        do_crm_log(priv->log_level, "Written by Andrew Beekhof and "
+                                    "the Pacemaker project contributors");
     }
 }
 
 G_GNUC_PRINTF(2, 3)
 static void
 log_err(pcmk__output_t *out, const char *format, ...) {
     va_list ap;
     char* buffer = NULL;
     int len = 0;
 
     CRM_ASSERT(out != NULL);
 
     va_start(ap, format);
     /* Informational output does not get indented, to separate it from other
      * potentially indented list output.
      */
     len = vasprintf(&buffer, format, ap);
     CRM_ASSERT(len >= 0);
     va_end(ap);
 
     crm_err("%s", buffer);
 
     free(buffer);
 }
 
 static void
 log_output_xml(pcmk__output_t *out, const char *name, const char *buf) {
     xmlNodePtr node = NULL;
     private_data_t *priv = NULL;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     node = create_xml_node(NULL, name);
     xmlNodeSetContent(node, (pcmkXmlStr) buf);
     do_crm_log_xml(priv->log_level, name, node);
     free_xml(node);
 }
 
 G_GNUC_PRINTF(4, 5)
 static void
 log_begin_list(pcmk__output_t *out, const char *singular_noun, const char *plural_noun,
                const char *format, ...) {
     int len = 0;
     va_list ap;
     char* buffer = NULL;
     private_data_t *priv = NULL;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     va_start(ap, format);
     len = vasprintf(&buffer, format, ap);
     CRM_ASSERT(len >= 0);
     va_end(ap);
 
     /* Push even empty prefixes; otherwise the pops in log_end_list()
      * would get out of step with the pushes here.
      */
 
     g_queue_push_tail(priv->prefixes, buffer);
 }
 
 G_GNUC_PRINTF(3, 4)
 static void
 log_list_item(pcmk__output_t *out, const char *name, const char *format, ...) {
     int len = 0;
     va_list ap;
     private_data_t *priv = NULL;
     char prefix[LINE_MAX] = { 0 };
     int offset = 0;
     char* buffer = NULL;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     for (GList* gIter = priv->prefixes->head; gIter; gIter = gIter->next) {
         if (strcmp(prefix, "") != 0) {
             offset += snprintf(prefix + offset, LINE_MAX - offset, ": %s", (char *)gIter->data);
         } else {
             offset = snprintf(prefix, LINE_MAX, "%s", (char *)gIter->data);
         }
     }
 
     va_start(ap, format);
     len = vasprintf(&buffer, format, ap);
     CRM_ASSERT(len >= 0);
     va_end(ap);
 
     if (strcmp(buffer, "") != 0) { /* We don't want empty messages */
         if ((name != NULL) && (strcmp(name, "") != 0)) {
             if (strcmp(prefix, "") != 0) {
                 do_crm_log(priv->log_level, "%s: %s: %s", prefix, name, buffer);
             } else {
                 do_crm_log(priv->log_level, "%s: %s", name, buffer);
             }
         } else {
             if (strcmp(prefix, "") != 0) {
                 do_crm_log(priv->log_level, "%s: %s", prefix, buffer);
             } else {
                 do_crm_log(priv->log_level, "%s", buffer);
             }
         }
     }
     free(buffer);
 }
 
 static void
 log_end_list(pcmk__output_t *out) {
     private_data_t *priv = NULL;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     if (priv->prefixes == NULL) {
         return;
     }
     CRM_ASSERT(priv->prefixes->tail != NULL);
 
     free((char *)priv->prefixes->tail->data);
     g_queue_pop_tail(priv->prefixes);
 }
 
 G_GNUC_PRINTF(2, 3)
 static int
 log_info(pcmk__output_t *out, const char *format, ...) {
     private_data_t *priv = NULL;
     int len = 0;
     va_list ap;
     char* buffer = NULL;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     va_start(ap, format);
     len = vasprintf(&buffer, format, ap);
     CRM_ASSERT(len >= 0);
     va_end(ap);
 
     do_crm_log(priv->log_level, "%s", buffer);
 
     free(buffer);
     return pcmk_rc_ok;
 }
 
 static bool
 log_is_quiet(pcmk__output_t *out) {
     return false;
 }
 
 static void
 log_spacer(pcmk__output_t *out) {
     /* This function intentionally left blank */
 }
 
 static void
 log_progress(pcmk__output_t *out, bool end) {
     /* This function intentionally left blank */
 }
 
 static void
 log_prompt(const char *prompt, bool echo, char **dest) {
     /* This function intentionally left blank */
 }
 
 pcmk__output_t *
 pcmk__mk_log_output(char **argv) {
     pcmk__output_t *retval = calloc(1, sizeof(pcmk__output_t));
 
     if (retval == NULL) {
         return NULL;
     }
 
     retval->fmt_name = "log";
     retval->request = argv == NULL ? NULL : g_strjoinv(" ", argv);
 
     retval->init = log_init;
     retval->free_priv = log_free_priv;
     retval->finish = log_finish;
     retval->reset = log_reset;
 
     retval->register_message = pcmk__register_message;
     retval->message = pcmk__call_message;
 
     retval->subprocess_output = log_subprocess_output;
     retval->version = log_version;
     retval->info = log_info;
     retval->err = log_err;
     retval->output_xml = log_output_xml;
 
     retval->begin_list = log_begin_list;
     retval->list_item = log_list_item;
     retval->end_list = log_end_list;
 
     retval->is_quiet = log_is_quiet;
     retval->spacer = log_spacer;
     retval->progress = log_progress;
     retval->prompt = log_prompt;
 
     return retval;
 }
 
 void
 pcmk__output_set_log_level(pcmk__output_t *out, int log_level) {
     private_data_t *priv = NULL;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
 
     if (!pcmk__str_eq(out->fmt_name, "log", pcmk__str_none)) {
         return;
     }
 
     priv = out->priv;
     priv->log_level = log_level;
 }
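 
 /* Illustrative sketch (not part of this patch): creating a log-format output
  * directly and raising its severity. pcmk__output_free() is assumed from the
  * formatted-output API; real tools normally obtain the object via
  * pcmk__output_new() instead of calling the constructor themselves.
  */
 static void
 example_log_output(char **argv)
 {
     pcmk__output_t *out = pcmk__mk_log_output(argv);
 
     if ((out == NULL) || !out->init(out)) {
         return;
     }
     pcmk__output_set_log_level(out, LOG_NOTICE);    // default is LOG_INFO
     out->info(out, "This goes to the log at notice level");
     pcmk__output_free(out);
 }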
diff --git a/lib/common/output_text.c b/lib/common/output_text.c
index def89fa349..915e6a8258 100644
--- a/lib/common/output_text.c
+++ b/lib/common/output_text.c
@@ -1,433 +1,434 @@
 /*
  * Copyright 2019-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdarg.h>
 #include <stdlib.h>
 #include <glib.h>
 #include <termios.h>
 
 static gboolean fancy = FALSE;
 
 GOptionEntry pcmk__text_output_entries[] = {
     { "text-fancy", 0, 0, G_OPTION_ARG_NONE, &fancy,
       "Use more highly formatted output (requires --output-as=text)",
       NULL },
 
     { NULL }
 };
 
 typedef struct text_list_data_s {
     unsigned int len;
     char *singular_noun;
     char *plural_noun;
 } text_list_data_t;
 
 typedef struct private_data_s {
     GQueue *parent_q;
 } private_data_t;
 
 static void
 text_free_priv(pcmk__output_t *out) {
     private_data_t *priv = out->priv;
 
     if (priv == NULL) {
         return;
     }
 
     g_queue_free(priv->parent_q);
     free(priv);
     out->priv = NULL;
 }
 
 static bool
 text_init(pcmk__output_t *out) {
     private_data_t *priv = NULL;
 
     /* If text_init was previously called on this output struct, just return. */
     if (out->priv != NULL) {
         return true;
     } else {
         out->priv = calloc(1, sizeof(private_data_t));
         if (out->priv == NULL) {
             return false;
         }
 
         priv = out->priv;
     }
 
     priv->parent_q = g_queue_new();
     return true;
 }
 
 static void
 text_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_dest) {
     fflush(out->dest);
 }
 
 static void
 text_reset(pcmk__output_t *out) {
     CRM_ASSERT(out != NULL);
 
     if (out->dest != stdout) {
         out->dest = freopen(NULL, "w", out->dest);
     }
 
     CRM_ASSERT(out->dest != NULL);
 
     text_free_priv(out);
     text_init(out);
 }
 
 static void
 text_subprocess_output(pcmk__output_t *out, int exit_status,
                        const char *proc_stdout, const char *proc_stderr) {
     CRM_ASSERT(out != NULL);
 
     if (proc_stdout != NULL) {
         fprintf(out->dest, "%s\n", proc_stdout);
     }
 
     if (proc_stderr != NULL) {
         fprintf(out->dest, "%s\n", proc_stderr);
     }
 }
 
 static void
 text_version(pcmk__output_t *out, bool extended) {
     CRM_ASSERT(out != NULL);
 
     if (extended) {
         fprintf(out->dest, "Pacemaker %s (Build: %s): %s\n", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
     } else {
         fprintf(out->dest, "Pacemaker %s\n", PACEMAKER_VERSION);
-        fprintf(out->dest, "Written by Andrew Beekhof\n");
+        fprintf(out->dest, "Written by Andrew Beekhof and "
+                           "the Pacemaker project contributors\n");
     }
 }
 
 G_GNUC_PRINTF(2, 3)
 static void
 text_err(pcmk__output_t *out, const char *format, ...) {
     va_list ap;
     int len = 0;
 
     CRM_ASSERT(out != NULL);
 
     va_start(ap, format);
 
     /* Informational output does not get indented, to separate it from other
      * potentially indented list output.
      */
     len = vfprintf(stderr, format, ap);
     CRM_ASSERT(len >= 0);
     va_end(ap);
 
     /* Add a newline. */
     fprintf(stderr, "\n");
 }
 
 G_GNUC_PRINTF(2, 3)
 static int
 text_info(pcmk__output_t *out, const char *format, ...) {
     va_list ap;
     int len = 0;
 
     CRM_ASSERT(out != NULL);
 
     if (out->is_quiet(out)) {
         return pcmk_rc_no_output;
     }
 
     va_start(ap, format);
 
     /* Informational output does not get indented, to separate it from other
      * potentially indented list output.
      */
     len = vfprintf(out->dest, format, ap);
     CRM_ASSERT(len >= 0);
     va_end(ap);
 
     /* Add a newline. */
     fprintf(out->dest, "\n");
     return pcmk_rc_ok;
 }
 
 static void
 text_output_xml(pcmk__output_t *out, const char *name, const char *buf) {
     CRM_ASSERT(out != NULL);
     pcmk__indented_printf(out, "%s", buf);
 }
 
 G_GNUC_PRINTF(4, 5)
 static void
 text_begin_list(pcmk__output_t *out, const char *singular_noun, const char *plural_noun,
                 const char *format, ...) {
     private_data_t *priv = NULL;
     text_list_data_t *new_list = NULL;
     va_list ap;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     va_start(ap, format);
 
     if (fancy && format) {
         pcmk__indented_vprintf(out, format, ap);
         fprintf(out->dest, ":\n");
     }
 
     va_end(ap);
 
     new_list = calloc(1, sizeof(text_list_data_t));
     new_list->len = 0;
     pcmk__str_update(&new_list->singular_noun, singular_noun);
     pcmk__str_update(&new_list->plural_noun, plural_noun);
 
     g_queue_push_tail(priv->parent_q, new_list);
 }
 
 G_GNUC_PRINTF(3, 4)
 static void
 text_list_item(pcmk__output_t *out, const char *id, const char *format, ...) {
     va_list ap;
 
     CRM_ASSERT(out != NULL);
 
     va_start(ap, format);
 
     if (fancy) {
         if (id != NULL) {
             /* Not really a good way to do this all in one call, so make it two.
              * The first handles the indentation and list styling.  The second
              * just prints right after that one.
              */
             pcmk__indented_printf(out, "%s: ", id);
             vfprintf(out->dest, format, ap);
         } else {
             pcmk__indented_vprintf(out, format, ap);
         }
     } else {
         pcmk__indented_vprintf(out, format, ap);
     }
 
     fputc('\n', out->dest);
     fflush(out->dest);
     va_end(ap);
 
     out->increment_list(out);
 }
 
 static void
 text_increment_list(pcmk__output_t *out) {
     private_data_t *priv = NULL;
     gpointer tail;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     tail = g_queue_peek_tail(priv->parent_q);
     CRM_ASSERT(tail != NULL);
     ((text_list_data_t *) tail)->len++;
 }
 
 static void
 text_end_list(pcmk__output_t *out) {
     private_data_t *priv = NULL;
     text_list_data_t *node = NULL;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     node = g_queue_pop_tail(priv->parent_q);
 
     if (node->singular_noun != NULL && node->plural_noun != NULL) {
         if (node->len == 1) {
             pcmk__indented_printf(out, "%d %s found\n", node->len, node->singular_noun);
         } else {
             pcmk__indented_printf(out, "%d %s found\n", node->len, node->plural_noun);
         }
     }
 
     free(node);
 }
 
 static bool
 text_is_quiet(pcmk__output_t *out) {
     CRM_ASSERT(out != NULL);
     return out->quiet;
 }
 
 static void
 text_spacer(pcmk__output_t *out) {
     CRM_ASSERT(out != NULL);
     fprintf(out->dest, "\n");
 }
 
 static void
 text_progress(pcmk__output_t *out, bool end) {
     CRM_ASSERT(out != NULL);
 
     if (out->dest == stdout) {
         fprintf(out->dest, ".");
 
         if (end) {
             fprintf(out->dest, "\n");
         }
     }
 }
 
 pcmk__output_t *
 pcmk__mk_text_output(char **argv) {
     pcmk__output_t *retval = calloc(1, sizeof(pcmk__output_t));
 
     if (retval == NULL) {
         return NULL;
     }
 
     retval->fmt_name = "text";
     retval->request = argv == NULL ? NULL : g_strjoinv(" ", argv);
 
     retval->init = text_init;
     retval->free_priv = text_free_priv;
     retval->finish = text_finish;
     retval->reset = text_reset;
 
     retval->register_message = pcmk__register_message;
     retval->message = pcmk__call_message;
 
     retval->subprocess_output = text_subprocess_output;
     retval->version = text_version;
     retval->info = text_info;
     retval->err = text_err;
     retval->output_xml = text_output_xml;
 
     retval->begin_list = text_begin_list;
     retval->list_item = text_list_item;
     retval->increment_list = text_increment_list;
     retval->end_list = text_end_list;
 
     retval->is_quiet = text_is_quiet;
     retval->spacer = text_spacer;
     retval->progress = text_progress;
     retval->prompt = pcmk__text_prompt;
 
     return retval;
 }
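 
 /* Illustrative sketch (not part of this patch): driving the text formatter
  * through the generic output API. It assumes pcmk__output_new() with this
  * signature and a registered "text" format; with --text-fancy the list below
  * gains indentation and bullet points.
  */
 static int
 example_text_list(char **argv)
 {
     pcmk__output_t *out = NULL;
     int rc = pcmk__output_new(&out, "text", NULL /* stdout */, argv);
 
     if (rc != pcmk_rc_ok) {
         return rc;
     }
     out->begin_list(out, "resource", "resources", "Active Resources");
     out->list_item(out, NULL, "dummy (ocf:pacemaker:Dummy): Started");
     out->end_list(out);     // prints "1 resource found"
     out->finish(out, CRM_EX_OK, true, NULL);
     pcmk__output_free(out);
     return pcmk_rc_ok;
 }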
 
 G_GNUC_PRINTF(2, 0)
 void
 pcmk__formatted_vprintf(pcmk__output_t *out, const char *format, va_list args) {
     int len = 0;
 
     CRM_ASSERT(out != NULL);
 
     len = vfprintf(out->dest, format, args);
     CRM_ASSERT(len >= 0);
 }
 
 G_GNUC_PRINTF(2, 3)
 void
 pcmk__formatted_printf(pcmk__output_t *out, const char *format, ...) {
     va_list ap;
 
     CRM_ASSERT(out != NULL);
 
     va_start(ap, format);
     pcmk__formatted_vprintf(out, format, ap);
     va_end(ap);
 }
 
 G_GNUC_PRINTF(2, 0)
 void
 pcmk__indented_vprintf(pcmk__output_t *out, const char *format, va_list args) {
     CRM_ASSERT(out != NULL);
 
     if (!pcmk__str_eq(out->fmt_name, "text", pcmk__str_none)) {
         return;
     }
 
     if (fancy) {
         int level = 0;
         private_data_t *priv = out->priv;
 
         CRM_ASSERT(priv != NULL);
 
         level = g_queue_get_length(priv->parent_q);
 
         for (int i = 0; i < level; i++) {
             fprintf(out->dest, "  ");
         }
 
         if (level > 0) {
             fprintf(out->dest, "* ");
         }
     }
 
     pcmk__formatted_vprintf(out, format, args);
 }
 
 G_GNUC_PRINTF(2, 3)
 void
 pcmk__indented_printf(pcmk__output_t *out, const char *format, ...) {
     va_list ap;
 
     CRM_ASSERT(out != NULL);
 
     va_start(ap, format);
     pcmk__indented_vprintf(out, format, ap);
     va_end(ap);
 }
 
 void
 pcmk__text_prompt(const char *prompt, bool echo, char **dest)
 {
     int rc = 0;
     struct termios settings;
     tcflag_t orig_c_lflag = 0;
 
     CRM_ASSERT(prompt != NULL);
     CRM_ASSERT(dest != NULL);
 
     if (!echo) {
         rc = tcgetattr(0, &settings);
         if (rc == 0) {
             orig_c_lflag = settings.c_lflag;
             settings.c_lflag &= ~ECHO;
             rc = tcsetattr(0, TCSANOW, &settings);
         }
     }
 
     if (rc == 0) {
         fprintf(stderr, "%s: ", prompt);
 
         if (*dest != NULL) {
             free(*dest);
             *dest = NULL;
         }
 
 #if SSCANF_HAS_M
         rc = scanf("%ms", dest);
 #else
         *dest = calloc(1, 1024);
         rc = scanf("%1023s", *dest);
 #endif
         fprintf(stderr, "\n");
     }
 
     if (rc < 1) {
         free(*dest);
         *dest = NULL;
     }
 
     if (orig_c_lflag != 0) {
         settings.c_lflag = orig_c_lflag;
         /* rc = */ tcsetattr(0, TCSANOW, &settings);
     }
 }
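 
 /* Illustrative sketch (not part of this patch): prompting for a secret with
  * echo disabled. On success, *dest is heap-allocated and owned by the caller.
  */
 static void
 example_prompt_password(void)
 {
     char *password = NULL;
 
     pcmk__text_prompt("Password", false, &password);
     if (password != NULL) {
         // ... use password ...
         free(password);
     }
 }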
diff --git a/lib/common/output_xml.c b/lib/common/output_xml.c
index 6f4dd06228..c4e6df28c9 100644
--- a/lib/common/output_xml.c
+++ b/lib/common/output_xml.c
@@ -1,537 +1,538 @@
 /*
- * Copyright 2019-2021 the Pacemaker project contributors
+ * Copyright 2019-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <ctype.h>
 #include <stdarg.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <glib.h>
 
 #include <crm/common/xml.h>
 
 static gboolean legacy_xml = FALSE;
 static gboolean simple_list = FALSE;
 static gboolean substitute = FALSE;
 
 GOptionEntry pcmk__xml_output_entries[] = {
     { "xml-legacy", 0, G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_NONE, &legacy_xml,
       NULL,
       NULL },
     { "xml-simple-list", 0, G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_NONE, &simple_list,
       NULL,
       NULL },
     { "xml-substitute", 0, G_OPTION_FLAG_HIDDEN, G_OPTION_ARG_NONE, &substitute,
       NULL,
       NULL },
 
     { NULL }
 };
 
 typedef struct subst_s {
     const char *from;
     const char *to;
 } subst_t;
 
 static subst_t substitutions[] = {
     { "Active Resources",                               "resources" },
     { "Allocation Scores",                              "allocations" },
     { "Allocation Scores and Utilization Information",  "allocations_utilizations" },
     { "Cluster Summary",                                "summary" },
     { "Current cluster status",                         "cluster_status" },
     { "Executing Cluster Transition",                   "transition" },
     { "Failed Resource Actions",                        "failures" },
     { "Fencing History",                                "fence_history" },
     { "Full List of Resources",                         "resources" },
     { "Inactive Resources",                             "resources" },
     { "Migration Summary",                              "node_history" },
     { "Negative Location Constraints",                  "bans" },
     { "Node Attributes",                                "node_attributes" },
     { "Operations",                                     "node_history" },
     { "Resource Config",                                "resource_config" },
     { "Resource Operations",                            "operations" },
     { "Revised Cluster Status",                         "revised_cluster_status" },
     { "Transition Summary",                             "actions" },
     { "Utilization Information",                        "utilizations" },
 
     { NULL, NULL }
 };
 
 /* The first several elements of this struct must be the same as the first
  * several elements of private_data_s in lib/common/output_html.c.  That
  * struct gets passed to a bunch of the pcmk__output_xml_* functions which
  * assume an XML private_data_s.  Keeping them laid out the same means this
  * still works.
  */
 typedef struct private_data_s {
     /* Begin members that must match the HTML version */
     xmlNode *root;
     GQueue *parent_q;
     GSList *errors;
     /* End members that must match the HTML version */
     bool legacy_xml;
 } private_data_t;
 
 static void
 xml_free_priv(pcmk__output_t *out) {
     private_data_t *priv = out->priv;
 
     if (priv == NULL) {
         return;
     }
 
     free_xml(priv->root);
     g_queue_free(priv->parent_q);
     g_slist_free(priv->errors);
     free(priv);
     out->priv = NULL;
 }
 
 static bool
 xml_init(pcmk__output_t *out) {
     private_data_t *priv = NULL;
 
     /* If xml_init was previously called on this output struct, just return. */
     if (out->priv != NULL) {
         return true;
     } else {
         out->priv = calloc(1, sizeof(private_data_t));
         if (out->priv == NULL) {
             return false;
         }
 
         priv = out->priv;
     }
 
     if (legacy_xml) {
         priv->root = create_xml_node(NULL, "crm_mon");
         crm_xml_add(priv->root, "version", PACEMAKER_VERSION);
     } else {
         priv->root = create_xml_node(NULL, "pacemaker-result");
         crm_xml_add(priv->root, "api-version", PCMK__API_VERSION);
 
         if (out->request != NULL) {
             crm_xml_add(priv->root, "request", out->request);
         }
     }
 
     priv->parent_q = g_queue_new();
     priv->errors = NULL;
     g_queue_push_tail(priv->parent_q, priv->root);
 
     /* Copy this from the file-level variable.  This means that it is settable
      * only as a command-line option, and that pcmk__output_new must be called
      * after all command-line processing has completed.
      */
     priv->legacy_xml = legacy_xml;
 
     return true;
 }
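 
 /* A sketch of the two possible document roots this sets up (attribute
  * values are build- and invocation-dependent):
  *
  *   <crm_mon version="...">                             with --xml-legacy
  *   <pacemaker-result api-version="..." request="...">  otherwise
  */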
 
 static void
 add_error_node(gpointer data, gpointer user_data) {
     char *str = (char *) data;
     xmlNodePtr node = (xmlNodePtr) user_data;
     pcmk_create_xml_text_node(node, "error", str);
 }
 
 static void
 xml_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_dest) {
     private_data_t *priv = NULL;
     xmlNodePtr node;
 
     CRM_ASSERT(out != NULL);
     priv = out->priv;
 
     /* If root is NULL, xml_init failed and we are being called from pcmk__output_free
      * in the pcmk__output_new path.
      */
     if (priv == NULL || priv->root == NULL) {
         return;
     }
 
     if (legacy_xml) {
         GSList *node = priv->errors;
 
         if (exit_status != CRM_EX_OK) {
             fprintf(stderr, "%s\n", crm_exit_str(exit_status));
         }
 
         while (node != NULL) {
             fprintf(stderr, "%s\n", (char *) node->data);
             node = node->next;
         }
     } else {
         char *rc_as_str = pcmk__itoa(exit_status);
 
         node = create_xml_node(priv->root, "status");
         pcmk__xe_set_props(node, "code", rc_as_str,
                            "message", crm_exit_str(exit_status),
                            NULL);
 
         if (g_slist_length(priv->errors) > 0) {
             xmlNodePtr errors_node = create_xml_node(node, "errors");
             g_slist_foreach(priv->errors, add_error_node, (gpointer) errors_node);
         }
 
         free(rc_as_str);
     }
 
     if (print) {
         char *buf = dump_xml_formatted_with_text(priv->root);
         fprintf(out->dest, "%s", buf);
         fflush(out->dest);
         free(buf);
     }
 
     if (copy_dest != NULL) {
         *copy_dest = copy_xml(priv->root);
     }
 }
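 
 /* A sketch of the status element the non-legacy branch appends (the
  * values shown are illustrative, not from a real run):
  *
  *   <status code="0" message="OK"/>
  *
  * or, when messages were queued via xml_err():
  *
  *   <status code="1" message="Error occurred">
  *     <errors>
  *       <error>some error text</error>
  *     </errors>
  *   </status>
  */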
 
 static void
 xml_reset(pcmk__output_t *out) {
     CRM_ASSERT(out != NULL);
 
     out->dest = freopen(NULL, "w", out->dest);
     CRM_ASSERT(out->dest != NULL);
 
     xml_free_priv(out);
     xml_init(out);
 }
 
 static void
 xml_subprocess_output(pcmk__output_t *out, int exit_status,
                       const char *proc_stdout, const char *proc_stderr) {
     xmlNodePtr node, child_node;
     char *rc_as_str = NULL;
 
     CRM_ASSERT(out != NULL);
 
     rc_as_str = pcmk__itoa(exit_status);
 
     node = pcmk__output_xml_create_parent(out, "command",
                                           "code", rc_as_str,
                                           NULL);
 
     if (proc_stdout != NULL) {
         child_node = pcmk_create_xml_text_node(node, "output", proc_stdout);
         crm_xml_add(child_node, "source", "stdout");
     }
 
     if (proc_stderr != NULL) {
         child_node = pcmk_create_xml_text_node(node, "output", proc_stderr);
         crm_xml_add(child_node, "source", "stderr");
     }
 
     pcmk__output_xml_add_node(out, node);
     free(rc_as_str);
 }
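 
 /* For example (with hypothetical values), a command that exited 0 and
  * wrote to both streams would be recorded as:
  *
  *   <command code="0">
  *     <output source="stdout">...</output>
  *     <output source="stderr">...</output>
  *   </command>
  */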
 
 static void
 xml_version(pcmk__output_t *out, bool extended) {
     CRM_ASSERT(out != NULL);
 
     pcmk__output_create_xml_node(out, "version",
                                  "program", "Pacemaker",
                                  "version", PACEMAKER_VERSION,
-                                 "author", "Andrew Beekhof",
+                                 "author", "Andrew Beekhof and the "
+                                           "Pacemaker project contributors",
                                  "build", BUILD_VERSION,
                                  "features", CRM_FEATURES,
                                  NULL);
 }
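 
 /* This produces a single element along the lines of (values are
  * build-dependent):
  *
  *   <version program="Pacemaker" version="..."
  *            author="Andrew Beekhof and the Pacemaker project contributors"
  *            build="..." features="..."/>
  */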
 
 G_GNUC_PRINTF(2, 3)
 static void
 xml_err(pcmk__output_t *out, const char *format, ...) {
     private_data_t *priv = NULL;
     int len = 0;
     char *buf = NULL;
     va_list ap;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     va_start(ap, format);
     len = vasprintf(&buf, format, ap);
     CRM_ASSERT(len > 0);
     va_end(ap);
 
     priv->errors = g_slist_append(priv->errors, buf);
 }
 
 G_GNUC_PRINTF(2, 3)
 static int
 xml_info(pcmk__output_t *out, const char *format, ...) {
     return pcmk_rc_no_output;
 }
 
 static void
 xml_output_xml(pcmk__output_t *out, const char *name, const char *buf) {
     xmlNodePtr parent = NULL;
     xmlNodePtr cdata_node = NULL;
 
     CRM_ASSERT(out != NULL);
 
     parent = pcmk__output_create_xml_node(out, name, NULL);
     cdata_node = xmlNewCDataBlock(getDocPtr(parent), (pcmkXmlStr) buf, strlen(buf));
     xmlAddChild(parent, cdata_node);
 }
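 
 /* The buffer is attached verbatim as a CDATA child, so its contents are
  * not parsed or escaped, e.g. (hypothetical element name and content):
  *
  *   <cib><![CDATA[...raw XML text...]]></cib>
  */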
 
 G_GNUC_PRINTF(4, 5)
 static void
 xml_begin_list(pcmk__output_t *out, const char *singular_noun, const char *plural_noun,
                const char *format, ...) {
     va_list ap;
     char *name = NULL;
     char *buf = NULL;
     int len;
 
     CRM_ASSERT(out != NULL);
 
     va_start(ap, format);
     len = vasprintf(&buf, format, ap);
     CRM_ASSERT(len >= 0);
     va_end(ap);
 
     if (substitute) {
         for (subst_t *s = substitutions; s->from != NULL; s++) {
             if (!strcmp(s->from, buf)) {
                 name = g_strdup(s->to);
                 break;
             }
         }
     }
 
     if (name == NULL) {
         name = g_ascii_strdown(buf, -1);
     }
 
     if (legacy_xml || simple_list) {
         pcmk__output_xml_create_parent(out, name, NULL);
     } else {
         pcmk__output_xml_create_parent(out, "list",
                                        "name", name,
                                        NULL);
     }
 
     g_free(name);
     free(buf);
 }
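 
 /* A sketch of how a section headed "Fencing History" is encoded, assuming
  * --xml-substitute is in effect so the header maps to "fence_history" via
  * the table above:
  *
  *   <fence_history> ... </fence_history>     with --xml-legacy or
  *                                            --xml-simple-list
  *   <list name="fence_history"> ... </list>  otherwise
  *
  * Without substitution, the name falls back to the lowercased header.
  */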
 
 G_GNUC_PRINTF(3, 4)
 static void
 xml_list_item(pcmk__output_t *out, const char *name, const char *format, ...) {
     xmlNodePtr item_node = NULL;
     va_list ap;
     char *buf = NULL;
     int len;
 
     CRM_ASSERT(out != NULL);
 
     va_start(ap, format);
     len = vasprintf(&buf, format, ap);
     CRM_ASSERT(len >= 0);
     va_end(ap);
 
     item_node = pcmk__output_create_xml_text_node(out, "item", buf);
 
     if (name != NULL) {
         crm_xml_add(item_node, "name", name);
     }
 
     free(buf);
 }
 
 static void
 xml_increment_list(pcmk__output_t *out) {
     /* This function intentionally left blank */
 }
 
 static void
 xml_end_list(pcmk__output_t *out) {
     private_data_t *priv = NULL;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     if (priv->legacy_xml || simple_list) {
         g_queue_pop_tail(priv->parent_q);
     } else {
         char *buf = NULL;
         xmlNodePtr node;
 
         node = g_queue_pop_tail(priv->parent_q);
         buf = crm_strdup_printf("%lu", xmlChildElementCount(node));
         crm_xml_add(node, "count", buf);
         free(buf);
     }
 }
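 
 /* In the non-legacy case, the closed list is annotated with its child
  * element count, e.g. (illustrative): <list name="resources" count="3">
  */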
 
 static bool
 xml_is_quiet(pcmk__output_t *out) {
     return false;
 }
 
 static void
 xml_spacer(pcmk__output_t *out) {
     /* This function intentionally left blank */
 }
 
 static void
 xml_progress(pcmk__output_t *out, bool end) {
     /* This function intentionally left blank */
 }
 
 pcmk__output_t *
 pcmk__mk_xml_output(char **argv) {
     pcmk__output_t *retval = calloc(1, sizeof(pcmk__output_t));
 
     if (retval == NULL) {
         return NULL;
     }
 
     retval->fmt_name = "xml";
     retval->request = argv == NULL ? NULL : g_strjoinv(" ", argv);
 
     retval->init = xml_init;
     retval->free_priv = xml_free_priv;
     retval->finish = xml_finish;
     retval->reset = xml_reset;
 
     retval->register_message = pcmk__register_message;
     retval->message = pcmk__call_message;
 
     retval->subprocess_output = xml_subprocess_output;
     retval->version = xml_version;
     retval->info = xml_info;
     retval->err = xml_err;
     retval->output_xml = xml_output_xml;
 
     retval->begin_list = xml_begin_list;
     retval->list_item = xml_list_item;
     retval->increment_list = xml_increment_list;
     retval->end_list = xml_end_list;
 
     retval->is_quiet = xml_is_quiet;
     retval->spacer = xml_spacer;
     retval->progress = xml_progress;
     retval->prompt = pcmk__text_prompt;
 
     return retval;
 }
 
 xmlNodePtr
 pcmk__output_xml_create_parent(pcmk__output_t *out, const char *name, ...) {
     va_list args;
     xmlNodePtr node = NULL;
 
     CRM_ASSERT(out != NULL);
 
     node = pcmk__output_create_xml_node(out, name, NULL);
 
     va_start(args, name);
     pcmk__xe_set_propv(node, args);
     va_end(args);
 
     pcmk__output_xml_push_parent(out, node);
     return node;
 }
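 
 /* The trailing arguments are attribute name/value pairs terminated by
  * NULL, as in this (hypothetical) call:
  *
  *   pcmk__output_xml_create_parent(out, "list",
  *                                  "name", "resources",
  *                                  NULL);
  */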
 
 void
 pcmk__output_xml_add_node(pcmk__output_t *out, xmlNodePtr node) {
     private_data_t *priv = NULL;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     CRM_ASSERT(node != NULL);
 
     if (!pcmk__str_any_of(out->fmt_name, "xml", "html", NULL)) {
         return;
     }
 
     priv = out->priv;
 
     xmlAddChild(g_queue_peek_tail(priv->parent_q), node);
 }
 
 xmlNodePtr
 pcmk__output_create_xml_node(pcmk__output_t *out, const char *name, ...) {
     xmlNodePtr node = NULL;
     private_data_t *priv = NULL;
     va_list args;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     CRM_ASSERT(pcmk__str_any_of(out->fmt_name, "xml", "html", NULL));
 
     priv = out->priv;
 
     node = create_xml_node(g_queue_peek_tail(priv->parent_q), name);
     va_start(args, name);
     pcmk__xe_set_propv(node, args);
     va_end(args);
 
     return node;
 }
 
 xmlNodePtr
 pcmk__output_create_xml_text_node(pcmk__output_t *out, const char *name, const char *content) {
     xmlNodePtr node = NULL;
 
     CRM_ASSERT(out != NULL);
 
     node = pcmk__output_create_xml_node(out, name, NULL);
     xmlNodeSetContent(node, (pcmkXmlStr) content);
     return node;
 }
 
 void
 pcmk__output_xml_push_parent(pcmk__output_t *out, xmlNodePtr parent) {
     private_data_t *priv = NULL;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     CRM_ASSERT(parent != NULL);
 
     if (!pcmk__str_any_of(out->fmt_name, "xml", "html", NULL)) {
         return;
     }
 
     priv = out->priv;
 
     g_queue_push_tail(priv->parent_q, parent);
 }
 
 void
 pcmk__output_xml_pop_parent(pcmk__output_t *out) {
     private_data_t *priv = NULL;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
 
     if (!pcmk__str_any_of(out->fmt_name, "xml", "html", NULL)) {
         return;
     }
 
     priv = out->priv;
 
     CRM_ASSERT(g_queue_get_length(priv->parent_q) > 0);
     g_queue_pop_tail(priv->parent_q);
 }
 
 xmlNodePtr
 pcmk__output_xml_peek_parent(pcmk__output_t *out) {
     private_data_t *priv = NULL;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     CRM_ASSERT(pcmk__str_any_of(out->fmt_name, "xml", "html", NULL));
 
     priv = out->priv;
 
     /* If the queue is empty, NULL is returned */
     return g_queue_peek_tail(priv->parent_q);
 }
diff --git a/tools/crm_mon_curses.c b/tools/crm_mon_curses.c
index 5bad1048cf..c0ee1fde88 100644
--- a/tools/crm_mon_curses.c
+++ b/tools/crm_mon_curses.c
@@ -1,530 +1,531 @@
 /*
  * Copyright 2019-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <stdarg.h>
 #include <stdint.h>
 #include <stdlib.h>
 #include <crm/crm.h>
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/pengine/internal.h>
 #include <glib.h>
 #include <pacemaker-internal.h>
 
 #include "crm_mon.h"
 
 #if CURSES_ENABLED
 
 GOptionEntry crm_mon_curses_output_entries[] = {
     { NULL }
 };
 
 typedef struct curses_list_data_s {
     unsigned int len;
     char *singular_noun;
     char *plural_noun;
 } curses_list_data_t;
 
 typedef struct private_data_s {
     GQueue *parent_q;
 } private_data_t;
 
 static void
 curses_free_priv(pcmk__output_t *out) {
     private_data_t *priv = out->priv;
 
     if (priv == NULL) {
         return;
     }
 
     g_queue_free(priv->parent_q);
     free(priv);
     out->priv = NULL;
 }
 
 static bool
 curses_init(pcmk__output_t *out) {
     private_data_t *priv = NULL;
 
     /* If curses_init was previously called on this output struct, just return. */
     if (out->priv != NULL) {
         return true;
     } else {
         out->priv = calloc(1, sizeof(private_data_t));
         if (out->priv == NULL) {
             return false;
         }
 
         priv = out->priv;
     }
 
     priv->parent_q = g_queue_new();
 
     initscr();
     cbreak();
     noecho();
 
     return true;
 }
 
 static void
 curses_finish(pcmk__output_t *out, crm_exit_t exit_status, bool print, void **copy_dest) {
     CRM_ASSERT(out != NULL);
 
     echo();
     nocbreak();
     endwin();
 }
 
 static void
 curses_reset(pcmk__output_t *out) {
     CRM_ASSERT(out != NULL);
 
     curses_free_priv(out);
     curses_init(out);
 }
 
 static void
 curses_subprocess_output(pcmk__output_t *out, int exit_status,
                          const char *proc_stdout, const char *proc_stderr) {
     CRM_ASSERT(out != NULL);
 
     if (proc_stdout != NULL) {
         printw("%s\n", proc_stdout);
     }
 
     if (proc_stderr != NULL) {
         printw("%s\n", proc_stderr);
     }
 
     clrtoeol();
     refresh();
 }
 
 /* curses_version is defined in curses.h, so we can't use that name here.
  * Note that this function prints with plain printf, not with curses.
  */
 static void
 curses_ver(pcmk__output_t *out, bool extended) {
     CRM_ASSERT(out != NULL);
 
     if (extended) {
         printf("Pacemaker %s (Build: %s): %s\n", PACEMAKER_VERSION, BUILD_VERSION, CRM_FEATURES);
     } else {
         printf("Pacemaker %s\n", PACEMAKER_VERSION);
-        printf("Written by Andrew Beekhof\n");
+        printf("Written by Andrew Beekhof and the "
+               "Pacemaker project contributors\n");
     }
 }
 
 G_GNUC_PRINTF(2, 3)
 static void
 curses_error(pcmk__output_t *out, const char *format, ...) {
     va_list ap;
 
     CRM_ASSERT(out != NULL);
 
     /* Error output does not get indented, to separate it from other
      * potentially indented list output.
      */
     va_start(ap, format);
     vw_printw(stdscr, format, ap);
     va_end(ap);
 
     /* Add a newline. */
     addch('\n');
 
     clrtoeol();
     refresh();
     sleep(2);
 }
 
 G_GNUC_PRINTF(2, 3)
 static int
 curses_info(pcmk__output_t *out, const char *format, ...) {
     va_list ap;
 
     CRM_ASSERT(out != NULL);
 
     if (out->is_quiet(out)) {
         return pcmk_rc_no_output;
     }
 
     /* Informational output does not get indented, to separate it from other
      * potentially indented list output.
      */
     va_start(ap, format);
     vw_printw(stdscr, format, ap);
     va_end(ap);
 
     /* Add a newline. */
     addch('\n');
 
     clrtoeol();
     refresh();
     return pcmk_rc_ok;
 }
 
 static void
 curses_output_xml(pcmk__output_t *out, const char *name, const char *buf) {
     CRM_ASSERT(out != NULL);
     curses_indented_printf(out, "%s", buf);
 }
 
 G_GNUC_PRINTF(4, 5)
 static void
 curses_begin_list(pcmk__output_t *out, const char *singular_noun, const char *plural_noun,
                   const char *format, ...) {
     private_data_t *priv = NULL;
     curses_list_data_t *new_list = NULL;
     va_list ap;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     /* A NULL format can be used to create a new level of indentation without
      * displaying a list header.  In that case, skip the printing entirely,
      * since vw_printw misbehaves if given a NULL format.
      */
     if (format != NULL) {
         va_start(ap, format);
 
         curses_indented_vprintf(out, format, ap);
         printw(":\n");
 
         va_end(ap);
     }
 
     new_list = calloc(1, sizeof(curses_list_data_t));
     new_list->len = 0;
     pcmk__str_update(&new_list->singular_noun, singular_noun);
     pcmk__str_update(&new_list->plural_noun, plural_noun);
 
     g_queue_push_tail(priv->parent_q, new_list);
 }
 
 G_GNUC_PRINTF(3, 4)
 static void
 curses_list_item(pcmk__output_t *out, const char *id, const char *format, ...) {
     va_list ap;
 
     CRM_ASSERT(out != NULL);
 
     va_start(ap, format);
 
     if (id != NULL) {
         curses_indented_printf(out, "%s: ", id);
         vw_printw(stdscr, format, ap);
     } else {
         curses_indented_vprintf(out, format, ap);
     }
 
     addch('\n');
     va_end(ap);
 
     out->increment_list(out);
 }
 
 static void
 curses_increment_list(pcmk__output_t *out) {
     private_data_t *priv = NULL;
     gpointer tail;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     tail = g_queue_peek_tail(priv->parent_q);
     CRM_ASSERT(tail != NULL);
     ((curses_list_data_t *) tail)->len++;
 }
 
 static void
 curses_end_list(pcmk__output_t *out) {
     private_data_t *priv = NULL;
     curses_list_data_t *node = NULL;
 
     CRM_ASSERT(out != NULL && out->priv != NULL);
     priv = out->priv;
 
     node = g_queue_pop_tail(priv->parent_q);
 
     if (node->singular_noun != NULL && node->plural_noun != NULL) {
         if (node->len == 1) {
             curses_indented_printf(out, "%d %s found\n", node->len, node->singular_noun);
         } else {
             curses_indented_printf(out, "%d %s found\n", node->len, node->plural_noun);
         }
     }
 
     free(node);
 }
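 
 /* For example (hypothetical nouns), a list begun with singular_noun "node"
  * and plural_noun "nodes" that accumulated two items via increment_list()
  * ends by printing "2 nodes found".
  */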
 
 static bool
 curses_is_quiet(pcmk__output_t *out) {
     CRM_ASSERT(out != NULL);
     return out->quiet;
 }
 
 static void
 curses_spacer(pcmk__output_t *out) {
     CRM_ASSERT(out != NULL);
     addch('\n');
 }
 
 static void
 curses_progress(pcmk__output_t *out, bool end) {
     CRM_ASSERT(out != NULL);
 
     if (end) {
         printw(".\n");
     } else {
         addch('.');
     }
 }
 
 static void
 curses_prompt(const char *prompt, bool do_echo, char **dest)
 {
     int rc = OK;
 
     CRM_ASSERT(prompt != NULL);
     CRM_ASSERT(dest != NULL);
 
     /* This is deliberately the reverse of the text version of this function.
      * Echo is disabled by default in curses_init, so we need to enable it
      * here if asked for.
      */
     if (do_echo) {
         rc = echo();
     }
 
     if (rc == OK) {
         printw("%s: ", prompt);
 
         if (*dest != NULL) {
             free(*dest);
         }
 
         *dest = calloc(1, 1024);
         /* On older systems, scanw is declared as taking a char * for its
          * first argument, while newer systems rightly want a const char *.
          * Accommodate both here, since we build with -Werror.
          */
         rc = scanw((NCURSES_CONST char *) "%1023s", *dest);
         addch('\n');
     }
 
     if (rc < 1) {
         free(*dest);
         *dest = NULL;
     }
 
     if (do_echo) {
         noecho();
     }
 }
 
 pcmk__output_t *
 crm_mon_mk_curses_output(char **argv) {
     pcmk__output_t *retval = calloc(1, sizeof(pcmk__output_t));
 
     if (retval == NULL) {
         return NULL;
     }
 
     retval->fmt_name = "console";
     retval->request = argv == NULL ? NULL : g_strjoinv(" ", argv);
 
     retval->init = curses_init;
     retval->free_priv = curses_free_priv;
     retval->finish = curses_finish;
     retval->reset = curses_reset;
 
     retval->register_message = pcmk__register_message;
     retval->message = pcmk__call_message;
 
     retval->subprocess_output = curses_subprocess_output;
     retval->version = curses_ver;
     retval->err = curses_error;
     retval->info = curses_info;
     retval->output_xml = curses_output_xml;
 
     retval->begin_list = curses_begin_list;
     retval->list_item = curses_list_item;
     retval->increment_list = curses_increment_list;
     retval->end_list = curses_end_list;
 
     retval->is_quiet = curses_is_quiet;
     retval->spacer = curses_spacer;
     retval->progress = curses_progress;
     retval->prompt = curses_prompt;
 
     return retval;
 }
 
 G_GNUC_PRINTF(2, 0)
 void
 curses_formatted_vprintf(pcmk__output_t *out, const char *format, va_list args) {
     vw_printw(stdscr, format, args);
 
     clrtoeol();
     refresh();
 }
 
 G_GNUC_PRINTF(2, 3)
 void
 curses_formatted_printf(pcmk__output_t *out, const char *format, ...) {
     va_list ap;
 
     va_start(ap, format);
     curses_formatted_vprintf(out, format, ap);
     va_end(ap);
 }
 
 G_GNUC_PRINTF(2, 0)
 void
 curses_indented_vprintf(pcmk__output_t *out, const char *format, va_list args) {
     int level = 0;
     private_data_t *priv = out->priv;
 
     CRM_ASSERT(priv != NULL);
 
     level = g_queue_get_length(priv->parent_q);
 
     for (int i = 0; i < level; i++) {
         printw("  ");
     }
 
     if (level > 0) {
         printw("* ");
     }
 
     curses_formatted_vprintf(out, format, args);
 }
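 
 /* Indentation sketch: two spaces per level of list nesting, plus a "* "
  * bullet whenever we are inside at least one list.  At nesting level 2,
  * a (hypothetical) line renders as:
  *
  *   "    * Started: node1"
  */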
 
 G_GNUC_PRINTF(2, 3)
 void
 curses_indented_printf(pcmk__output_t *out, const char *format, ...) {
     va_list ap;
 
     va_start(ap, format);
     curses_indented_vprintf(out, format, ap);
     va_end(ap);
 }
 
 PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long int")
 static int
 cluster_maint_mode_console(pcmk__output_t *out, va_list args) {
     unsigned long long flags = va_arg(args, unsigned long long);
 
     if (pcmk_is_set(flags, pe_flag_maintenance_mode)) {
         curses_formatted_printf(out, "\n              *** Resource management is DISABLED ***\n");
         curses_formatted_printf(out, "  The cluster will not attempt to start, stop or recover services\n");
         return pcmk_rc_ok;
     } else if (pcmk_is_set(flags, pe_flag_stop_everything)) {
         curses_formatted_printf(out, "\n    *** Resource management is DISABLED ***\n");
         curses_formatted_printf(out, "  The cluster will keep all resources stopped\n");
         return pcmk_rc_ok;
     } else {
         return pcmk_rc_no_output;
     }
 }
 
 PCMK__OUTPUT_ARGS("cluster-status", "pe_working_set_t *", "crm_exit_t",
                   "stonith_history_t *", "gboolean", "uint32_t", "uint32_t",
                   "const char *", "GList *", "GList *")
 static int
 cluster_status_console(pcmk__output_t *out, va_list args) {
     int rc = pcmk_rc_no_output;
 
     blank_screen();
     rc = pcmk__cluster_status_text(out, args);
     refresh();
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("stonith-event", "stonith_history_t *", "gboolean", "gboolean")
 static int
 stonith_event_console(pcmk__output_t *out, va_list args) {
     stonith_history_t *event = va_arg(args, stonith_history_t *);
     gboolean full_history = va_arg(args, gboolean);
     gboolean later_succeeded = va_arg(args, gboolean);
 
     crm_time_t *crm_when = crm_time_new(NULL);
     char *buf = NULL;
 
     crm_time_set_timet(crm_when, &(event->completed));
     buf = crm_time_as_string(crm_when, crm_time_log_date | crm_time_log_timeofday | crm_time_log_with_timezone);
 
     switch (event->state) {
         case st_failed:
             curses_indented_printf(out,
                                    "%s of %s failed%s%s%s: "
                                    "delegate=%s, client=%s, origin=%s, %s='%s' %s\n",
                                    stonith_action_str(event->action), event->target,
                                    (event->exit_reason == NULL)? "" : " (",
                                    (event->exit_reason == NULL)? "" : event->exit_reason,
                                    (event->exit_reason == NULL)? "" : ")",
                                    event->delegate ? event->delegate : "",
                                    event->client, event->origin,
                                    full_history ? "completed" : "last-failed", buf,
                                    later_succeeded ? " (a later attempt succeeded)" : "");
             break;
 
         case st_done:
             curses_indented_printf(out, "%s of %s successful: delegate=%s, client=%s, origin=%s, %s='%s'\n",
                                    stonith_action_str(event->action), event->target,
                                    event->delegate ? event->delegate : "",
                                    event->client, event->origin,
                                    full_history ? "completed" : "last-successful", buf);
             break;
 
         default:
             curses_indented_printf(out, "%s of %s pending: client=%s, origin=%s\n",
                                    stonith_action_str(event->action), event->target,
                                    event->client, event->origin);
             break;
     }
 
     free(buf);
     crm_time_free(crm_when);
     return pcmk_rc_ok;
 }
 
 static pcmk__message_entry_t fmt_functions[] = {
     { "cluster-status", "console", cluster_status_console },
     { "maint-mode", "console", cluster_maint_mode_console },
     { "stonith-event", "console", stonith_event_console },
 
     { NULL, NULL, NULL }
 };
 
 #endif
 
 void
 crm_mon_register_messages(pcmk__output_t *out) {
 #if CURSES_ENABLED
     pcmk__register_messages(out, fmt_functions);
 #endif
 }
 
 void
 blank_screen(void)
 {
 #if CURSES_ENABLED
     int lpc = 0;
 
     for (lpc = 0; lpc < LINES; lpc++) {
         move(lpc, 0);
         clrtoeol();
     }
     move(0, 0);
     refresh();
 #endif
 }