diff --git a/daemons/fenced/fenced_remote.c b/daemons/fenced/fenced_remote.c
index f09a2de781..480a2a8917 100644
--- a/daemons/fenced/fenced_remote.c
+++ b/daemons/fenced/fenced_remote.c
@@ -1,2532 +1,2532 @@
 /*
  * Copyright 2009-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <ctype.h>
 #include <regex.h>
 
 #include <crm/crm.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipc_internal.h>
 #include <crm/cluster/internal.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 
 #include <crm/common/util.h>
 #include <pacemaker-fenced.h>
 
 #define TIMEOUT_MULTIPLY_FACTOR 1.2
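 /* TIMEOUT_MULTIPLY_FACTOR pads base timeouts when deriving query and total
  * operation timeouts (for example, in initiate_remote_stonith_op() and
  * merge_duplicates())
  */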
 
 /* When one fencer queries its peers for devices able to handle a fencing
  * request, each peer will reply with a list of such devices available to it.
  * Each reply will be parsed into a peer_device_info_t, with each device's
  * information kept in a device_properties_t.
  */
 
 typedef struct device_properties_s {
     /* Whether access to this device has been verified */
     gboolean verified;
 
     /* The remaining members are indexed by the operation's "phase" */
 
     /* Whether this device has been executed in each phase */
     gboolean executed[st_phase_max];
     /* Whether this device is disallowed from executing in each phase */
     gboolean disallowed[st_phase_max];
     /* Action-specific timeout for each phase */
     int custom_action_timeout[st_phase_max];
     /* Action-specific maximum random delay for each phase */
     int delay_max[st_phase_max];
     /* Action-specific base delay for each phase */
     int delay_base[st_phase_max];
     /* Group of enum st_device_flags */
     uint32_t device_support_flags;
 } device_properties_t;
 
 typedef struct {
     /* Name of peer that sent this result */
     char *host;
     /* Only try peers for non-topology-based operations once */
     gboolean tried;
     /* Number of entries in the devices table */
     int ndevices;
     /* Devices available to this host that are capable of fencing the target */
     GHashTable *devices;
 } peer_device_info_t;
 
 GHashTable *stonith_remote_op_list = NULL;
 
 extern xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data,
                                   int call_options);
 
 static void request_peer_fencing(remote_fencing_op_t *op,
                                  peer_device_info_t *peer);
 static void finalize_op(remote_fencing_op_t *op, xmlNode *data, bool dup);
 static void report_timeout_period(remote_fencing_op_t * op, int op_timeout);
 static int get_op_total_timeout(const remote_fencing_op_t *op,
                                 const peer_device_info_t *chosen_peer);
 
 static gint
 sort_strings(gconstpointer a, gconstpointer b)
 {
     return strcmp(a, b);
 }
 
 static void
 free_remote_query(gpointer data)
 {
     if (data != NULL) {
         peer_device_info_t *peer = data;
 
         g_hash_table_destroy(peer->devices);
         free(peer->host);
         free(peer);
     }
 }
 
 void
 free_stonith_remote_op_list(void)
 {
     if (stonith_remote_op_list != NULL) {
         g_hash_table_destroy(stonith_remote_op_list);
         stonith_remote_op_list = NULL;
     }
 }
 
 struct peer_count_data {
     const remote_fencing_op_t *op;
     gboolean verified_only;
     uint32_t support_action_only;
     int count;
 };
 
 /*!
  * \internal
  * \brief Increment a counter if a device has not been executed yet
  *
  * \param[in]     key        Device ID (ignored)
  * \param[in]     value      Device properties
  * \param[in,out] user_data  Peer count data
  */
 static void
 count_peer_device(gpointer key, gpointer value, gpointer user_data)
 {
     device_properties_t *props = (device_properties_t*)value;
     struct peer_count_data *data = user_data;
 
     if (!props->executed[data->op->phase]
         && (!data->verified_only || props->verified)
         && ((data->support_action_only == st_device_supports_none)
             || pcmk_is_set(props->device_support_flags,
                            data->support_action_only))) {
         ++(data->count);
     }
 }
 
 /*!
  * \internal
  * \brief Check the number of available devices in a peer's query results
  *
  * \param[in] op             Operation that results are for
  * \param[in] peer           Peer to count
  * \param[in] verified_only  Whether to count only verified devices
  * \param[in] support_on_action_only  Device support flag that a device must
  *                                    have to be counted, or
  *                                    st_device_supports_none to count all
  *
  * \return Number of devices available to peer that were not already executed
  */
 static int
 count_peer_devices(const remote_fencing_op_t *op,
                    const peer_device_info_t *peer, gboolean verified_only,
                    uint32_t support_on_action_only)
 {
     struct peer_count_data data;
 
     data.op = op;
     data.verified_only = verified_only;
     data.support_action_only = support_on_action_only;
     data.count = 0;
     if (peer) {
         g_hash_table_foreach(peer->devices, count_peer_device, &data);
     }
     return data.count;
 }
 
 /*!
  * \internal
  * \brief Search for a device in a query result
  *
  * \param[in] op      Operation that result is for
  * \param[in] peer    Query result for a peer
  * \param[in] device  Device ID to search for
  *
  * \return Device properties if found, NULL otherwise
  */
 static device_properties_t *
 find_peer_device(const remote_fencing_op_t *op, const peer_device_info_t *peer,
                  const char *device, uint32_t support_action_only)
 {
     device_properties_t *props = g_hash_table_lookup(peer->devices, device);
 
     if (props && (support_action_only != st_device_supports_none)
         && !pcmk_is_set(props->device_support_flags, support_action_only)) {
         return NULL;
     }
     return (props && !props->executed[op->phase]
            && !props->disallowed[op->phase])? props : NULL;
 }
 
 /*!
  * \internal
  * \brief Find a device in a peer's device list and mark it as executed
  *
  * \param[in]     op                     Operation that peer result is for
  * \param[in,out] peer                   Peer with results to search
  * \param[in]     device                 ID of device to mark as done
  * \param[in]     verified_devices_only  Only consider verified devices
  *
  * \return TRUE if device was found and marked, FALSE otherwise
  */
 static gboolean
 grab_peer_device(const remote_fencing_op_t *op, peer_device_info_t *peer,
                  const char *device, gboolean verified_devices_only)
 {
     device_properties_t *props = find_peer_device(op, peer, device,
                                                   fenced_support_flag(op->action));
 
     if ((props == NULL) || (verified_devices_only && !props->verified)) {
         return FALSE;
     }
 
     crm_trace("Removing %s from %s (%d remaining)",
               device, peer->host, count_peer_devices(op, peer, FALSE, st_device_supports_none));
     props->executed[op->phase] = TRUE;
     return TRUE;
 }
 
 static void
 clear_remote_op_timers(remote_fencing_op_t * op)
 {
     if (op->query_timer) {
         g_source_remove(op->query_timer);
         op->query_timer = 0;
     }
     if (op->op_timer_total) {
         g_source_remove(op->op_timer_total);
         op->op_timer_total = 0;
     }
     if (op->op_timer_one) {
         g_source_remove(op->op_timer_one);
         op->op_timer_one = 0;
     }
 }
 
 static void
 free_remote_op(gpointer data)
 {
     remote_fencing_op_t *op = data;
 
     crm_log_xml_debug(op->request, "Destroying");
 
     clear_remote_op_timers(op);
 
     free(op->id);
     free(op->action);
     free(op->delegate);
     free(op->target);
     free(op->client_id);
     free(op->client_name);
     free(op->originator);
 
     if (op->query_results) {
         g_list_free_full(op->query_results, free_remote_query);
     }
     if (op->request) {
         free_xml(op->request);
         op->request = NULL;
     }
     if (op->devices_list) {
         g_list_free_full(op->devices_list, free);
         op->devices_list = NULL;
     }
     g_list_free_full(op->automatic_list, free);
     g_list_free(op->duplicates);
 
     pcmk__reset_result(&op->result);
     free(op);
 }
 
 void
 init_stonith_remote_op_hash_table(GHashTable **table)
 {
     if (*table == NULL) {
         *table = pcmk__strkey_table(NULL, free_remote_op);
     }
 }
 
 /*!
  * \internal
  * \brief Return an operation's originally requested action (before any remap)
  *
  * \param[in] op  Operation to check
  *
  * \return Operation's original action
  */
 static const char *
 op_requested_action(const remote_fencing_op_t *op)
 {
     return ((op->phase > st_phase_requested)? PCMK_ACTION_REBOOT : op->action);
 }
 
 /*!
  * \internal
  * \brief Remap a "reboot" operation to the "off" phase
  *
  * \param[in,out] op      Operation to remap
  */
 static void
 op_phase_off(remote_fencing_op_t *op)
 {
     crm_info("Remapping multiple-device reboot targeting %s to 'off' "
              CRM_XS " id=%.8s", op->target, op->id);
     op->phase = st_phase_off;
 
     /* Happily, "off" and "on" are shorter than "reboot", so we can reuse the
      * memory allocation at each phase.
      */
     strcpy(op->action, PCMK_ACTION_OFF);
 }
 
 /*!
  * \internal
  * \brief Advance a remapped reboot operation to the "on" phase
  *
  * \param[in,out] op  Operation to remap
  */
 static void
 op_phase_on(remote_fencing_op_t *op)
 {
     GList *iter = NULL;
 
     crm_info("Remapped 'off' targeting %s complete, "
              "remapping to 'on' for %s " CRM_XS " id=%.8s",
              op->target, op->client_name, op->id);
     op->phase = st_phase_on;
     strcpy(op->action, PCMK_ACTION_ON);
 
     /* Skip devices with automatic unfencing, because the cluster will handle it
      * when the node rejoins.
      */
     for (iter = op->automatic_list; iter != NULL; iter = iter->next) {
         GList *match = g_list_find_custom(op->devices_list, iter->data,
                                             sort_strings);
 
         if (match) {
             op->devices_list = g_list_remove(op->devices_list, match->data);
         }
     }
     g_list_free_full(op->automatic_list, free);
     op->automatic_list = NULL;
 
     /* Rewind device list pointer */
     op->devices = op->devices_list;
 }
 
 /*!
  * \internal
  * \brief Reset a remapped reboot operation
  *
  * \param[in,out] op  Operation to reset
  */
 static void
 undo_op_remap(remote_fencing_op_t *op)
 {
     if (op->phase > 0) {
         crm_info("Undoing remap of reboot targeting %s for %s "
                  CRM_XS " id=%.8s", op->target, op->client_name, op->id);
         op->phase = st_phase_requested;
         strcpy(op->action, PCMK_ACTION_REBOOT);
     }
 }
 
 /*!
  * \internal
  * \brief Create notification data XML for a fencing operation result
  *
  * \param[in] op      Fencer operation that completed
  *
  * \return Newly created XML to add as notification data
  * \note The caller is responsible for freeing the result.
  */
 static xmlNode *
 fencing_result2xml(const remote_fencing_op_t *op)
 {
     xmlNode *notify_data = create_xml_node(NULL, T_STONITH_NOTIFY_FENCE);
 
     crm_xml_add_int(notify_data, PCMK_XA_STATE, op->state);
     crm_xml_add(notify_data, PCMK__XA_ST_TARGET, op->target);
     crm_xml_add(notify_data, F_STONITH_ACTION, op->action);
     crm_xml_add(notify_data, F_STONITH_DELEGATE, op->delegate);
     crm_xml_add(notify_data, PCMK__XA_ST_REMOTE_OP, op->id);
     crm_xml_add(notify_data, F_STONITH_ORIGIN, op->originator);
     crm_xml_add(notify_data, PCMK__XA_ST_CLIENTID, op->client_id);
     crm_xml_add(notify_data, PCMK__XA_ST_CLIENTNAME, op->client_name);
 
     return notify_data;
 }
 
 /*!
  * \internal
  * \brief Broadcast a fence result notification to all CPG peers
  *
  * \param[in] op         Fencer operation that completed
  * \param[in] op_merged  Whether this operation is a duplicate of another
  */
 void
 fenced_broadcast_op_result(const remote_fencing_op_t *op, bool op_merged)
 {
     static int count = 0;
     xmlNode *bcast = create_xml_node(NULL, T_STONITH_REPLY);
     xmlNode *notify_data = fencing_result2xml(op);
 
     count++;
     crm_trace("Broadcasting result to peers");
     crm_xml_add(bcast, PCMK__XA_T, T_STONITH_NOTIFY);
     crm_xml_add(bcast, PCMK__XA_SUBT, "broadcast");
     crm_xml_add(bcast, PCMK__XA_ST_OP, T_STONITH_NOTIFY);
     crm_xml_add_int(bcast, PCMK_XA_COUNT, count);
 
     if (op_merged) {
         pcmk__xe_set_bool_attr(bcast, F_STONITH_MERGED, true);
     }
 
     stonith__xe_set_result(notify_data, &op->result);
 
     add_message_xml(bcast, PCMK__XA_ST_CALLDATA, notify_data);
     send_cluster_message(NULL, crm_msg_stonith_ng, bcast, FALSE);
     free_xml(notify_data);
     free_xml(bcast);
 
     return;
 }
 
 /*!
  * \internal
  * \brief Reply to a local request originator and notify all subscribed clients
  *
  * \param[in,out] op    Fencer operation that completed
  * \param[in,out] data  Top-level XML to add notification to
  */
 static void
 handle_local_reply_and_notify(remote_fencing_op_t *op, xmlNode *data)
 {
     xmlNode *notify_data = NULL;
     xmlNode *reply = NULL;
     pcmk__client_t *client = NULL;
 
     if (op->notify_sent == TRUE) {
         /* nothing to do */
         return;
     }
 
     /* Do notification with a clean data object */
     crm_xml_add_int(data, PCMK_XA_STATE, op->state);
     crm_xml_add(data, PCMK__XA_ST_TARGET, op->target);
     crm_xml_add(data, PCMK__XA_ST_OP, op->action);
 
     reply = fenced_construct_reply(op->request, data, &op->result);
     crm_xml_add(reply, F_STONITH_DELEGATE, op->delegate);
 
     /* Send fencing OP reply to local client that initiated fencing */
     client = pcmk__find_client_by_id(op->client_id);
     if (client == NULL) {
         crm_trace("Skipping reply to %s: no longer a client", op->client_id);
     } else {
         do_local_reply(reply, client, op->call_options);
     }
 
     /* Broadcast to all local clients that the fencing operation happened */
     notify_data = fencing_result2xml(op);
     fenced_send_notification(T_STONITH_NOTIFY_FENCE, &op->result, notify_data);
     free_xml(notify_data);
     fenced_send_notification(T_STONITH_NOTIFY_HISTORY, NULL, NULL);
 
     /* Mark this op as having its notifications already sent */
     op->notify_sent = TRUE;
     free_xml(reply);
 }
 
 /*!
  * \internal
  * \brief Finalize all duplicates of a given fencer operation
  *
  * \param[in,out] op    Fencer operation that completed
  * \param[in,out] data  Top-level XML to add notification to
  */
 static void
 finalize_op_duplicates(remote_fencing_op_t *op, xmlNode *data)
 {
     for (GList *iter = op->duplicates; iter != NULL; iter = iter->next) {
         remote_fencing_op_t *other = iter->data;
 
         if (other->state == st_duplicate) {
             other->state = op->state;
             crm_debug("Performing duplicate notification for %s@%s: %s "
                       CRM_XS " id=%.8s",
                       other->client_name, other->originator,
                       pcmk_exec_status_str(op->result.execution_status),
                       other->id);
             pcmk__copy_result(&op->result, &other->result);
             finalize_op(other, data, true);
 
         } else {
             // Possible if (for example) it timed out already
             crm_err("Skipping duplicate notification for %s@%s "
                     CRM_XS " state=%s id=%.8s",
                     other->client_name, other->originator,
                     stonith_op_state_str(other->state), other->id);
         }
     }
 }
 
 static char *
 delegate_from_xml(xmlNode *xml)
 {
     xmlNode *match = get_xpath_object("//@" F_STONITH_DELEGATE, xml, LOG_NEVER);
 
     if (match == NULL) {
         return crm_element_value_copy(xml, PCMK__XA_SRC);
     } else {
         return crm_element_value_copy(match, F_STONITH_DELEGATE);
     }
 }
 
 /*!
  * \internal
  * \brief Finalize a peer fencing operation
  *
  * Clean up after a fencing operation completes. This function has two code
  * paths: the executioner uses it to broadcast the result to CPG peers, and then
  * each peer (including the executioner) uses it to process that broadcast and
  * notify its IPC clients of the result.
  *
  * \param[in,out] op      Fencer operation that completed
  * \param[in,out] data    If not NULL, XML reply of last delegated operation
  * \param[in]     dup     Whether this operation is a duplicate of another
  *                        (in which case, do not broadcast the result)
  *
  * \note The operation result should be set before calling this function.
  */
 static void
 finalize_op(remote_fencing_op_t *op, xmlNode *data, bool dup)
 {
     int level = LOG_ERR;
     const char *subt = NULL;
     xmlNode *local_data = NULL;
     gboolean op_merged = FALSE;
 
     CRM_CHECK((op != NULL), return);
 
     // This is a no-op if timers have already been cleared
     clear_remote_op_timers(op);
 
     if (op->notify_sent) {
         // Most likely, this is a timed-out action that eventually completed
         crm_notice("Operation '%s'%s%s by %s for %s@%s%s: "
                    "Result arrived too late " CRM_XS " id=%.8s",
                    op->action, (op->target? " targeting " : ""),
                    (op->target? op->target : ""),
                    (op->delegate? op->delegate : "unknown node"),
                    op->client_name, op->originator,
                    (op_merged? " (merged)" : ""),
                    op->id);
         return;
     }
 
     set_fencing_completed(op);
     undo_op_remap(op);
 
     if (data == NULL) {
         data = create_xml_node(NULL, "remote-op");
         local_data = data;
 
     } else if (op->delegate == NULL) {
         switch (op->result.execution_status) {
             case PCMK_EXEC_NO_FENCE_DEVICE:
                 break;
 
             case PCMK_EXEC_INVALID:
                 if (op->result.exit_status != CRM_EX_EXPIRED) {
                     op->delegate = delegate_from_xml(data);
                 }
                 break;
 
             default:
                 op->delegate = delegate_from_xml(data);
                 break;
         }
     }
 
     if (dup || (crm_element_value(data, F_STONITH_MERGED) != NULL)) {
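         // Note that this result is for a merged (duplicate) operation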
         op_merged = true;
     }
 
     /* Tell everyone the operation is done; we will continue with the local
      * notifications once we receive the broadcast back.
      */
     subt = crm_element_value(data, PCMK__XA_SUBT);
     if (!dup && !pcmk__str_eq(subt, "broadcast", pcmk__str_casei)) {
         /* Defer notification until the bcast message arrives */
         fenced_broadcast_op_result(op, op_merged);
         free_xml(local_data);
         return;
     }
 
     if (pcmk__result_ok(&op->result) || dup
         || !pcmk__str_eq(op->originator, stonith_our_uname, pcmk__str_casei)) {
         level = LOG_NOTICE;
     }
     do_crm_log(level, "Operation '%s'%s%s by %s for %s@%s%s: %s (%s%s%s) "
                CRM_XS " id=%.8s", op->action, (op->target? " targeting " : ""),
                (op->target? op->target : ""),
                (op->delegate? op->delegate : "unknown node"),
                op->client_name, op->originator,
                (op_merged? " (merged)" : ""),
                crm_exit_str(op->result.exit_status),
                pcmk_exec_status_str(op->result.execution_status),
                ((op->result.exit_reason == NULL)? "" : ": "),
                ((op->result.exit_reason == NULL)? "" : op->result.exit_reason),
                op->id);
 
     handle_local_reply_and_notify(op, data);
 
     if (!dup) {
         finalize_op_duplicates(op, data);
     }
 
     /* Free non-essential parts of the record
      * Keep the record around so we can query the history
      */
     if (op->query_results) {
         g_list_free_full(op->query_results, free_remote_query);
         op->query_results = NULL;
     }
     if (op->request) {
         free_xml(op->request);
         op->request = NULL;
     }
 
     free_xml(local_data);
 }
 
 /*!
  * \internal
  * \brief Finalize a watchdog fencer op after the waiting time expires
  *
  * \param[in,out] userdata  Fencer operation that completed
  *
  * \return G_SOURCE_REMOVE (which tells glib not to restart timer)
  */
 static gboolean
 remote_op_watchdog_done(gpointer userdata)
 {
     remote_fencing_op_t *op = userdata;
 
     op->op_timer_one = 0;
 
     crm_notice("Self-fencing (%s) by %s for %s assumed complete "
                CRM_XS " id=%.8s",
                op->action, op->target, op->client_name, op->id);
     op->state = st_done;
     pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
     finalize_op(op, NULL, false);
     return G_SOURCE_REMOVE;
 }
 
 static gboolean
 remote_op_timeout_one(gpointer userdata)
 {
     remote_fencing_op_t *op = userdata;
 
     op->op_timer_one = 0;
 
     crm_notice("Peer's '%s' action targeting %s for client %s timed out " CRM_XS
                " id=%.8s", op->action, op->target, op->client_name, op->id);
     pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_TIMEOUT,
                      "Peer did not return fence result within timeout");
 
     // The requested delay has been applied for the first device
     if (op->client_delay > 0) {
         op->client_delay = 0;
         crm_trace("Try another device for '%s' action targeting %s "
                   "for client %s without delay " CRM_XS " id=%.8s",
                   op->action, op->target, op->client_name, op->id);
     }
 
     // Try another device, if appropriate
     request_peer_fencing(op, NULL);
     return G_SOURCE_REMOVE;
 }
 
 /*!
  * \internal
  * \brief Finalize a remote fencer operation that timed out
  *
  * \param[in,out] op      Fencer operation that timed out
  * \param[in]     reason  Readable description of what step timed out
  */
 static void
 finalize_timed_out_op(remote_fencing_op_t *op, const char *reason)
 {
     crm_debug("Action '%s' targeting %s for client %s timed out "
               CRM_XS " id=%.8s",
               op->action, op->target, op->client_name, op->id);
 
     if (op->phase == st_phase_on) {
         /* A remapped reboot operation timed out in the "on" phase, but the
          * "off" phase completed successfully, so quit trying any further
          * devices, and return success.
          */
         op->state = st_done;
         pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
     } else {
         op->state = st_failed;
         pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_TIMEOUT, reason);
     }
     finalize_op(op, NULL, false);
 }
 
 /*!
  * \internal
  * \brief Finalize a remote fencer operation that timed out
  *
  * \param[in,out] userdata  Fencer operation that timed out
  *
  * \return G_SOURCE_REMOVE (which tells glib not to restart timer)
  */
 static gboolean
 remote_op_timeout(gpointer userdata)
 {
     remote_fencing_op_t *op = userdata;
 
     op->op_timer_total = 0;
 
     if (op->state == st_done) {
         crm_debug("Action '%s' targeting %s for client %s already completed "
                   CRM_XS " id=%.8s",
                   op->action, op->target, op->client_name, op->id);
     } else {
         finalize_timed_out_op(userdata, "Fencing did not complete within a "
                                         "total timeout based on the "
                                         "configured timeout and retries for "
                                         "any devices attempted");
     }
     return G_SOURCE_REMOVE;
 }
 
 static gboolean
 remote_op_query_timeout(gpointer data)
 {
     remote_fencing_op_t *op = data;
 
     op->query_timer = 0;
 
     if (op->state == st_done) {
         crm_debug("Operation %.8s targeting %s already completed",
                   op->id, op->target);
     } else if (op->state == st_exec) {
         crm_debug("Operation %.8s targeting %s already in progress",
                   op->id, op->target);
     } else if (op->query_results) {
         // Query succeeded, so attempt the actual fencing
         crm_debug("Query %.8s targeting %s complete (state=%s)",
                   op->id, op->target, stonith_op_state_str(op->state));
         request_peer_fencing(op, NULL);
     } else {
         crm_debug("Query %.8s targeting %s timed out (state=%s)",
                   op->id, op->target, stonith_op_state_str(op->state));
         finalize_timed_out_op(op, "No capable peers replied to device query "
                                   "within timeout");
     }
 
     return G_SOURCE_REMOVE;
 }
 
 static gboolean
 topology_is_empty(stonith_topology_t *tp)
 {
     int i;
 
     if (tp == NULL) {
         return TRUE;
     }
 
     for (i = 0; i < ST_LEVEL_MAX; i++) {
         if (tp->levels[i] != NULL) {
             return FALSE;
         }
     }
     return TRUE;
 }
 
 /*!
  * \internal
  * \brief Add a device to an operation's automatic unfencing list
  *
  * \param[in,out] op      Operation to modify
  * \param[in]     device  Device ID to add
  */
 static void
 add_required_device(remote_fencing_op_t *op, const char *device)
 {
     GList *match = g_list_find_custom(op->automatic_list, device,
                                        sort_strings);
 
     if (!match) {
         op->automatic_list = g_list_prepend(op->automatic_list, strdup(device));
     }
 }
 
 /*!
  * \internal
  * \brief Remove a device from the automatic unfencing list
  *
  * \param[in,out] op      Operation to modify
  * \param[in]     device  Device ID to remove
  */
 static void
 remove_required_device(remote_fencing_op_t *op, const char *device)
 {
     GList *match = g_list_find_custom(op->automatic_list, device,
                                         sort_strings);
 
     if (match) {
         op->automatic_list = g_list_remove(op->automatic_list, match->data);
     }
 }
 
 /* deep copy the device list */
 static void
 set_op_device_list(remote_fencing_op_t * op, GList *devices)
 {
     GList *lpc = NULL;
 
     if (op->devices_list) {
         g_list_free_full(op->devices_list, free);
         op->devices_list = NULL;
     }
     for (lpc = devices; lpc != NULL; lpc = lpc->next) {
         op->devices_list = g_list_append(op->devices_list, strdup(lpc->data));
     }
     op->devices = op->devices_list;
 }
 
 /*!
  * \internal
  * \brief Check whether a node matches a topology target
  *
  * \param[in] tp    Topology table entry to check
  * \param[in] node  Name of node to check
  *
  * \return TRUE if node matches topology target
  */
 static gboolean
 topology_matches(const stonith_topology_t *tp, const char *node)
 {
     regex_t r_patt;
 
     CRM_CHECK(node && tp && tp->target, return FALSE);
     switch (tp->kind) {
         case fenced_target_by_attribute:
             /* This level targets by attribute, so tp->target is a NAME=VALUE pair
              * of a permanent attribute applied to targeted nodes. The test below
              * relies on the locally cached copy of the CIB, so if fencing is
              * needed before the initial CIB has been received, or after a
              * malformed CIB has been received, the topology cannot be used.
              */
             if (node_has_attr(node, tp->target_attribute, tp->target_value)) {
                 crm_notice("Matched %s with %s by attribute", node, tp->target);
                 return TRUE;
             }
             break;
 
         case fenced_target_by_pattern:
             /* This level targets node names matching a pattern, so tp->target
              * (and tp->target_pattern) is a regular expression.
              */
             if (regcomp(&r_patt, tp->target_pattern, REG_EXTENDED|REG_NOSUB)) {
                 crm_info("Bad regex '%s' for fencing level", tp->target);
             } else {
                 int status = regexec(&r_patt, node, 0, NULL, 0);
 
                 regfree(&r_patt);
                 if (status == 0) {
                     crm_notice("Matched %s with %s by name", node, tp->target);
                     return TRUE;
                 }
             }
             break;
 
         case fenced_target_by_name:
             crm_trace("Testing %s against %s", node, tp->target);
             return pcmk__str_eq(tp->target, node, pcmk__str_casei);
 
         default:
             break;
     }
     crm_trace("No match for %s with %s", node, tp->target);
     return FALSE;
 }
 
 stonith_topology_t *
 find_topology_for_host(const char *host)
 {
     GHashTableIter tIter;
     stonith_topology_t *tp = g_hash_table_lookup(topology, host);
 
     if (tp != NULL) {
         crm_trace("Found %s for %s in %d entries",
                   tp->target, host, g_hash_table_size(topology));
         return tp;
     }
 
     g_hash_table_iter_init(&tIter, topology);
     while (g_hash_table_iter_next(&tIter, NULL, (gpointer *) &tp)) {
         if (topology_matches(tp, host)) {
             crm_trace("Found %s for %s in %d entries",
                       tp->target, host, g_hash_table_size(topology));
             return tp;
         }
     }
 
     crm_trace("No matches for %s in %d topology entries",
               host, g_hash_table_size(topology));
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Set fencing operation's device list to target's next topology level
  *
  * \param[in,out] op        Remote fencing operation to modify
  * \param[in]     empty_ok  If true, an operation without a target (i.e.
  *                          queries) or a target without a topology will get a
  *                          pcmk_rc_ok return value instead of ENODEV
  *
  * \return Standard Pacemaker return value
  */
 static int
 advance_topology_level(remote_fencing_op_t *op, bool empty_ok)
 {
     stonith_topology_t *tp = NULL;
 
     if (op->target) {
         tp = find_topology_for_host(op->target);
     }
     if (topology_is_empty(tp)) {
         return empty_ok? pcmk_rc_ok : ENODEV;
     }
 
     CRM_ASSERT(tp->levels != NULL);
 
     stonith__set_call_options(op->call_options, op->id, st_opt_topology);
 
     /* This is a new level, so undo any remapping left over from previous */
     undo_op_remap(op);
 
     do {
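         // Skip past any levels that have no devices configured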
         op->level++;
 
     } while (op->level < ST_LEVEL_MAX && tp->levels[op->level] == NULL);
 
     if (op->level < ST_LEVEL_MAX) {
         crm_trace("Attempting fencing level %d targeting %s (%d devices) "
                   "for client %s@%s (id=%.8s)",
                   op->level, op->target, g_list_length(tp->levels[op->level]),
                   op->client_name, op->originator, op->id);
         set_op_device_list(op, tp->levels[op->level]);
 
         // The requested delay has been applied for the first fencing level
         if ((op->level > 1) && (op->client_delay > 0)) {
             op->client_delay = 0;
         }
 
         if ((g_list_next(op->devices_list) != NULL)
             && pcmk__str_eq(op->action, PCMK_ACTION_REBOOT, pcmk__str_none)) {
             /* A reboot has been requested for a topology level with multiple
              * devices. Instead of rebooting the devices sequentially, we will
              * turn them all off, then turn them all on again. (Think about
              * switched power outlets for redundant power supplies.)
              */
             op_phase_off(op);
         }
         return pcmk_rc_ok;
     }
 
     crm_info("All %sfencing options targeting %s for client %s@%s failed "
              CRM_XS " id=%.8s",
              (stonith_watchdog_timeout_ms > 0)? "non-watchdog " : "",
              op->target, op->client_name, op->originator, op->id);
     return ENODEV;
 }
 
 /*!
  * \internal
  * \brief If fencing operation is a duplicate, merge it into the other one
  *
  * \param[in,out] op  Fencing operation to check
  */
 static void
 merge_duplicates(remote_fencing_op_t *op)
 {
     GHashTableIter iter;
     remote_fencing_op_t *other = NULL;
 
     time_t now = time(NULL);
 
     g_hash_table_iter_init(&iter, stonith_remote_op_list);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&other)) {
         const char *other_action = op_requested_action(other);
 
         if (!strcmp(op->id, other->id)) {
             continue; // Don't compare against self
         }
         if (other->state > st_exec) {
             crm_trace("%.8s not duplicate of %.8s: not in progress",
                       op->id, other->id);
             continue;
         }
         if (!pcmk__str_eq(op->target, other->target, pcmk__str_casei)) {
             crm_trace("%.8s not duplicate of %.8s: node %s vs. %s",
                       op->id, other->id, op->target, other->target);
             continue;
         }
         if (!pcmk__str_eq(op->action, other_action, pcmk__str_none)) {
             crm_trace("%.8s not duplicate of %.8s: action %s vs. %s",
                       op->id, other->id, op->action, other_action);
             continue;
         }
         if (pcmk__str_eq(op->client_name, other->client_name, pcmk__str_casei)) {
             crm_trace("%.8s not duplicate of %.8s: same client %s",
                       op->id, other->id, op->client_name);
             continue;
         }
         if (pcmk__str_eq(other->target, other->originator, pcmk__str_casei)) {
             crm_trace("%.8s not duplicate of %.8s: suicide for %s",
                       op->id, other->id, other->target);
             continue;
         }
         if (!fencing_peer_active(pcmk__get_node(0, other->originator, NULL,
                                                 pcmk__node_search_cluster))) {
             crm_notice("Failing action '%s' targeting %s originating from "
                        "client %s@%s: Originator is dead " CRM_XS " id=%.8s",
                        other->action, other->target, other->client_name,
                        other->originator, other->id);
             crm_trace("%.8s not duplicate of %.8s: originator dead",
                       op->id, other->id);
             other->state = st_failed;
             continue;
         }
         if ((other->total_timeout > 0)
             && (now > (other->total_timeout + other->created))) {
             crm_trace("%.8s not duplicate of %.8s: old (%ld vs. %ld + %d)",
                       op->id, other->id, now, other->created,
                       other->total_timeout);
             continue;
         }
 
         /* There is another in-flight request to fence the same host.
          * Piggyback on that instead. If it fails, so do we.
          */
         other->duplicates = g_list_append(other->duplicates, op);
         if (other->total_timeout == 0) {
             other->total_timeout = op->total_timeout =
                 TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, NULL);
             crm_trace("Best guess as to timeout used for %.8s: %d",
                       other->id, other->total_timeout);
         }
         crm_notice("Merging fencing action '%s' targeting %s originating from "
                    "client %s with identical request from %s@%s "
                    CRM_XS " original=%.8s duplicate=%.8s total_timeout=%ds",
                    op->action, op->target, op->client_name,
                    other->client_name, other->originator,
                    op->id, other->id, other->total_timeout);
         report_timeout_period(op, other->total_timeout);
         op->state = st_duplicate;
     }
 }
 
 static uint32_t
 fencing_active_peers(void)
 {
     uint32_t count = 0;
     crm_node_t *entry;
     GHashTableIter gIter;
 
     g_hash_table_iter_init(&gIter, crm_peer_cache);
     while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
         if (fencing_peer_active(entry)) {
             count++;
         }
     }
     return count;
 }
 
 /*!
  * \internal
  * \brief Process a manual confirmation of a pending fence action
  *
  * \param[in]     client  IPC client that sent confirmation
  * \param[in,out] msg     Request XML with manual confirmation
  *
  * \return Standard Pacemaker return code
  */
 int
 fenced_handle_manual_confirmation(const pcmk__client_t *client, xmlNode *msg)
 {
     remote_fencing_op_t *op = NULL;
     xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, msg, LOG_ERR);
 
     CRM_CHECK(dev != NULL, return EPROTO);
 
     crm_notice("Received manual confirmation that %s has been fenced",
                pcmk__s(crm_element_value(dev, PCMK__XA_ST_TARGET),
                        "unknown target"));
     op = initiate_remote_stonith_op(client, msg, TRUE);
     if (op == NULL) {
         return EPROTO;
     }
     op->state = st_done;
     set_fencing_completed(op);
     op->delegate = strdup("a human");
 
     // For the fencer's purposes, the fencing operation is done
     pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
     finalize_op(op, msg, false);
 
     /* For the requester's purposes, the operation is still pending. The
      * actual result will be sent asynchronously via the operation's done_cb().
      */
     return EINPROGRESS;
 }
 
 /*!
  * \internal
  * \brief Create a new remote stonith operation
  *
  * \param[in] client   ID of local stonith client that initiated the operation
  * \param[in] request  The request from the client that started the operation
  * \param[in] peer     TRUE if this operation is owned by another stonith peer
  *                     (an operation owned by one peer is stored on all peers,
  *                     but only the owner executes it; all nodes get the results
  *                     once the owner finishes execution)
  */
 void *
 create_remote_stonith_op(const char *client, xmlNode *request, gboolean peer)
 {
     remote_fencing_op_t *op = NULL;
     xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_TARGET, request,
                                     LOG_NEVER);
     int call_options = 0;
     const char *operation = NULL;
 
     init_stonith_remote_op_hash_table(&stonith_remote_op_list);
 
     /* If this operation is owned by another node, check to make
      * sure we haven't already created this operation. */
     if (peer && dev) {
         const char *op_id = crm_element_value(dev, PCMK__XA_ST_REMOTE_OP);
 
         CRM_CHECK(op_id != NULL, return NULL);
 
         op = g_hash_table_lookup(stonith_remote_op_list, op_id);
         if (op) {
             crm_debug("Reusing existing remote fencing op %.8s for %s",
                       op_id, ((client == NULL)? "unknown client" : client));
             return op;
         }
     }
 
     op = calloc(1, sizeof(remote_fencing_op_t));
     CRM_ASSERT(op != NULL);
 
     crm_element_value_int(request, PCMK__XA_ST_TIMEOUT, &(op->base_timeout));
     // Value -1 means disable any static/random fencing delays
     crm_element_value_int(request, PCMK__XA_ST_DELAY, &(op->client_delay));
 
     if (peer && dev) {
         op->id = crm_element_value_copy(dev, PCMK__XA_ST_REMOTE_OP);
     } else {
         op->id = crm_generate_uuid();
     }
 
     g_hash_table_replace(stonith_remote_op_list, op->id, op);
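     // The table now owns op and frees it via free_remote_op() on removal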
 
     op->state = st_query;
     op->replies_expected = fencing_active_peers();
     op->action = crm_element_value_copy(dev, F_STONITH_ACTION);
     op->originator = crm_element_value_copy(dev, F_STONITH_ORIGIN);
     op->delegate = crm_element_value_copy(dev, F_STONITH_DELEGATE); /* May not be set */
     op->created = time(NULL);
 
     if (op->originator == NULL) {
         /* Local or relayed request */
         op->originator = strdup(stonith_our_uname);
     }
 
     CRM_LOG_ASSERT(client != NULL);
     if (client) {
         op->client_id = strdup(client);
     }

     /* For a RELAY operation, use this fencer's own name and PID as the
      * client name
      */
     operation = crm_element_value(request, PCMK__XA_ST_OP);
 
     if (pcmk__str_eq(operation, STONITH_OP_RELAY, pcmk__str_none)) {
         op->client_name = crm_strdup_printf("%s.%lu", crm_system_name,
                                              (unsigned long) getpid());
     } else {
         op->client_name = crm_element_value_copy(request,
                                                  PCMK__XA_ST_CLIENTNAME);
     }
 
     op->target = crm_element_value_copy(dev, PCMK__XA_ST_TARGET);
     op->request = copy_xml(request);    /* TODO: Figure out how to avoid this */
     crm_element_value_int(request, PCMK__XA_ST_CALLOPT, &call_options);
     op->call_options = call_options;
 
     crm_element_value_int(request, PCMK__XA_ST_CALLID, &(op->client_callid));
 
     crm_trace("%s new fencing op %s ('%s' targeting %s for client %s, "
               "base timeout %d, %u %s expected)",
               (peer && dev)? "Recorded" : "Generated", op->id, op->action,
               op->target, op->client_name, op->base_timeout,
               op->replies_expected,
               pcmk__plural_alt(op->replies_expected, "reply", "replies"));
 
     if (op->call_options & st_opt_cs_nodeid) {
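         // The target was given as a cluster node ID; map it to a node name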
         int nodeid;
         crm_node_t *node;
 
         pcmk__scan_min_int(op->target, &nodeid, 0);
         node = pcmk__search_node_caches(nodeid, NULL,
                                         pcmk__node_search_any
                                         |pcmk__node_search_known);
 
         /* Ensure the conversion only happens once */
         stonith__clear_call_options(op->call_options, op->id, st_opt_cs_nodeid);
 
         if (node && node->uname) {
             free(op->target);
             op->target = strdup(node->uname);
 
         } else {
             crm_warn("Could not expand nodeid '%s' into a host name", op->target);
         }
     }
 
     /* check to see if this is a duplicate operation of another in-flight operation */
     merge_duplicates(op);
 
     if (op->state != st_duplicate) {
         /* kick history readers */
         fenced_send_notification(T_STONITH_NOTIFY_HISTORY, NULL, NULL);
     }
 
     /* safe to trim as long as that doesn't touch pending ops */
     stonith_fence_history_trim();
 
     return op;
 }
 
 /*!
  * \internal
  * \brief Create a peer fencing operation from a request, and initiate it
  *
  * \param[in] client     IPC client that made request (NULL to get from request)
  * \param[in] request    Request XML
  * \param[in] manual_ack Whether this is a manual action confirmation
  *
  * \return Newly created operation on success, otherwise NULL
  */
 remote_fencing_op_t *
 initiate_remote_stonith_op(const pcmk__client_t *client, xmlNode *request,
                            gboolean manual_ack)
 {
     int query_timeout = 0;
     xmlNode *query = NULL;
     const char *client_id = NULL;
     remote_fencing_op_t *op = NULL;
     const char *relay_op_id = NULL;
     const char *operation = NULL;
 
     if (client) {
         client_id = client->id;
     } else {
         client_id = crm_element_value(request, PCMK__XA_ST_CLIENTID);
     }
 
     CRM_LOG_ASSERT(client_id != NULL);
     op = create_remote_stonith_op(client_id, request, FALSE);
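     // This node initiated the op, so it is the owner (it will execute it)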
     op->owner = TRUE;
     if (manual_ack) {
         return op;
     }
 
     CRM_CHECK(op->action, return NULL);
 
     if (advance_topology_level(op, true) != pcmk_rc_ok) {
         op->state = st_failed;
     }
 
     switch (op->state) {
         case st_failed:
             // advance_topology_level() exhausted levels
             pcmk__set_result(&op->result, CRM_EX_ERROR, PCMK_EXEC_ERROR,
                              "All topology levels failed");
             crm_warn("Could not request peer fencing (%s) targeting %s "
                      CRM_XS " id=%.8s", op->action, op->target, op->id);
             finalize_op(op, NULL, false);
             return op;
 
         case st_duplicate:
             crm_info("Requesting peer fencing (%s) targeting %s (duplicate) "
                      CRM_XS " id=%.8s", op->action, op->target, op->id);
             return op;
 
         default:
             crm_notice("Requesting peer fencing (%s) targeting %s "
                        CRM_XS " id=%.8s state=%s base_timeout=%d",
                        op->action, op->target, op->id,
                        stonith_op_state_str(op->state), op->base_timeout);
     }
 
     query = stonith_create_op(op->client_callid, op->id, STONITH_OP_QUERY,
                               NULL, op->call_options);
 
     crm_xml_add(query, PCMK__XA_ST_REMOTE_OP, op->id);
     crm_xml_add(query, PCMK__XA_ST_TARGET, op->target);
     crm_xml_add(query, F_STONITH_ACTION, op_requested_action(op));
     crm_xml_add(query, F_STONITH_ORIGIN, op->originator);
     crm_xml_add(query, PCMK__XA_ST_CLIENTID, op->client_id);
     crm_xml_add(query, PCMK__XA_ST_CLIENTNAME, op->client_name);
     crm_xml_add_int(query, PCMK__XA_ST_TIMEOUT, op->base_timeout);
 
     /* For a RELAY operation, add the relay information to the query so that
      * the original relayed operation can be deleted.
      */
     operation = crm_element_value(request, PCMK__XA_ST_OP);
     if (pcmk__str_eq(operation, STONITH_OP_RELAY, pcmk__str_none)) {
         relay_op_id = crm_element_value(request, PCMK__XA_ST_REMOTE_OP);
         if (relay_op_id) {
             crm_xml_add(query, PCMK__XA_ST_REMOTE_OP_RELAY, relay_op_id);
         }
     }
 
     send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE);
     free_xml(query);
 
     query_timeout = op->base_timeout * TIMEOUT_MULTIPLY_FACTOR;
     op->query_timer = g_timeout_add((1000 * query_timeout),
                                      remote_op_query_timeout, op);
 
     return op;
 }
 
 enum find_best_peer_options {
     /*! Skip checking the target peer for capable fencing devices */
     FIND_PEER_SKIP_TARGET = 0x0001,
     /*! Only check the target peer for capable fencing devices */
     FIND_PEER_TARGET_ONLY = 0x0002,
     /*! Skip peers and devices that are not verified */
     FIND_PEER_VERIFIED_ONLY = 0x0004,
 };
 
 static peer_device_info_t *
 find_best_peer(const char *device, remote_fencing_op_t * op, enum find_best_peer_options options)
 {
     GList *iter = NULL;
     gboolean verified_devices_only = (options & FIND_PEER_VERIFIED_ONLY) ? TRUE : FALSE;
 
     if (!device && pcmk_is_set(op->call_options, st_opt_topology)) {
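         // Topology-based operations check one specific device at a time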
         return NULL;
     }
 
     for (iter = op->query_results; iter != NULL; iter = iter->next) {
         peer_device_info_t *peer = iter->data;
 
         crm_trace("Testing result from %s targeting %s with %d device%s: %d %x",
                   peer->host, op->target, peer->ndevices,
                   pcmk__plural_s(peer->ndevices), peer->tried, options);
         if ((options & FIND_PEER_SKIP_TARGET) && pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) {
             continue;
         }
         if ((options & FIND_PEER_TARGET_ONLY) && !pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) {
             continue;
         }
 
         if (pcmk_is_set(op->call_options, st_opt_topology)) {
 
             if (grab_peer_device(op, peer, device, verified_devices_only)) {
                 return peer;
             }
 
         } else if (!peer->tried
                    && count_peer_devices(op, peer, verified_devices_only,
                                          fenced_support_flag(op->action))) {
             /* No topology: Use the current best peer */
             crm_trace("Simple fencing");
             return peer;
         }
     }
 
     return NULL;
 }
 
 static peer_device_info_t *
 stonith_choose_peer(remote_fencing_op_t * op)
 {
     const char *device = NULL;
     peer_device_info_t *peer = NULL;
     uint32_t active = fencing_active_peers();
 
     do {
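         // Look for a capable peer, advancing topology levels as needed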
         if (op->devices) {
             device = op->devices->data;
             crm_trace("Checking for someone to fence (%s) %s using %s",
                       op->action, op->target, device);
         } else {
             crm_trace("Checking for someone to fence (%s) %s",
                       op->action, op->target);
         }
 
         /* Best choice is a peer other than the target with verified access */
         peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET|FIND_PEER_VERIFIED_ONLY);
         if (peer) {
             crm_trace("Found verified peer %s for %s",
                       peer->host, (device? device : "<any>"));
             return peer;
         }
 
         if ((op->query_timer != 0)
             && (op->replies < QB_MIN(op->replies_expected, active))) {
             crm_trace("Waiting before looking for unverified devices to fence %s",
                       op->target);
             return NULL;
         }
 
         /* If no other peer has verified access, next best is unverified access */
         peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET);
         if (peer) {
             crm_trace("Found best unverified peer %s", peer->host);
             return peer;
         }
 
         /* If no other peer can do it, last option is self-fencing
          * (which is never allowed for the "on" phase of a remapped reboot)
          */
         if (op->phase != st_phase_on) {
             peer = find_best_peer(device, op, FIND_PEER_TARGET_ONLY);
             if (peer) {
                 crm_trace("%s will fence itself", peer->host);
                 return peer;
             }
         }
 
         /* Try the next fencing level if there is one (unless we're in the "on"
          * phase of a remapped "reboot", because we ignore errors in that case)
          */
     } while ((op->phase != st_phase_on)
              && pcmk_is_set(op->call_options, st_opt_topology)
              && (advance_topology_level(op, false) == pcmk_rc_ok));
 
     if ((stonith_watchdog_timeout_ms > 0)
         && pcmk__is_fencing_action(op->action)
         && pcmk__str_eq(device, STONITH_WATCHDOG_ID, pcmk__str_none)
         && node_does_watchdog_fencing(op->target)) {
         crm_info("Couldn't contact watchdog-fencing target-node (%s)",
                  op->target);
         /* check_watchdog_fencing_and_wait will log additional info */
     } else {
         crm_notice("Couldn't find anyone to fence (%s) %s using %s",
                    op->action, op->target, (device? device : "any device"));
     }
     return NULL;
 }
 
 static int
 get_device_timeout(const remote_fencing_op_t *op,
                    const peer_device_info_t *peer, const char *device,
                    bool with_delay)
 {
     device_properties_t *props;
     int delay = 0;
 
     if (!peer || !device) {
         return op->base_timeout;
     }
 
     props = g_hash_table_lookup(peer->devices, device);
     if (!props) {
         return op->base_timeout;
     }
 
     // op->client_delay < 0 means disable any static/random fencing delays
     if (with_delay && (op->client_delay >= 0)) {
         // delay_base is eventually limited by delay_max
         delay = (props->delay_max[op->phase] > 0 ?
                  props->delay_max[op->phase] : props->delay_base[op->phase]);
     }
 
     return (props->custom_action_timeout[op->phase]?
             props->custom_action_timeout[op->phase] : op->base_timeout)
            + delay;
 }
 
 struct timeout_data {
     const remote_fencing_op_t *op;
     const peer_device_info_t *peer;
     int total_timeout;
 };
 
 /*!
  * \internal
  * \brief Add timeout to a total if device has not been executed yet
  *
  * \param[in]     key        GHashTable key (device ID)
  * \param[in]     value      GHashTable value (device properties)
  * \param[in,out] user_data  Timeout data
  */
 static void
 add_device_timeout(gpointer key, gpointer value, gpointer user_data)
 {
     const char *device_id = key;
     device_properties_t *props = value;
     struct timeout_data *timeout = user_data;
 
     if (!props->executed[timeout->op->phase]
         && !props->disallowed[timeout->op->phase]) {
         timeout->total_timeout += get_device_timeout(timeout->op, timeout->peer,
                                                      device_id, true);
     }
 }
 
 static int
 get_peer_timeout(const remote_fencing_op_t *op, const peer_device_info_t *peer)
 {
     struct timeout_data timeout;
 
     timeout.op = op;
     timeout.peer = peer;
     timeout.total_timeout = 0;
 
     g_hash_table_foreach(peer->devices, add_device_timeout, &timeout);
 
     return (timeout.total_timeout? timeout.total_timeout : op->base_timeout);
 }
 
 static int
 get_op_total_timeout(const remote_fencing_op_t *op,
                      const peer_device_info_t *chosen_peer)
 {
     long long total_timeout = 0;
     stonith_topology_t *tp = find_topology_for_host(op->target);
 
     if (pcmk_is_set(op->call_options, st_opt_topology) && tp) {
         int i;
         GList *device_list = NULL;
         GList *iter = NULL;
         GList *auto_list = NULL;
 
         if (pcmk__str_eq(op->action, PCMK_ACTION_ON, pcmk__str_none)
             && (op->automatic_list != NULL)) {
             auto_list = g_list_copy(op->automatic_list);
         }
 
         /* Yep, this looks scary, nested loops all over the place.
          * Here is what is going on.
          * Loop1: Iterate through fencing levels.
          * Loop2: If a fencing level has devices, loop through each device
          * Loop3: For each device in a fencing level, see what peer owns it
          *        and what that peer has reported the timeout is for the device.
          */
         for (i = 0; i < ST_LEVEL_MAX; i++) {
             if (!tp->levels[i]) {
                 continue;
             }
             for (device_list = tp->levels[i]; device_list; device_list = device_list->next) {
                 /* In case of a watchdog device, add the timeout to the budget
                  * regardless of whether we got a reply or not.
                  */
                 if ((stonith_watchdog_timeout_ms > 0)
                     && pcmk__is_fencing_action(op->action)
                     && pcmk__str_eq(device_list->data, STONITH_WATCHDOG_ID,
                                     pcmk__str_none)
                     && node_does_watchdog_fencing(op->target)) {
                     total_timeout += stonith_watchdog_timeout_ms / 1000;
                     continue;
                 }
 
                 for (iter = op->query_results; iter != NULL; iter = iter->next) {
                     const peer_device_info_t *peer = iter->data;
 
                     if (auto_list) {
                         GList *match = g_list_find_custom(auto_list, device_list->data,
                                         sort_strings);
                         if (match) {
                             auto_list = g_list_remove(auto_list, match->data);
                         }
                     }
 
                     if (find_peer_device(op, peer, device_list->data,
                                          fenced_support_flag(op->action))) {
                         total_timeout += get_device_timeout(op, peer,
                                                             device_list->data,
                                                             true);
                         break;
                     }
                 }               /* End Loop3: match device with peer that owns device, find device's timeout period */
             }                   /* End Loop2: iterate through devices at a specific level */
         }                       /*End Loop1: iterate through fencing levels */
 
         // Add timeouts of automatic_list devices not already counted above
         if (auto_list) {
             for (iter = auto_list; iter != NULL; iter = iter->next) {
                 GList *iter2 = NULL;
 
                 for (iter2 = op->query_results; iter2 != NULL;
                      iter2 = iter2->next) {
                     peer_device_info_t *peer = iter2->data;
                     if (find_peer_device(op, peer, iter->data, st_device_supports_on)) {
                         total_timeout += get_device_timeout(op, peer,
                                                             iter->data, true);
                         break;
                     }
                 }
             }
         }
 
         g_list_free(auto_list);
 
     } else if (chosen_peer) {
         total_timeout = get_peer_timeout(op, chosen_peer);
     } else {
         total_timeout = op->base_timeout;
     }
 
     if (total_timeout <= 0) {
         total_timeout = op->base_timeout;
     }
 
     /* Take any requested fencing delay into account to prevent it from eating
      * up the total timeout.
      */
     if (op->client_delay > 0) {
         total_timeout += op->client_delay;
     }
     return (int) QB_MIN(total_timeout, INT_MAX);
 }
 
 static void
 report_timeout_period(remote_fencing_op_t * op, int op_timeout)
 {
     GList *iter = NULL;
     xmlNode *update = NULL;
     const char *client_node = NULL;
     const char *client_id = NULL;
     const char *call_id = NULL;
 
     if (op->call_options & st_opt_sync_call) {
         /* There is no reason to report the timeout for a synchronous call. It
          * is impossible to use the reported timeout to do anything when the
          * client is blocking for the response. This update is only important
          * for async calls that use a callback to report the result. */
         return;
     } else if (!op->request) {
         return;
     }
 
     crm_trace("Reporting timeout for %s (id=%.8s)", op->client_name, op->id);
-    client_node = crm_element_value(op->request, F_STONITH_CLIENTNODE);
+    client_node = crm_element_value(op->request, PCMK__XA_ST_CLIENTNODE);
     call_id = crm_element_value(op->request, PCMK__XA_ST_CALLID);
     client_id = crm_element_value(op->request, PCMK__XA_ST_CLIENTID);
     if (!client_node || !call_id || !client_id) {
         return;
     }
 
     if (pcmk__str_eq(client_node, stonith_our_uname, pcmk__str_casei)) {
         // Client is connected to this node, so send update directly to them
         do_stonith_async_timeout_update(client_id, call_id, op_timeout);
         return;
     }
 
     /* The client is connected to another node, relay this update to them */
     update = stonith_create_op(op->client_callid, op->id, STONITH_OP_TIMEOUT_UPDATE, NULL, 0);
     crm_xml_add(update, PCMK__XA_ST_REMOTE_OP, op->id);
     crm_xml_add(update, PCMK__XA_ST_CLIENTID, client_id);
     crm_xml_add(update, PCMK__XA_ST_CALLID, call_id);
     crm_xml_add_int(update, PCMK__XA_ST_TIMEOUT, op_timeout);
 
     send_cluster_message(pcmk__get_node(0, client_node, NULL,
                                         pcmk__node_search_cluster),
                          crm_msg_stonith_ng, update, FALSE);
 
     free_xml(update);
 
     for (iter = op->duplicates; iter != NULL; iter = iter->next) {
         remote_fencing_op_t *dup = iter->data;
 
         crm_trace("Reporting timeout for duplicate %.8s to client %s",
                   dup->id, dup->client_name);
         report_timeout_period(iter->data, op_timeout);
     }
 }
 
 /*!
  * \internal
  * \brief Advance an operation to the next device in its topology
  *
  * \param[in,out] op      Fencer operation to advance
  * \param[in]     device  ID of device that just completed
  * \param[in,out] msg     If not NULL, XML reply of last delegated operation
  */
 static void
 advance_topology_device_in_level(remote_fencing_op_t *op, const char *device,
                                  xmlNode *msg)
 {
     /* Advance to the next device at this topology level, if any */
     if (op->devices) {
         op->devices = op->devices->next;
     }
 
     /* Handle automatic unfencing if an "on" action was requested */
     if ((op->phase == st_phase_requested)
         && pcmk__str_eq(op->action, PCMK_ACTION_ON, pcmk__str_none)) {
         /* If the device we just executed was required, it's not anymore */
         remove_required_device(op, device);
 
         /* If there are no more devices at this topology level, run through any
          * remaining devices with automatic unfencing
          */
         if (op->devices == NULL) {
             op->devices = op->automatic_list;
         }
     }
 
     if ((op->devices == NULL) && (op->phase == st_phase_off)) {
         /* We're done with this level and with required devices, but we had
          * remapped "reboot" to "off", so start over with "on". If any devices
          * need to be turned back on, op->devices will be non-NULL after this.
          */
         op_phase_on(op);
     }
 
     // This function is only called if the previous device succeeded
     pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
 
     if (op->devices) {
         /* Necessary devices remain, so execute the next one */
         crm_trace("Next targeting %s on behalf of %s@%s",
                   op->target, op->client_name, op->originator);
 
         // The requested delay has been applied for the first device
         if (op->client_delay > 0) {
             op->client_delay = 0;
         }
 
         request_peer_fencing(op, NULL);
     } else {
         /* We're done with all devices and phases, so finalize operation */
         crm_trace("Marking complex fencing op targeting %s as complete",
                   op->target);
         op->state = st_done;
         finalize_op(op, msg, false);
     }
 }
 
 static gboolean
 check_watchdog_fencing_and_wait(remote_fencing_op_t * op)
 {
     if (node_does_watchdog_fencing(op->target)) {
         guint timeout_ms = QB_MIN(stonith_watchdog_timeout_ms, UINT_MAX);
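         /* stonith_watchdog_timeout_ms is a long long; clamp it to the guint
          * range that g_timeout_add() expects */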
 
         crm_notice("Waiting %s for %s to self-fence (%s) for "
                    "client %s " CRM_XS " id=%.8s",
                    pcmk__readable_interval(timeout_ms), op->target, op->action,
                    op->client_name, op->id);
 
         if (op->op_timer_one) {
             g_source_remove(op->op_timer_one);
         }
         op->op_timer_one = g_timeout_add(timeout_ms, remote_op_watchdog_done,
                                          op);
         return TRUE;
     } else {
         crm_debug("Skipping fallback to watchdog-fencing as %s is "
                  "not in host-list", op->target);
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Ask a peer to execute a fencing operation
  *
  * \param[in,out] op      Fencing operation to be executed
  * \param[in,out] peer    Peer to ask to execute the fencing; if NULL, or if
  *                        topology is in use, the best available peer is
  *                        chosen instead
  */
 static void
 request_peer_fencing(remote_fencing_op_t *op, peer_device_info_t *peer)
 {
     const char *device = NULL;
     int timeout;
 
     CRM_CHECK(op != NULL, return);
 
     crm_trace("Action %.8s targeting %s for %s is %s",
               op->id, op->target, op->client_name,
               stonith_op_state_str(op->state));
 
     if ((op->phase == st_phase_on) && (op->devices != NULL)) {
         /* We are in the "on" phase of a remapped topology reboot. If this
          * device has pcmk_reboot_action="off", or doesn't support the "on"
          * action, skip it.
          *
          * We can't check device properties at this point because we haven't
          * chosen a peer for this stage yet. Instead, we check the local node's
          * knowledge about the device. If different versions of the fence agent
          * are installed on different nodes, there's a chance this could be
          * mistaken, but the worst that could happen is we don't try turning the
          * node back on when we should.
          */
         device = op->devices->data;
         if (pcmk__str_eq(fenced_device_reboot_action(device), PCMK_ACTION_OFF,
                          pcmk__str_none)) {
             crm_info("Not turning %s back on using %s because the device is "
                      "configured to stay off (pcmk_reboot_action='off')",
                      op->target, device);
             advance_topology_device_in_level(op, device, NULL);
             return;
         }
         if (!fenced_device_supports_on(device)) {
             crm_info("Not turning %s back on using %s because the agent "
                      "doesn't support 'on'", op->target, device);
             advance_topology_device_in_level(op, device, NULL);
             return;
         }
     }
 
     timeout = op->base_timeout;
     if ((peer == NULL) && !pcmk_is_set(op->call_options, st_opt_topology)) {
         peer = stonith_choose_peer(op);
     }
 
     if (!op->op_timer_total) {
         op->total_timeout = TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, peer);
         op->op_timer_total = g_timeout_add(1000 * op->total_timeout, remote_op_timeout, op);
         report_timeout_period(op, op->total_timeout);
         crm_info("Total timeout set to %d for peer's fencing targeting %s for %s "
                  CRM_XS " id=%.8s",
                  op->total_timeout, op->target, op->client_name, op->id);
     }
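     /* For example, with a TIMEOUT_MULTIPLY_FACTOR of 1.2, a computed total of
      * 60s becomes a 72s timer, and that scaled value is what is reported to
      * the client. */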
 
     if (pcmk_is_set(op->call_options, st_opt_topology) && op->devices) {
         /* Ignore the caller's peer preference if topology is in use, because
          * that peer might not have access to the required device. With
          * topology, stonith_choose_peer() removes the device from further
          * consideration, so the timeout must be calculated beforehand.
          *
          * @TODO Basing the total timeout on the caller's preferred peer (above)
          *       is less than ideal.
          */
         peer = stonith_choose_peer(op);
 
         device = op->devices->data;
         /* Fencing timeout sent to peer takes no delay into account.
          * The peer will add a dedicated timer for any delay upon
          * schedule_stonith_command().
          */
         timeout = get_device_timeout(op, peer, device, false);
     }
 
     if (peer) {
         int timeout_one = 0;
         xmlNode *remote_op = stonith_create_op(op->client_callid, op->id, STONITH_OP_FENCE, NULL, 0);
 
         if (op->client_delay > 0) {
            /* Take requested fencing delay into account to prevent it from
             * eating up the timeout.
             */
             timeout_one = TIMEOUT_MULTIPLY_FACTOR * op->client_delay;
         }
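         /* At this point timeout_one covers only the requested delay (if any);
          * the device- or peer-specific timeout is added below. */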
 
         crm_xml_add(remote_op, PCMK__XA_ST_REMOTE_OP, op->id);
         crm_xml_add(remote_op, PCMK__XA_ST_TARGET, op->target);
         crm_xml_add(remote_op, F_STONITH_ACTION, op->action);
         crm_xml_add(remote_op, F_STONITH_ORIGIN, op->originator);
         crm_xml_add(remote_op, PCMK__XA_ST_CLIENTID, op->client_id);
         crm_xml_add(remote_op, PCMK__XA_ST_CLIENTNAME, op->client_name);
         crm_xml_add_int(remote_op, PCMK__XA_ST_TIMEOUT, timeout);
         crm_xml_add_int(remote_op, PCMK__XA_ST_CALLOPT, op->call_options);
         crm_xml_add_int(remote_op, PCMK__XA_ST_DELAY, op->client_delay);
 
         if (device) {
             timeout_one += TIMEOUT_MULTIPLY_FACTOR *
                            get_device_timeout(op, peer, device, true);
             crm_notice("Requesting that %s perform '%s' action targeting %s "
                        "using %s " CRM_XS " for client %s (%ds)",
                        peer->host, op->action, op->target, device,
                        op->client_name, timeout_one);
             crm_xml_add(remote_op, F_STONITH_DEVICE, device);
 
         } else {
             timeout_one += TIMEOUT_MULTIPLY_FACTOR * get_peer_timeout(op, peer);
             crm_notice("Requesting that %s perform '%s' action targeting %s "
                        CRM_XS " for client %s (%ds, %lds)",
                        peer->host, op->action, op->target, op->client_name,
                        timeout_one, stonith_watchdog_timeout_ms);
         }
 
         op->state = st_exec;
         if (op->op_timer_one) {
             g_source_remove(op->op_timer_one);
             op->op_timer_one = 0;
         }
 
         if (!((stonith_watchdog_timeout_ms > 0)
               && (pcmk__str_eq(device, STONITH_WATCHDOG_ID, pcmk__str_none)
                   || (pcmk__str_eq(peer->host, op->target, pcmk__str_casei)
                       && pcmk__is_fencing_action(op->action)))
               && check_watchdog_fencing_and_wait(op))) {
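             /* We only get here when we are not deferring to watchdog
              * self-fencing: either no watchdog timeout is configured, this is
              * neither the watchdog device nor the target executing its own
              * fencing, or the target is not in the watchdog host list. */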
 
             /* Some thoughts about self-fencing cases reaching this point:
                - The check in check_watchdog_fencing_and_wait() shouldn't fail
                  if STONITH_WATCHDOG_ID is chosen as the fencing device, since
                  its presence implies that watchdog-fencing is enabled anyway.
                - If watchdog-fencing is disabled, either in general or for a
                  specific target (as detected in check_watchdog_fencing_and_wait()),
                  we can't expect a success reply for some other kind of
                  self-fencing, but a timeout is fine as long as the node
                  doesn't come back in the meantime.
                - The delicate case is watchdog-fencing being enabled for a
                  node while the watchdog fencing device isn't explicitly
                  chosen for self-fencing. Local pe (scheduler) execution in
                  sbd may detect the node as unclean and lead to timely
                  self-fencing. Otherwise the selection of
                  PCMK_OPT_STONITH_WATCHDOG_TIMEOUT is at least questionable.
              */
 
             /* If we get here, we are not waiting for the watchdog timeout, so
              * arm the timer with the timeout evaluated above */
             op->op_timer_one = g_timeout_add((1000 * timeout_one), remote_op_timeout_one, op);
         }
 
         send_cluster_message(pcmk__get_node(0, peer->host, NULL,
                                             pcmk__node_search_cluster),
                              crm_msg_stonith_ng, remote_op, FALSE);
         peer->tried = TRUE;
         free_xml(remote_op);
         return;
 
     } else if (op->phase == st_phase_on) {
         /* A remapped "on" cannot be executed, but the node was already
          * turned off successfully, so ignore the error and continue.
          */
         crm_warn("Ignoring %s 'on' failure (no capable peers) targeting %s "
                  "after successful 'off'", device, op->target);
         advance_topology_device_in_level(op, device, NULL);
         return;
 
     } else if (op->owner == FALSE) {
         crm_err("Fencing (%s) targeting %s for client %s is not ours to control",
                 op->action, op->target, op->client_name);
 
     } else if (op->query_timer == 0) {
         /* We've exhausted all available peers */
         crm_info("No remaining peers capable of fencing (%s) %s for client %s "
                  CRM_XS " state=%s", op->action, op->target, op->client_name,
                  stonith_op_state_str(op->state));
         CRM_CHECK(op->state < st_done, return);
         finalize_timed_out_op(op, "All nodes failed, or are unable, to "
                                   "fence target");
 
     } else if(op->replies >= op->replies_expected || op->replies >= fencing_active_peers()) {
         /* if the operation never left the query state,
          * but we have all the expected replies, then no devices
          * are available to execute the fencing operation. */
 
         if(stonith_watchdog_timeout_ms > 0 && pcmk__str_eq(device,
            STONITH_WATCHDOG_ID, pcmk__str_null_matches)) {
             if (check_watchdog_fencing_and_wait(op)) {
                 return;
             }
         }
 
         if (op->state == st_query) {
             crm_info("No peers (out of %d) have devices capable of fencing "
                      "(%s) %s for client %s " CRM_XS " state=%s",
                      op->replies, op->action, op->target, op->client_name,
                      stonith_op_state_str(op->state));
 
             pcmk__reset_result(&op->result);
             pcmk__set_result(&op->result, CRM_EX_ERROR,
                              PCMK_EXEC_NO_FENCE_DEVICE, NULL);
         } else {
             if (pcmk_is_set(op->call_options, st_opt_topology)) {
                 pcmk__reset_result(&op->result);
                 pcmk__set_result(&op->result, CRM_EX_ERROR,
                                  PCMK_EXEC_NO_FENCE_DEVICE, NULL);
             }
             /* ... else use existing result from previous failed attempt
              * (topology is not in use, and no devices remain to be attempted).
              * Overwriting the result with PCMK_EXEC_NO_FENCE_DEVICE would
              * prevent finalize_op() from setting the correct delegate if
              * needed.
              */
 
             crm_info("No peers (out of %d) are capable of fencing (%s) %s "
                      "for client %s " CRM_XS " state=%s",
                      op->replies, op->action, op->target, op->client_name,
                      stonith_op_state_str(op->state));
         }
 
         op->state = st_failed;
         finalize_op(op, NULL, false);
 
     } else {
         crm_info("Waiting for additional peers capable of fencing (%s) %s%s%s "
                  "for client %s " CRM_XS " id=%.8s",
                  op->action, op->target, (device? " using " : ""),
                  (device? device : ""), op->client_name, op->id);
     }
 }
 
 /*!
  * \internal
  * \brief Comparison function for sorting query results
  *
  * \param[in] a  GList item to compare
  * \param[in] b  GList item to compare
  *
  * \return Per the glib documentation, "a negative integer if the first value
  *         comes before the second, 0 if they are equal, or a positive integer
  *         if the first value comes after the second."
  */
 static gint
 sort_peers(gconstpointer a, gconstpointer b)
 {
     const peer_device_info_t *peer_a = a;
     const peer_device_info_t *peer_b = b;
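     // Sort in descending order of reported device count (more devices first)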
 
     return (peer_b->ndevices - peer_a->ndevices);
 }
 
 /*!
  * \internal
  * \brief Determine if all the devices in the topology are found or not
  *
  * \param[in] op  Fencing operation with topology to check
  */
 static gboolean
 all_topology_devices_found(const remote_fencing_op_t *op)
 {
     GList *device = NULL;
     GList *iter = NULL;
     device_properties_t *match = NULL;
     stonith_topology_t *tp = NULL;
     gboolean skip_target = FALSE;
     int i;
 
     tp = find_topology_for_host(op->target);
     if (!tp) {
         return FALSE;
     }
     if (pcmk__is_fencing_action(op->action)) {
         /* Don't count the devices on the target node if we are killing
          * the target node. */
         skip_target = TRUE;
     }
 
     for (i = 0; i < ST_LEVEL_MAX; i++) {
         for (device = tp->levels[i]; device; device = device->next) {
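             /* A device counts as found if any peer reports it, excluding the
              * target's own report when the target itself is being fenced */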
             match = NULL;
             for (iter = op->query_results; iter && !match; iter = iter->next) {
                 peer_device_info_t *peer = iter->data;
 
                 if (skip_target && pcmk__str_eq(peer->host, op->target, pcmk__str_casei)) {
                     continue;
                 }
                 match = find_peer_device(op, peer, device->data, st_device_supports_none);
             }
             if (!match) {
                 return FALSE;
             }
         }
     }
 
     return TRUE;
 }
 
 /*!
  * \internal
  * \brief Parse action-specific device properties from XML
  *
  * \param[in]     xml     XML element containing the properties
  * \param[in]     peer    Name of peer that sent XML (for logs)
  * \param[in]     device  Device ID (for logs)
  * \param[in]     action  Action the properties relate to (for logs)
  * \param[in,out] op      Fencing operation that properties are being parsed for
  * \param[in]     phase   Phase the properties relate to
  * \param[in,out] props   Device properties to update
  */
 static void
 parse_action_specific(const xmlNode *xml, const char *peer, const char *device,
                       const char *action, remote_fencing_op_t *op,
                       enum st_remap_phase phase, device_properties_t *props)
 {
     props->custom_action_timeout[phase] = 0;
     crm_element_value_int(xml, PCMK__XA_ST_ACTION_TIMEOUT,
                           &props->custom_action_timeout[phase]);
     if (props->custom_action_timeout[phase]) {
         crm_trace("Peer %s with device %s returned %s action timeout %d",
                   peer, device, action, props->custom_action_timeout[phase]);
     }
 
     props->delay_max[phase] = 0;
     crm_element_value_int(xml, PCMK__XA_ST_DELAY_MAX, &props->delay_max[phase]);
     if (props->delay_max[phase]) {
         crm_trace("Peer %s with device %s returned maximum of random delay %d for %s",
                   peer, device, props->delay_max[phase], action);
     }
 
     props->delay_base[phase] = 0;
     crm_element_value_int(xml, PCMK__XA_ST_DELAY_BASE,
                           &props->delay_base[phase]);
     if (props->delay_base[phase]) {
         crm_trace("Peer %s with device %s returned base delay %d for %s",
                   peer, device, props->delay_base[phase], action);
     }
 
     /* Handle devices with automatic unfencing */
     if (pcmk__str_eq(action, PCMK_ACTION_ON, pcmk__str_none)) {
         int required = 0;
 
         crm_element_value_int(xml, PCMK__XA_ST_REQUIRED, &required);
         if (required) {
             crm_trace("Peer %s requires device %s to execute for action %s",
                       peer, device, action);
             add_required_device(op, device);
         }
     }
 
     /* If a reboot is remapped to off+on, it's possible that a node is allowed
      * to perform one action but not another.
      */
     if (pcmk__xe_attr_is_true(xml, PCMK__XA_ST_ACTION_DISALLOWED)) {
         props->disallowed[phase] = TRUE;
         crm_trace("Peer %s is disallowed from executing %s for device %s",
                   peer, action, device);
     }
 }
 
 /*!
  * \internal
  * \brief Parse one device's properties from peer's XML query reply
  *
  * \param[in]     xml       XML node containing device properties
  * \param[in,out] op        Operation that query and reply relate to
  * \param[in,out] peer      Peer's device information
  * \param[in]     device    ID of device being parsed
  */
 static void
 add_device_properties(const xmlNode *xml, remote_fencing_op_t *op,
                       peer_device_info_t *peer, const char *device)
 {
     xmlNode *child;
     int verified = 0;
     device_properties_t *props = calloc(1, sizeof(device_properties_t));
     int flags = st_device_supports_on; /* Peers running old versions that don't set the flag are assumed to support the 'on' action */
 
     /* Add a new entry to this peer's devices list */
     CRM_ASSERT(props != NULL);
     g_hash_table_insert(peer->devices, strdup(device), props);
 
     /* Peers with verified (monitored) access will be preferred */
     crm_element_value_int(xml, PCMK__XA_ST_MONITOR_VERIFIED, &verified);
     if (verified) {
         crm_trace("Peer %s has confirmed a verified device %s",
                   peer->host, device);
         props->verified = TRUE;
     }
 
     crm_element_value_int(xml, F_STONITH_DEVICE_SUPPORT_FLAGS, &flags);
     props->device_support_flags = flags;
 
     /* Parse action-specific device properties */
     parse_action_specific(xml, peer->host, device, op_requested_action(op),
                           op, st_phase_requested, props);
     for (child = pcmk__xml_first_child(xml); child != NULL;
          child = pcmk__xml_next(child)) {
         /* Replies for "reboot" operations will include the action-specific
          * values for "off" and "on" in child elements, just in case the reboot
          * winds up getting remapped.
          */
         if (pcmk__str_eq(pcmk__xe_id(child), PCMK_ACTION_OFF, pcmk__str_none)) {
             parse_action_specific(child, peer->host, device, PCMK_ACTION_OFF,
                                   op, st_phase_off, props);
 
         } else if (pcmk__str_eq(pcmk__xe_id(child), PCMK_ACTION_ON,
                                 pcmk__str_none)) {
             parse_action_specific(child, peer->host, device, PCMK_ACTION_ON,
                                   op, st_phase_on, props);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Parse a peer's XML query reply and add it to operation's results
  *
  * \param[in,out] op        Operation that query and reply relate to
  * \param[in]     host      Name of peer that sent this reply
  * \param[in]     ndevices  Number of devices expected in reply
  * \param[in]     xml       XML node containing device list
  *
  * \return Newly allocated result structure with parsed reply
  */
 static peer_device_info_t *
 add_result(remote_fencing_op_t *op, const char *host, int ndevices,
            const xmlNode *xml)
 {
     peer_device_info_t *peer = calloc(1, sizeof(peer_device_info_t));
     xmlNode *child;
 
     // cppcheck seems not to understand the abort logic in CRM_CHECK
     // cppcheck-suppress memleak
     CRM_CHECK(peer != NULL, return NULL);
     peer->host = strdup(host);
     peer->devices = pcmk__strkey_table(free, free);
 
     /* Each child element describes one capable device available to the peer */
     for (child = pcmk__xml_first_child(xml); child != NULL;
          child = pcmk__xml_next(child)) {
         const char *device = pcmk__xe_id(child);
 
         if (device) {
             add_device_properties(child, op, peer, device);
         }
     }
 
     peer->ndevices = g_hash_table_size(peer->devices);
     CRM_CHECK(ndevices == peer->ndevices,
               crm_err("Query claimed to have %d device%s but %d found",
                       ndevices, pcmk__plural_s(ndevices), peer->ndevices));
 
     op->query_results = g_list_insert_sorted(op->query_results, peer, sort_peers);
     return peer;
 }
 
 /*!
  * \internal
  * \brief Handle a peer's reply to our fencing query
  *
  * Parse a query result from XML and store it in the remote operation
  * table, and when enough replies have been received, issue a fencing request.
  *
  * \param[in] msg  XML reply received
  *
  * \return pcmk_ok on success, -errno on error
  *
  * \note See initiate_remote_stonith_op() for how the XML query was initially
  *       formed, and stonith_query() for how the peer formed its XML reply.
  */
 int
 process_remote_stonith_query(xmlNode *msg)
 {
     int ndevices = 0;
     gboolean host_is_target = FALSE;
     gboolean have_all_replies = FALSE;
     const char *id = NULL;
     const char *host = NULL;
     remote_fencing_op_t *op = NULL;
     peer_device_info_t *peer = NULL;
     uint32_t replies_expected;
     xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_REMOTE_OP, msg, LOG_ERR);
 
     CRM_CHECK(dev != NULL, return -EPROTO);
 
     id = crm_element_value(dev, PCMK__XA_ST_REMOTE_OP);
     CRM_CHECK(id != NULL, return -EPROTO);
 
     dev = get_xpath_object("//@" PCMK__XA_ST_AVAILABLE_DEVICES, msg, LOG_ERR);
     CRM_CHECK(dev != NULL, return -EPROTO);
     crm_element_value_int(dev, PCMK__XA_ST_AVAILABLE_DEVICES, &ndevices);
 
     op = g_hash_table_lookup(stonith_remote_op_list, id);
     if (op == NULL) {
         crm_debug("Received query reply for unknown or expired operation %s",
                   id);
         return -EOPNOTSUPP;
     }
 
     replies_expected = fencing_active_peers();
     if (op->replies_expected < replies_expected) {
         replies_expected = op->replies_expected;
     }
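     /* replies_expected is now the smaller of the currently active peer count
      * and the count recorded for this operation */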
     if ((++op->replies >= replies_expected) && (op->state == st_query)) {
         have_all_replies = TRUE;
     }
     host = crm_element_value(msg, PCMK__XA_SRC);
     host_is_target = pcmk__str_eq(host, op->target, pcmk__str_casei);
 
     crm_info("Query result %d of %d from %s for %s/%s (%d device%s) %s",
              op->replies, replies_expected, host,
              op->target, op->action, ndevices, pcmk__plural_s(ndevices), id);
     if (ndevices > 0) {
         peer = add_result(op, host, ndevices, dev);
     }
 
     pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
 
     if (pcmk_is_set(op->call_options, st_opt_topology)) {
         /* If we start the fencing before all the topology results are in,
          * it is possible fencing levels will be skipped because of the missing
          * query results. */
         if (op->state == st_query && all_topology_devices_found(op)) {
             /* All the query results are in for the topology, start the fencing ops. */
             crm_trace("All topology devices found");
             request_peer_fencing(op, peer);
 
         } else if (have_all_replies) {
             crm_info("All topology query replies have arrived, continuing (%d expected/%d received) ",
                      replies_expected, op->replies);
             request_peer_fencing(op, NULL);
         }
 
     } else if (op->state == st_query) {
         int nverified = count_peer_devices(op, peer, TRUE,
                                            fenced_support_flag(op->action));
 
         /* We have a result for a non-topology fencing op that looks promising,
          * go ahead and start fencing before query timeout */
         if ((peer != NULL) && !host_is_target && nverified) {
             /* we have a verified device living on a peer that is not the target */
             crm_trace("Found %d verified device%s",
                       nverified, pcmk__plural_s(nverified));
             request_peer_fencing(op, peer);
 
         } else if (have_all_replies) {
             crm_info("All query replies have arrived, continuing (%d expected/%d received) ",
                      replies_expected, op->replies);
             request_peer_fencing(op, NULL);
 
         } else {
             crm_trace("Waiting for more peer results before launching fencing operation");
         }
 
     } else if ((peer != NULL) && (op->state == st_done)) {
         crm_info("Discarding query result from %s (%d device%s): "
                  "Operation is %s", peer->host,
                  peer->ndevices, pcmk__plural_s(peer->ndevices),
                  stonith_op_state_str(op->state));
     }
 
     return pcmk_ok;
 }
 
 /*!
  * \internal
  * \brief Handle a peer's reply to a fencing request
  *
  * Parse a fencing reply from XML, and either finalize the operation
  * or attempt another device as appropriate.
  *
  * \param[in] msg  XML reply received
  */
 void
 fenced_process_fencing_reply(xmlNode *msg)
 {
     const char *id = NULL;
     const char *device = NULL;
     remote_fencing_op_t *op = NULL;
     xmlNode *dev = get_xpath_object("//@" PCMK__XA_ST_REMOTE_OP, msg, LOG_ERR);
     pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
 
     CRM_CHECK(dev != NULL, return);
 
     id = crm_element_value(dev, PCMK__XA_ST_REMOTE_OP);
     CRM_CHECK(id != NULL, return);
 
     dev = stonith__find_xe_with_result(msg);
     CRM_CHECK(dev != NULL, return);
 
     stonith__xe_get_result(dev, &result);
 
     device = crm_element_value(dev, F_STONITH_DEVICE);
 
     if (stonith_remote_op_list) {
         op = g_hash_table_lookup(stonith_remote_op_list, id);
     }
 
     if ((op == NULL) && pcmk__result_ok(&result)) {
         /* Record successful fencing operations */
         const char *client_id = crm_element_value(dev, PCMK__XA_ST_CLIENTID);
 
         op = create_remote_stonith_op(client_id, dev, TRUE);
     }
 
     if (op == NULL) {
         /* Could be for an event that began before we started */
         /* TODO: Record the op for later querying */
         crm_info("Received peer result of unknown or expired operation %s", id);
         pcmk__reset_result(&result);
         return;
     }
 
     pcmk__reset_result(&op->result);
     op->result = result; // The operation takes ownership of the result
 
     if (op->devices && device && !pcmk__str_eq(op->devices->data, device, pcmk__str_casei)) {
         crm_err("Received outdated reply for device %s (instead of %s) to "
                 "fence (%s) %s. Operation already timed out at peer level.",
                 device, (const char *) op->devices->data, op->action, op->target);
         return;
     }
 
     if (pcmk__str_eq(crm_element_value(msg, PCMK__XA_SUBT), "broadcast",
                      pcmk__str_none)) {
 
         if (pcmk__result_ok(&op->result)) {
             op->state = st_done;
         } else {
             op->state = st_failed;
         }
         finalize_op(op, msg, false);
         return;
 
     } else if (!pcmk__str_eq(op->originator, stonith_our_uname, pcmk__str_casei)) {
         /* If this isn't a remote level broadcast, and we are not the
          * originator of the operation, we should not be receiving this msg. */
         crm_err("Received non-broadcast fencing result for operation %.8s "
                 "we do not own (device %s targeting %s)",
                 op->id, device, op->target);
         return;
     }
 
     if (pcmk_is_set(op->call_options, st_opt_topology)) {
         const char *device = NULL;
         const char *reason = op->result.exit_reason;
 
         /* We own the op, and it is complete. Broadcast the result to all
          * nodes and notify our local clients. */
         if (op->state == st_done) {
             finalize_op(op, msg, false);
             return;
         }
 
         device = crm_element_value(msg, F_STONITH_DEVICE);
 
         if ((op->phase == 2) && !pcmk__result_ok(&op->result)) {
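             /* A phase value of 2 corresponds to st_phase_on, the remapped
              * "on" phase */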
             /* A remapped "on" failed, but the node was already turned off
              * successfully, so ignore the error and continue.
              */
             crm_warn("Ignoring %s 'on' failure (%s%s%s) targeting %s "
                      "after successful 'off'",
                      device, pcmk_exec_status_str(op->result.execution_status),
                      (reason == NULL)? "" : ": ",
                      (reason == NULL)? "" : reason,
                      op->target);
             pcmk__set_result(&op->result, CRM_EX_OK, PCMK_EXEC_DONE, NULL);
         } else {
             crm_notice("Action '%s' targeting %s%s%s on behalf of %s@%s: "
                        "%s%s%s%s",
                        op->action, op->target,
                        ((device == NULL)? "" : " using "),
                        ((device == NULL)? "" : device),
                        op->client_name,
                        op->originator,
                        pcmk_exec_status_str(op->result.execution_status),
                        (reason == NULL)? "" : " (",
                        (reason == NULL)? "" : reason,
                        (reason == NULL)? "" : ")");
         }
 
         if (pcmk__result_ok(&op->result)) {
             /* An operation completed successfully. Try another device if
              * necessary, otherwise mark the operation as done. */
             advance_topology_device_in_level(op, device, msg);
             return;
         } else {
             /* This device failed, time to try another topology level. If no other
              * levels are available, mark this operation as failed and report results. */
             if (advance_topology_level(op, false) != pcmk_rc_ok) {
                 op->state = st_failed;
                 finalize_op(op, msg, false);
                 return;
             }
         }
 
     } else if (pcmk__result_ok(&op->result) && (op->devices == NULL)) {
         op->state = st_done;
         finalize_op(op, msg, false);
         return;
 
     } else if ((op->result.execution_status == PCMK_EXEC_TIMEOUT)
                && (op->devices == NULL)) {
         /* If the operation timed out don't bother retrying other peers. */
         op->state = st_failed;
         finalize_op(op, msg, false);
         return;
 
     } else {
         /* Fall through and attempt the fencing action using another peer */
     }
 
     /* Retry on failure */
     crm_trace("Next for %s on behalf of %s@%s (result was: %s)",
               op->target, op->originator, op->client_name,
               pcmk_exec_status_str(op->result.execution_status));
     request_peer_fencing(op, NULL);
 }
 
 gboolean
 stonith_check_fence_tolerance(int tolerance, const char *target, const char *action)
 {
     GHashTableIter iter;
     time_t now = time(NULL);
     remote_fencing_op_t *rop = NULL;
 
     if (tolerance <= 0 || !stonith_remote_op_list || target == NULL ||
         action == NULL) {
         return FALSE;
     }
 
     g_hash_table_iter_init(&iter, stonith_remote_op_list);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&rop)) {
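         /* Look for a successfully completed operation with the same target
          * and action within the tolerance window */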
         if (strcmp(rop->target, target) != 0) {
             continue;
         } else if (rop->state != st_done) {
             continue;
         /* We don't have to worry about remapped reboots here
          * because if state is done, any remapping has been undone
          */
         } else if (strcmp(rop->action, action) != 0) {
             continue;
         } else if ((rop->completed + tolerance) < now) {
             continue;
         }
 
         crm_notice("Target %s was fenced (%s) less than %ds ago by %s on behalf of %s",
                    target, action, tolerance, rop->delegate, rop->originator);
         return TRUE;
     }
     return FALSE;
 }
diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c
index 4c83bab943..62e431fc8a 100644
--- a/daemons/fenced/pacemaker-fenced.c
+++ b/daemons/fenced/pacemaker-fenced.c
@@ -1,998 +1,998 @@
 /*
  * Copyright 2009-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>  // PRIu32, PRIx32
 
 #include <crm/crm.h>
 #include <crm/common/cmdline_internal.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipc_internal.h>
 #include <crm/common/output_internal.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 
 #include <crm/common/mainloop.h>
 
 #include <crm/cib/internal.h>
 
 #include <pacemaker-fenced.h>
 
 #define SUMMARY "daemon for executing fencing devices in a Pacemaker cluster"
 
 char *stonith_our_uname = NULL;
 long long stonith_watchdog_timeout_ms = 0;
 GList *stonith_watchdog_targets = NULL;
 
 static GMainLoop *mainloop = NULL;
 
 gboolean stand_alone = FALSE;
 gboolean stonith_shutdown_flag = FALSE;
 
 static qb_ipcs_service_t *ipcs = NULL;
 static pcmk__output_t *out = NULL;
 
 pcmk__supported_format_t formats[] = {
     PCMK__SUPPORTED_FORMAT_NONE,
     PCMK__SUPPORTED_FORMAT_TEXT,
     PCMK__SUPPORTED_FORMAT_XML,
     { NULL, NULL, NULL }
 };
 
 static struct {
     bool no_cib_connect;
     gchar **log_files;
 } options;
 
 crm_exit_t exit_code = CRM_EX_OK;
 
 static void stonith_cleanup(void);
 
 static int32_t
 st_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
 {
     if (stonith_shutdown_flag) {
         crm_info("Ignoring new client [%d] during shutdown",
                  pcmk__client_pid(c));
         return -ECONNREFUSED;
     }
 
     if (pcmk__new_client(c, uid, gid) == NULL) {
         return -ENOMEM;
     }
     return 0;
 }
 
 /* Exit code means? */
 static int32_t
 st_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
 {
     uint32_t id = 0;
     uint32_t flags = 0;
     int call_options = 0;
     xmlNode *request = NULL;
     pcmk__client_t *c = pcmk__find_client(qbc);
     const char *op = NULL;
 
     if (c == NULL) {
         crm_info("Invalid client: %p", qbc);
         return 0;
     }
 
     request = pcmk__client_data2xml(c, data, &id, &flags);
     if (request == NULL) {
         pcmk__ipc_send_ack(c, id, flags, PCMK__XE_NACK, NULL, CRM_EX_PROTOCOL);
         return 0;
     }
 
 
     op = crm_element_value(request, PCMK__XA_CRM_TASK);
     if(pcmk__str_eq(op, CRM_OP_RM_NODE_CACHE, pcmk__str_casei)) {
         crm_xml_add(request, PCMK__XA_T, T_STONITH_NG);
         crm_xml_add(request, PCMK__XA_ST_OP, op);
         crm_xml_add(request, PCMK__XA_ST_CLIENTID, c->id);
         crm_xml_add(request, PCMK__XA_ST_CLIENTNAME, pcmk__client_name(c));
-        crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname);
+        crm_xml_add(request, PCMK__XA_ST_CLIENTNODE, stonith_our_uname);
 
         send_cluster_message(NULL, crm_msg_stonith_ng, request, FALSE);
         free_xml(request);
         return 0;
     }
 
     if (c->name == NULL) {
         const char *value = crm_element_value(request, PCMK__XA_ST_CLIENTNAME);
 
         c->name = crm_strdup_printf("%s.%u", pcmk__s(value, "unknown"), c->pid);
     }
 
     crm_element_value_int(request, PCMK__XA_ST_CALLOPT, &call_options);
     crm_trace("Flags %#08" PRIx32 "/%#08x for command %" PRIu32
               " from client %s", flags, call_options, id, pcmk__client_name(c));
 
     if (pcmk_is_set(call_options, st_opt_sync_call)) {
         CRM_ASSERT(flags & crm_ipc_client_response);
         CRM_LOG_ASSERT(c->request_id == 0);     /* This means the client has two synchronous events in-flight */
         c->request_id = id;     /* Reply only to the last one */
     }
 
     crm_xml_add(request, PCMK__XA_ST_CLIENTID, c->id);
     crm_xml_add(request, PCMK__XA_ST_CLIENTNAME, pcmk__client_name(c));
-    crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname);
+    crm_xml_add(request, PCMK__XA_ST_CLIENTNODE, stonith_our_uname);
 
     crm_log_xml_trace(request, "ipc-received");
     stonith_command(c, id, flags, request, NULL);
 
     free_xml(request);
     return 0;
 }
 
 /* Error code means? */
 static int32_t
 st_ipc_closed(qb_ipcs_connection_t * c)
 {
     pcmk__client_t *client = pcmk__find_client(c);
 
     if (client == NULL) {
         return 0;
     }
 
     crm_trace("Connection %p closed", c);
     pcmk__free_client(client);
 
     /* 0 means: yes, go ahead and destroy the connection */
     return 0;
 }
 
 static void
 st_ipc_destroy(qb_ipcs_connection_t * c)
 {
     crm_trace("Connection %p destroyed", c);
     st_ipc_closed(c);
 }
 
 static void
 stonith_peer_callback(xmlNode * msg, void *private_data)
 {
     const char *remote_peer = crm_element_value(msg, PCMK__XA_SRC);
     const char *op = crm_element_value(msg, PCMK__XA_ST_OP);
 
     if (pcmk__str_eq(op, "poke", pcmk__str_none)) {
         return;
     }
 
     crm_log_xml_trace(msg, "Peer[inbound]");
     stonith_command(NULL, 0, 0, msg, remote_peer);
 }
 
 #if SUPPORT_COROSYNC
 static void
 stonith_peer_ais_callback(cpg_handle_t handle,
                           const struct cpg_name *groupName,
                           uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len)
 {
     uint32_t kind = 0;
     xmlNode *xml = NULL;
     const char *from = NULL;
     char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from);
 
     if(data == NULL) {
         return;
     }
     if (kind == crm_class_cluster) {
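         /* Only cluster messages are parsed as XML and passed on to the
          * fencer's peer callback; other message kinds are ignored */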
         xml = string2xml(data);
         if (xml == NULL) {
             crm_err("Invalid XML: '%.120s'", data);
             free(data);
             return;
         }
         crm_xml_add(xml, PCMK__XA_SRC, from);
         stonith_peer_callback(xml, NULL);
     }
 
     free_xml(xml);
     free(data);
     return;
 }
 
 static void
 stonith_peer_cs_destroy(gpointer user_data)
 {
     crm_crit("Lost connection to cluster layer, shutting down");
     stonith_shutdown(0);
 }
 #endif
 
 void
 do_local_reply(const xmlNode *notify_src, pcmk__client_t *client,
                int call_options)
 {
     /* send callback to originating child */
     int local_rc = pcmk_rc_ok;
     int rid = 0;
     uint32_t ipc_flags = crm_ipc_server_event;
 
     if (pcmk_is_set(call_options, st_opt_sync_call)) {
         CRM_LOG_ASSERT(client->request_id);
         rid = client->request_id;
         client->request_id = 0;
         ipc_flags = crm_ipc_flags_none;
     }
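     /* For synchronous calls, reply to the recorded request ID; for
      * asynchronous calls, deliver the reply as an unsolicited server event */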
 
     local_rc = pcmk__ipc_send_xml(client, rid, notify_src, ipc_flags);
     if (local_rc == pcmk_rc_ok) {
         crm_trace("Sent response %d to client %s",
                   rid, pcmk__client_name(client));
     } else {
         crm_warn("%synchronous reply to client %s failed: %s",
                  (pcmk_is_set(call_options, st_opt_sync_call)? "S" : "As"),
                  pcmk__client_name(client), pcmk_rc_str(local_rc));
     }
 }
 
 uint64_t
 get_stonith_flag(const char *name)
 {
     if (pcmk__str_eq(name, T_STONITH_NOTIFY_FENCE, pcmk__str_casei)) {
         return st_callback_notify_fence;
 
     } else if (pcmk__str_eq(name, STONITH_OP_DEVICE_ADD, pcmk__str_casei)) {
         return st_callback_device_add;
 
     } else if (pcmk__str_eq(name, STONITH_OP_DEVICE_DEL, pcmk__str_casei)) {
         return st_callback_device_del;
 
     } else if (pcmk__str_eq(name, T_STONITH_NOTIFY_HISTORY, pcmk__str_casei)) {
         return st_callback_notify_history;
 
     } else if (pcmk__str_eq(name, T_STONITH_NOTIFY_HISTORY_SYNCED, pcmk__str_casei)) {
         return st_callback_notify_history_synced;
 
     }
     return st_callback_unknown;
 }
 
 static void
 stonith_notify_client(gpointer key, gpointer value, gpointer user_data)
 {
 
     const xmlNode *update_msg = user_data;
     pcmk__client_t *client = value;
     const char *type = NULL;
 
     CRM_CHECK(client != NULL, return);
     CRM_CHECK(update_msg != NULL, return);
 
     type = crm_element_value(update_msg, PCMK__XA_SUBT);
     CRM_CHECK(type != NULL, crm_log_xml_err(update_msg, "notify"); return);
 
     if (client->ipcs == NULL) {
         crm_trace("Skipping client with NULL channel");
         return;
     }
 
     if (pcmk_is_set(client->flags, get_stonith_flag(type))) {
         int rc = pcmk__ipc_send_xml(client, 0, update_msg,
                                     crm_ipc_server_event);
 
         if (rc != pcmk_rc_ok) {
             crm_warn("%s notification of client %s failed: %s "
                      CRM_XS " id=%.8s rc=%d", type, pcmk__client_name(client),
                      pcmk_rc_str(rc), client->id, rc);
         } else {
             crm_trace("Sent %s notification to client %s",
                       type, pcmk__client_name(client));
         }
     }
 }
 
 void
 do_stonith_async_timeout_update(const char *client_id, const char *call_id, int timeout)
 {
     pcmk__client_t *client = NULL;
     xmlNode *notify_data = NULL;
 
     if (!timeout || !call_id || !client_id) {
         return;
     }
 
     client = pcmk__find_client_by_id(client_id);
     if (!client) {
         return;
     }
 
     notify_data = create_xml_node(NULL, T_STONITH_TIMEOUT_VALUE);
     crm_xml_add(notify_data, PCMK__XA_T, T_STONITH_TIMEOUT_VALUE);
     crm_xml_add(notify_data, PCMK__XA_ST_CALLID, call_id);
     crm_xml_add_int(notify_data, PCMK__XA_ST_TIMEOUT, timeout);
 
     crm_trace("timeout update is %d for client %s and call id %s", timeout, client_id, call_id);
 
     if (client) {
         pcmk__ipc_send_xml(client, 0, notify_data, crm_ipc_server_event);
     }
 
     free_xml(notify_data);
 }
 
 /*!
  * \internal
  * \brief Notify relevant IPC clients of a fencing operation result
  *
  * \param[in] type     Notification type
  * \param[in] result   Result of fencing operation (assume success if NULL)
  * \param[in] data     If not NULL, add to notification as call data
  */
 void
 fenced_send_notification(const char *type, const pcmk__action_result_t *result,
                          xmlNode *data)
 {
     /* TODO: Standardize the contents of data */
     xmlNode *update_msg = create_xml_node(NULL, "notify");
 
     CRM_LOG_ASSERT(type != NULL);
 
     crm_xml_add(update_msg, PCMK__XA_T, T_STONITH_NOTIFY);
     crm_xml_add(update_msg, PCMK__XA_SUBT, type);
     crm_xml_add(update_msg, PCMK__XA_ST_OP, type);
     stonith__xe_set_result(update_msg, result);
 
     if (data != NULL) {
         add_message_xml(update_msg, PCMK__XA_ST_CALLDATA, data);
     }
 
     crm_trace("Notifying clients");
     pcmk__foreach_ipc_client(stonith_notify_client, update_msg);
     free_xml(update_msg);
     crm_trace("Notify complete");
 }
 
 /*!
  * \internal
  * \brief Send notifications for a configuration change to subscribed clients
  *
  * \param[in] op      Notification type (STONITH_OP_DEVICE_ADD,
  *                    STONITH_OP_DEVICE_DEL, STONITH_OP_LEVEL_ADD, or
  *                    STONITH_OP_LEVEL_DEL)
  * \param[in] result  Operation result
  * \param[in] desc    Description of what changed
  * \param[in] active  Current number of devices or topologies in use
  */
 static void
 send_config_notification(const char *op, const pcmk__action_result_t *result,
                          const char *desc, int active)
 {
     xmlNode *notify_data = create_xml_node(NULL, op);
 
     CRM_CHECK(notify_data != NULL, return);
 
     crm_xml_add(notify_data, F_STONITH_DEVICE, desc);
     crm_xml_add_int(notify_data, F_STONITH_ACTIVE, active);
 
     fenced_send_notification(op, result, notify_data);
     free_xml(notify_data);
 }
 
 /*!
  * \internal
  * \brief Send notifications for a device change to subscribed clients
  *
  * \param[in] op      Notification type (STONITH_OP_DEVICE_ADD or
  *                    STONITH_OP_DEVICE_DEL)
  * \param[in] result  Operation result
  * \param[in] desc    ID of device that changed
  */
 void
 fenced_send_device_notification(const char *op,
                                 const pcmk__action_result_t *result,
                                 const char *desc)
 {
     send_config_notification(op, result, desc, g_hash_table_size(device_list));
 }
 
 /*!
  * \internal
  * \brief Send notifications for a topology level change to subscribed clients
  *
  * \param[in] op      Notification type (STONITH_OP_LEVEL_ADD or
  *                    STONITH_OP_LEVEL_DEL)
  * \param[in] result  Operation result
  * \param[in] desc    String representation of level (<target>[<level_index>])
  */
 void
 fenced_send_level_notification(const char *op,
                                const pcmk__action_result_t *result,
                                const char *desc)
 {
     send_config_notification(op, result, desc, g_hash_table_size(topology));
 }
 
 /*!
  * \internal
  * \brief Check whether a node does watchdog-fencing
  *
  * \param[in] node    Name of node to check
  *
  * \return TRUE if the node is found in stonith_watchdog_targets, or if
  *         stonith_watchdog_targets is empty (indicating that all nodes
  *         do watchdog-fencing)
  */
 gboolean
 node_does_watchdog_fencing(const char *node)
 {
     return ((stonith_watchdog_targets == NULL) ||
             pcmk__str_in_list(node, stonith_watchdog_targets, pcmk__str_casei));
 }
 
 void
 stonith_shutdown(int nsig)
 {
     crm_info("Terminating with %d clients", pcmk__ipc_client_count());
     stonith_shutdown_flag = TRUE;
     if (mainloop != NULL && g_main_loop_is_running(mainloop)) {
         g_main_loop_quit(mainloop);
     }
 }
 
 static void
 stonith_cleanup(void)
 {
     fenced_cib_cleanup();
     if (ipcs) {
         qb_ipcs_destroy(ipcs);
     }
 
     crm_peer_destroy();
     pcmk__client_cleanup();
     free_stonith_remote_op_list();
     free_topology_list();
     free_device_list();
     free_metadata_cache();
     fenced_unregister_handlers();
 
     free(stonith_our_uname);
     stonith_our_uname = NULL;
 }
 
 static gboolean
 stand_alone_cpg_cb(const gchar *option_name, const gchar *optarg, gpointer data,
                    GError **error)
 {
     stand_alone = FALSE;
     options.no_cib_connect = true;
     return TRUE;
 }
 
 struct qb_ipcs_service_handlers ipc_callbacks = {
     .connection_accept = st_ipc_accept,
     .connection_created = NULL,
     .msg_process = st_ipc_dispatch,
     .connection_closed = st_ipc_closed,
     .connection_destroyed = st_ipc_destroy
 };
 
 /*!
  * \internal
  * \brief Callback for peer status changes
  *
  * \param[in] type  What changed
  * \param[in] node  What peer had the change
  * \param[in] data  Previous value of what changed
  */
 static void
 st_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data)
 {
     if ((type != crm_status_processes)
         && !pcmk_is_set(node->flags, crm_remote_node)) {
         /*
          * This is a hack until we can send to a nodeid and/or we fix node name lookups
          * These messages are ignored in stonith_peer_callback()
          */
         xmlNode *query = create_xml_node(NULL, "stonith_command");
 
         crm_xml_add(query, PCMK__XA_T, T_STONITH_NG);
         crm_xml_add(query, PCMK__XA_ST_OP, "poke");
 
         crm_debug("Broadcasting our uname because of node %u", node->id);
         send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE);
 
         free_xml(query);
     }
 }
 
 static pcmk__cluster_option_t fencer_options[] = {
     /* name, old name, type, allowed values,
      * default value, validator,
      * context,
      * short description,
      * long description
      */
     {
         PCMK_STONITH_HOST_ARGUMENT, NULL, "string", NULL,
         "port", NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "An alternate parameter to supply instead of 'port'"),
         N_("Some devices do not support the standard 'port' parameter or may "
             "provide additional ones. Use this to specify an alternate, device-"
             "specific, parameter that should indicate the machine to be "
             "fenced. A value of \"none\" can be used to tell the cluster not "
             "to supply any additional parameters."),
     },
     {
         PCMK_STONITH_HOST_MAP, NULL, "string", NULL,
         "", NULL,
         pcmk__opt_context_none,
         N_("A mapping of node names to port numbers for devices that do not "
             "support node names."),
         N_("For example, \"node1:1;node2:2,3\" would tell the cluster to use "
             "port 1 for node1 and ports 2 and 3 for node2."),
     },
     {
         PCMK_STONITH_HOST_LIST, NULL, "string", NULL,
         "", NULL,
         pcmk__opt_context_none,
         N_("A list of nodes that can be targeted by this device (optional "
             "unless pcmk_host_check=\"static-list\")"),
         N_("For example, \"node1,node2,node3\"."),
     },
     {
         PCMK_STONITH_HOST_CHECK, NULL, "select",
             "dynamic-list, static-list, status, none",
         "dynamic-list", NULL,
         pcmk__opt_context_none,
         N_("How to determine which nodes can be targeted by the device"),
         N_("Use \"dynamic-list\" to query the device via the 'list' command; "
             "\"static-list\" to check the pcmk_host_list attribute; "
             "\"status\" to query the device via the 'status' command; or "
             "\"none\" to assume every device can fence every node."),
     },
     {
         PCMK_STONITH_DELAY_MAX, NULL, "time", NULL,
         "0s", NULL,
         pcmk__opt_context_none,
         N_("Enable a delay of no more than the time specified before "
             "executing fencing actions."),
         N_("Enable a delay of no more than the time specified before executing "
             "fencing actions. Pacemaker derives the overall delay by taking "
             "the value of pcmk_delay_base and adding a random delay value such "
             "that the sum is kept below this maximum."),
     },
     {
         PCMK_STONITH_DELAY_BASE, NULL, "string", NULL,
         "0s", NULL,
         pcmk__opt_context_none,
         N_("Enable a base delay for fencing actions and specify base delay "
             "value."),
         N_("This enables a static delay for fencing actions, which can help "
             "avoid \"death matches\" where two nodes try to fence each other "
             "at the same time. If pcmk_delay_max is also used, a random delay "
             "will be added such that the total delay is kept below that value. "
             "This can be set to a single time value to apply to any node "
             "targeted by this device (useful if a separate device is "
             "configured for each target), or to a node map (for example, "
             "\"node1:1s;node2:5\") to set a different value for each target."),
     },
     {
         PCMK_STONITH_ACTION_LIMIT, NULL, "integer", NULL,
         "1", NULL,
         pcmk__opt_context_none,
         N_("The maximum number of actions that can be performed in parallel "
             "on this device"),
         N_("Cluster property concurrent-fencing=\"true\" needs to be "
             "configured first. Then use this to specify the maximum number of "
             "actions that can be performed in parallel on this device. A value "
             "of -1 means an unlimited number of actions can be performed in "
             "parallel."),
     },
     {
         "pcmk_reboot_action", NULL, "string", NULL,
         PCMK_ACTION_REBOOT, NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "An alternate command to run instead of 'reboot'"),
         N_("Some devices do not support the standard commands or may provide "
             "additional ones. Use this to specify an alternate, device-"
             "specific, command that implements the 'reboot' action."),
     },
     {
         "pcmk_reboot_timeout", NULL, "time", NULL,
         "60s", NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "Specify an alternate timeout to use for 'reboot' actions instead "
             "of stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal. "
             "Use this to specify an alternate, device-specific, timeout for "
             "'reboot' actions."),
     },
     {
         "pcmk_reboot_retries", NULL, "integer", NULL,
         "2", NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "The maximum number of times to try the 'reboot' command within "
             "the timeout period"),
         N_("Some devices do not support multiple connections. Operations may "
             "\"fail\" if the device is busy with another task. In that case, "
             "Pacemaker will automatically retry the operation if there is time "
             "remaining. Use this option to alter the number of times Pacemaker "
             "tries a 'reboot' action before giving up."),
     },
     {
         "pcmk_off_action", NULL, "string", NULL,
         PCMK_ACTION_OFF, NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "An alternate command to run instead of 'off'"),
         N_("Some devices do not support the standard commands or may provide "
             "additional ones. Use this to specify an alternate, device-"
             "specific, command that implements the 'off' action."),
     },
     {
         "pcmk_off_timeout", NULL, "time", NULL,
         "60s", NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "Specify an alternate timeout to use for 'off' actions instead of "
             "stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal. "
             "Use this to specify an alternate, device-specific, timeout for "
             "'off' actions."),
     },
     {
         "pcmk_off_retries", NULL, "integer", NULL,
         "2", NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "The maximum number of times to try the 'off' command within the "
             "timeout period"),
         N_("Some devices do not support multiple connections. Operations may "
             "\"fail\" if the device is busy with another task. In that case, "
             "Pacemaker will automatically retry the operation if there is time "
             "remaining. Use this option to alter the number of times Pacemaker "
             "tries a 'off' action before giving up."),
     },
     {
         "pcmk_on_action", NULL, "string", NULL,
         PCMK_ACTION_ON, NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "An alternate command to run instead of 'on'"),
         N_("Some devices do not support the standard commands or may provide "
             "additional ones. Use this to specify an alternate, device-"
             "specific, command that implements the 'on' action."),
     },
     {
         "pcmk_on_timeout", NULL, "time", NULL,
         "60s", NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "Specify an alternate timeout to use for 'on' actions instead of "
             "stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal. "
             "Use this to specify an alternate, device-specific, timeout for "
             "'on' actions."),
     },
     {
         "pcmk_on_retries", NULL, "integer", NULL,
         "2", NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "The maximum number of times to try the 'on' command within the "
             "timeout period"),
         N_("Some devices do not support multiple connections. Operations may "
             "\"fail\" if the device is busy with another task. In that case, "
             "Pacemaker will automatically retry the operation if there is time "
             "remaining. Use this option to alter the number of times Pacemaker "
             "tries a 'on' action before giving up."),
     },
     {
         "pcmk_list_action", NULL, "string", NULL,
         PCMK_ACTION_LIST, NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "An alternate command to run instead of 'list'"),
         N_("Some devices do not support the standard commands or may provide "
             "additional ones. Use this to specify an alternate, device-"
             "specific, command that implements the 'list' action."),
     },
     {
         "pcmk_list_timeout", NULL, "time", NULL,
         "60s", NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "Specify an alternate timeout to use for 'list' actions instead of "
             "stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal. "
             "Use this to specify an alternate, device-specific, timeout for "
             "'list' actions."),
     },
     {
         "pcmk_list_retries", NULL, "integer", NULL,
         "2", NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "The maximum number of times to try the 'list' command within the "
             "timeout period"),
         N_("Some devices do not support multiple connections. Operations may "
             "\"fail\" if the device is busy with another task. In that case, "
             "Pacemaker will automatically retry the operation if there is time "
             "remaining. Use this option to alter the number of times Pacemaker "
             "tries a 'list' action before giving up."),
     },
     {
         "pcmk_monitor_action", NULL, "string", NULL,
         PCMK_ACTION_MONITOR, NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "An alternate command to run instead of 'monitor'"),
         N_("Some devices do not support the standard commands or may provide "
             "additional ones. Use this to specify an alternate, device-"
             "specific, command that implements the 'monitor' action."),
     },
     {
         "pcmk_monitor_timeout", NULL, "time", NULL,
         "60s", NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "Specify an alternate timeout to use for 'monitor' actions instead "
             "of stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal. "
             "Use this to specify an alternate, device-specific, timeout for "
             "'monitor' actions."),
     },
     {
         "pcmk_monitor_retries", NULL, "integer", NULL,
         "2", NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "The maximum number of times to try the 'monitor' command within "
             "the timeout period"),
         N_("Some devices do not support multiple connections. Operations may "
             "\"fail\" if the device is busy with another task. In that case, "
             "Pacemaker will automatically retry the operation if there is time "
             "remaining. Use this option to alter the number of times Pacemaker "
             "tries a 'monitor' action before giving up."),
     },
     {
         "pcmk_status_action", NULL, "string", NULL,
         PCMK_ACTION_STATUS, NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "An alternate command to run instead of 'status'"),
         N_("Some devices do not support the standard commands or may provide "
             "additional ones. Use this to specify an alternate, device-"
             "specific, command that implements the 'status' action."),
     },
     {
         "pcmk_status_timeout", NULL, "time", NULL,
         "60s", NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "Specify an alternate timeout to use for 'status' actions instead "
             "of stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal. "
             "Use this to specify an alternate, device-specific, timeout for "
             "'status' actions."),
     },
     {
         "pcmk_status_retries", NULL, "integer", NULL,
         "2", NULL,
         pcmk__opt_context_none,
         N_("*** Advanced Use Only *** "
             "The maximum number of times to try the 'status' command within "
             "the timeout period"),
         N_("Some devices do not support multiple connections. Operations may "
             "\"fail\" if the device is busy with another task. In that case, "
             "Pacemaker will automatically retry the operation if there is time "
             "remaining. Use this option to alter the number of times Pacemaker "
             "tries a 'status' action before giving up."),
     },
 };
 
 void
 fencer_metadata(void)
 {
     const char *name = "pacemaker-fenced";
     const char *desc_short = N_("Instance attributes available for all "
                                 "\"stonith\"-class resources");
     const char *desc_long = N_("Instance attributes available for all "
                                "\"stonith\"-class resources and used by "
                                "Pacemaker's fence daemon, formerly known as "
                                "stonithd");
 
     gchar *s = pcmk__format_option_metadata(name, desc_short, desc_long,
                                             pcmk__opt_context_none,
                                             fencer_options,
                                             PCMK__NELEM(fencer_options));
     printf("%s", s);
     g_free(s);
 }
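 
 /* A minimal usage sketch: per the "metadata" argument check in main() below,
  * the option table above is printed when the daemon is run as
  *
  *     pacemaker-fenced metadata
  *
  * which emits the formatted option metadata for all fencer_options[] entries.
  */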
 
 static GOptionEntry entries[] = {
     { "stand-alone", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &stand_alone,
       N_("Deprecated (will be removed in a future release)"), NULL },
 
     { "stand-alone-w-cpg", 'c', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       stand_alone_cpg_cb, N_("Intended for use in regression testing only"), NULL },
 
     { "logfile", 'l', G_OPTION_FLAG_NONE, G_OPTION_ARG_FILENAME_ARRAY,
       &options.log_files, N_("Send logs to the additional named logfile"), NULL },
 
     { NULL }
 };
 
 static GOptionContext *
 build_arg_context(pcmk__common_args_t *args, GOptionGroup **group)
 {
     GOptionContext *context = NULL;
 
     context = pcmk__build_arg_context(args, "text (default), xml", group,
                                       "[metadata]");
     pcmk__add_main_args(context, entries);
     return context;
 }
 
 int
 main(int argc, char **argv)
 {
     int rc = pcmk_rc_ok;
     crm_cluster_t *cluster = NULL;
     crm_ipc_t *old_instance = NULL;
 
     GError *error = NULL;
 
     GOptionGroup *output_group = NULL;
     pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY);
     gchar **processed_args = pcmk__cmdline_preproc(argv, "l");
     GOptionContext *context = build_arg_context(args, &output_group);
 
     crm_log_preinit(NULL, argc, argv);
 
     pcmk__register_formats(output_group, formats);
     if (!g_option_context_parse_strv(context, &processed_args, &error)) {
         exit_code = CRM_EX_USAGE;
         goto done;
     }
 
     rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
     if (rc != pcmk_rc_ok) {
         exit_code = CRM_EX_ERROR;
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                     "Error creating output format %s: %s",
                     args->output_ty, pcmk_rc_str(rc));
         goto done;
     }
 
     if (args->version) {
         out->version(out, false);
         goto done;
     }
 
     if ((g_strv_length(processed_args) >= 2)
         && pcmk__str_eq(processed_args[1], "metadata", pcmk__str_none)) {
         fencer_metadata();
         goto done;
     }
 
     // Open additional log files
     pcmk__add_logfiles(options.log_files, out);
 
     crm_log_init(NULL, LOG_INFO + args->verbosity, TRUE,
                  (args->verbosity > 0), argc, argv, FALSE);
 
     crm_notice("Starting Pacemaker fencer");
 
     old_instance = crm_ipc_new("stonith-ng", 0);
     if (old_instance == NULL) {
         /* crm_ipc_new() will have already logged an error message with
          * crm_err()
          */
         exit_code = CRM_EX_FATAL;
         goto done;
     }
 
     if (pcmk__connect_generic_ipc(old_instance) == pcmk_rc_ok) {
         // IPC endpoint already up
         crm_ipc_close(old_instance);
         crm_ipc_destroy(old_instance);
         crm_err("pacemaker-fenced is already active, aborting startup");
         goto done;
     } else {
         // Not up or not authentic; we'll proceed either way
         crm_ipc_destroy(old_instance);
         old_instance = NULL;
     }
 
     mainloop_add_signal(SIGTERM, stonith_shutdown);
 
     crm_peer_init();
 
     rc = fenced_scheduler_init();
     if (rc != pcmk_rc_ok) {
         exit_code = CRM_EX_FATAL;
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                     "Error initializing scheduler data: %s", pcmk_rc_str(rc));
         goto done;
     }
 
     cluster = pcmk_cluster_new();
 
     if (!stand_alone) {
 #if SUPPORT_COROSYNC
         if (is_corosync_cluster()) {
             cluster->destroy = stonith_peer_cs_destroy;
             cluster->cpg.cpg_deliver_fn = stonith_peer_ais_callback;
             cluster->cpg.cpg_confchg_fn = pcmk_cpg_membership;
         }
 #endif // SUPPORT_COROSYNC
 
         crm_set_status_callback(&st_peer_update_callback);
 
         if (crm_cluster_connect(cluster) == FALSE) {
             exit_code = CRM_EX_FATAL;
             crm_crit("Cannot sign in to the cluster... terminating");
             goto done;
         }
         pcmk__str_update(&stonith_our_uname, cluster->uname);
 
         if (!options.no_cib_connect) {
             setup_cib();
         }
 
     } else {
         pcmk__str_update(&stonith_our_uname, "localhost");
         crm_warn("Stand-alone mode is deprecated and will be removed "
                  "in a future release");
     }
 
     init_device_list();
     init_topology_list();
 
     pcmk__serve_fenced_ipc(&ipcs, &ipc_callbacks);
 
     // Create the mainloop and run it...
     mainloop = g_main_loop_new(NULL, FALSE);
     crm_notice("Pacemaker fencer successfully started and accepting connections");
     g_main_loop_run(mainloop);
 
 done:
     g_strfreev(processed_args);
     pcmk__free_arg_context(context);
 
     g_strfreev(options.log_files);
 
     stonith_cleanup();
     pcmk_cluster_free(cluster);
     fenced_scheduler_cleanup();
 
     pcmk__output_and_clear_error(&error, out);
 
     if (out != NULL) {
         out->finish(out, exit_code, true, NULL);
         pcmk__output_free(out);
     }
 
     pcmk__unregister_formats();
     crm_exit(exit_code);
 }
diff --git a/include/crm/common/xml_names_internal.h b/include/crm/common/xml_names_internal.h
index 298b7a69f7..2a6313efac 100644
--- a/include/crm/common/xml_names_internal.h
+++ b/include/crm/common/xml_names_internal.h
@@ -1,237 +1,238 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__CRM_COMMON_XML_NAMES_INTERNAL__H
 #  define PCMK__CRM_COMMON_XML_NAMES_INTERNAL__H
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 /*
  * XML element names used only by internal code
  */
 
 #define PCMK__XE_ACK                    "ack"
 #define PCMK__XE_ATTRIBUTES             "attributes"
 #define PCMK__XE_CRM_EVENT              "crm_event"
 #define PCMK__XE_CRM_XML                "crm_xml"
 #define PCMK__XE_DIV                    "div"
 #define PCMK__XE_DOWNED                 "downed"
 #define PCMK__XE_FAILED                 "failed"
 #define PCMK__XE_FAILED_UPDATE          "failed_update"
 #define PCMK__XE_GENERATION_TUPLE       "generation_tuple"
 #define PCMK__XE_LRM                    "lrm"
 #define PCMK__XE_LRM_RESOURCE           "lrm_resource"
 #define PCMK__XE_LRM_RESOURCES          "lrm_resources"
 #define PCMK__XE_LRM_RSC_OP             "lrm_rsc_op"
 #define PCMK__XE_MAINTENANCE            "maintenance"
 #define PCMK__XE_META                   "meta"
 #define PCMK__XE_NACK                   "nack"
 #define PCMK__XE_NODE_STATE             "node_state"
 #define PCMK__XE_OPTIONS                "options"
 #define PCMK__XE_PARAM                  "param"
 #define PCMK__XE_PING                   "ping"
 #define PCMK__XE_PING_RESPONSE          "ping_response"
 #define PCMK__XE_PSEUDO_EVENT           "pseudo_event"
 #define PCMK__XE_RSC_OP                 "rsc_op"
 #define PCMK__XE_SHUTDOWN               "shutdown"
 #define PCMK__XE_SPAN                   "span"
 #define PCMK__XE_TICKET_STATE           "ticket_state"
 #define PCMK__XE_TRANSIENT_ATTRIBUTES   "transient_attributes"
 #define PCMK__XE_TRANSITION_GRAPH       "transition_graph"
 #define PCMK__XE_XPATH_QUERY            "xpath-query"
 #define PCMK__XE_XPATH_QUERY_PATH       "xpath-query-path"
 
 // @COMPAT Deprecated since 1.1.12
 #define PCMK__XE_ACL_USER               "acl_user"
 
 /* @COMPAT Deprecate somehow. It's undocumented and behaves the same as
  * PCMK__XE_CIB in places where it's recognized.
  */
 #define PCMK__XE_ALL                    "all"
 
 // @COMPAT Deprecated since 2.1.7
 #define PCMK__XE_DIFF_ADDED             "diff-added"
 
 // @COMPAT Deprecated since 2.1.7
 #define PCMK__XE_DIFF_REMOVED           "diff-removed"
 
 // @COMPAT Deprecated since 1.0.8 (commit 4cb100f)
 #define PCMK__XE_LIFETIME               "lifetime"
 
 /* @COMPAT Deprecated since 2.0.0; alias for <clone> with PCMK_META_PROMOTABLE
  * set to "true"
  */
 #define PCMK__XE_PROMOTABLE_LEGACY      "master"
 
 // @COMPAT Support for rkt is deprecated since 2.1.8
 #define PCMK__XE_RKT                    "rkt"
 
 // @COMPAT Deprecated since 1.1.12
 #define PCMK__XE_ROLE_REF               "role_ref"
 
 
 /*
  * XML attribute names used only by internal code
  */
 
 #define PCMK__XA_ATTR_CLEAR_INTERVAL    "attr_clear_interval"
 #define PCMK__XA_ATTR_CLEAR_OPERATION   "attr_clear_operation"
 #define PCMK__XA_ATTR_DAMPENING         "attr_dampening"
 #define PCMK__XA_ATTR_HOST              "attr_host"
 #define PCMK__XA_ATTR_HOST_ID           "attr_host_id"
 #define PCMK__XA_ATTR_IS_PRIVATE        "attr_is_private"
 #define PCMK__XA_ATTR_IS_REMOTE         "attr_is_remote"
 #define PCMK__XA_ATTR_KEY               "attr_key"
 #define PCMK__XA_ATTR_NAME              "attr_name"
 #define PCMK__XA_ATTR_REGEX             "attr_regex"
 #define PCMK__XA_ATTR_RESOURCE          "attr_resource"
 #define PCMK__XA_ATTR_SECTION           "attr_section"
 #define PCMK__XA_ATTR_SET               "attr_set"
 #define PCMK__XA_ATTR_SET_TYPE          "attr_set_type"
 #define PCMK__XA_ATTR_SYNC_POINT        "attr_sync_point"
 #define PCMK__XA_ATTR_USER              "attr_user"
 #define PCMK__XA_ATTR_VALUE             "attr_value"
 #define PCMK__XA_ATTR_VERSION           "attr_version"
 #define PCMK__XA_ATTR_WRITER            "attr_writer"
 #define PCMK__XA_ATTRD_IS_FORCE_WRITE   "attrd_is_force_write"
 #define PCMK__XA_CALL_ID                "call-id"
 #define PCMK__XA_CLIENT_NAME            "client_name"
 #define PCMK__XA_CLIENT_UUID            "client_uuid"
 #define PCMK__XA_CONFIG_ERRORS          "config-errors"
 #define PCMK__XA_CONFIG_WARNINGS        "config-warnings"
 #define PCMK__XA_CONFIRM                "confirm"
 #define PCMK__XA_CONNECTION_HOST        "connection_host"
 #define PCMK__XA_CONTENT                "content"
 #define PCMK__XA_CRMD_STATE             "crmd_state"
 #define PCMK__XA_CRM_HOST_TO            "crm_host_to"
 #define PCMK__XA_CRM_LIMIT_MAX          "crm-limit-max"
 #define PCMK__XA_CRM_LIMIT_MODE         "crm-limit-mode"
 #define PCMK__XA_CRM_SUBSYSTEM          "crm_subsystem"
 #define PCMK__XA_CRM_SYS_FROM           "crm_sys_from"
 #define PCMK__XA_CRM_SYS_TO             "crm_sys_to"
 #define PCMK__XA_CRM_TASK               "crm_task"
 #define PCMK__XA_CRM_TGRAPH_IN          "crm-tgraph-in"
 #define PCMK__XA_CRM_USER               "crm_user"
 #define PCMK__XA_DC_LEAVING             "dc-leaving"
 #define PCMK__XA_DIGEST                 "digest"
 #define PCMK__XA_ELECTION_AGE_SEC       "election-age-sec"
 #define PCMK__XA_ELECTION_AGE_NANO_SEC  "election-age-nano-sec"
 #define PCMK__XA_ELECTION_ID            "election-id"
 #define PCMK__XA_ELECTION_OWNER         "election-owner"
 #define PCMK__XA_GRANTED                "granted"
 #define PCMK__XA_GRAPH_ERRORS           "graph-errors"
 #define PCMK__XA_GRAPH_WARNINGS         "graph-warnings"
 #define PCMK__XA_HIDDEN                 "hidden"
 #define PCMK__XA_HTTP_EQUIV             "http-equiv"
 #define PCMK__XA_IN_CCM                 "in_ccm"
 #define PCMK__XA_JOIN                   "join"
 #define PCMK__XA_JOIN_ID                "join_id"
 #define PCMK__XA_LINE                   "line"
 #define PCMK__XA_LONG_ID                "long-id"
 #define PCMK__XA_MAJOR_VERSION          "major_version"
 #define PCMK__XA_MINOR_VERSION          "minor_version"
 #define PCMK__XA_MODE                   "mode"
 #define PCMK__XA_MOON                   "moon"
 #define PCMK__XA_NAMESPACE              "namespace"
 #define PCMK__XA_NODE_FENCED            "node_fenced"
 #define PCMK__XA_NODE_IN_MAINTENANCE    "node_in_maintenance"
 #define PCMK__XA_NODE_START_STATE       "node_start_state"
 #define PCMK__XA_NODE_STATE             "node_state"
 #define PCMK__XA_OP_DIGEST              "op-digest"
 #define PCMK__XA_OP_FORCE_RESTART       "op-force-restart"
 #define PCMK__XA_OP_RESTART_DIGEST      "op-restart-digest"
 #define PCMK__XA_OP_SECURE_DIGEST       "op-secure-digest"
 #define PCMK__XA_OP_SECURE_PARAMS       "op-secure-params"
 #define PCMK__XA_OP_STATUS              "op-status"
 #define PCMK__XA_OPERATION_KEY          "operation_key"
 #define PCMK__XA_PACEMAKERD_STATE       "pacemakerd_state"
 #define PCMK__XA_PASSWORD               "password"
 #define PCMK__XA_PRIORITY               "priority"
 #define PCMK__XA_RC_CODE                "rc-code"
 #define PCMK__XA_REAP                   "reap"
 
 /* Actions to be executed on Pacemaker Remote nodes are routed through the
  * controller on the cluster node hosting the remote connection. That cluster
  * node is considered the router node for the action.
  */
 #define PCMK__XA_ROUTER_NODE            "router_node"
 
 #define PCMK__XA_RSC_ID                 "rsc-id"
 #define PCMK__XA_RSC_PROVIDES           "rsc_provides"
 #define PCMK__XA_SCHEMA                 "schema"
 #define PCMK__XA_SCHEMAS                "schemas"
 #define PCMK__XA_SRC                    "src"
 #define PCMK__XA_ST_ACTION_DISALLOWED   "st_action_disallowed"
 #define PCMK__XA_ST_ACTION_TIMEOUT      "st_action_timeout"
 #define PCMK__XA_ST_AVAILABLE_DEVICES   "st-available-devices"
 #define PCMK__XA_ST_CALLID              "st_callid"
 #define PCMK__XA_ST_CALLDATA            "st_calldata"
 #define PCMK__XA_ST_CALLOPT             "st_callopt"
 #define PCMK__XA_ST_CLIENTID            "st_clientid"
 #define PCMK__XA_ST_CLIENTNAME          "st_clientname"
+#define PCMK__XA_ST_CLIENTNODE          "st_clientnode"
 #define PCMK__XA_ST_DELAY               "st_delay"
 #define PCMK__XA_ST_DELAY_BASE          "st_delay_base"
 #define PCMK__XA_ST_DELAY_MAX           "st_delay_max"
 #define PCMK__XA_ST_MONITOR_VERIFIED    "st_monitor_verified"
 #define PCMK__XA_ST_OP                  "st_op"
 #define PCMK__XA_ST_OUTPUT              "st_output"
 #define PCMK__XA_ST_RC                  "st_rc"
 #define PCMK__XA_ST_REMOTE_OP           "st_remote_op"
 #define PCMK__XA_ST_REMOTE_OP_RELAY     "st_remote_op_relay"
 #define PCMK__XA_ST_REQUIRED            "st_required"
 #define PCMK__XA_ST_TARGET              "st_target"
 #define PCMK__XA_ST_TIMEOUT             "st_timeout"
 #define PCMK__XA_ST_TOLERANCE           "st_tolerance"
 #define PCMK__XA_SUBT                   "subt"                  // subtype
 #define PCMK__XA_T                      "t"                     // type
 #define PCMK__XA_TRANSITION_KEY         "transition-key"
 #define PCMK__XA_TRANSITION_MAGIC       "transition-magic"
 #define PCMK__XA_UPTIME                 "uptime"
 
 // @COMPAT Used only with v1 patchsets
 #define PCMK__XA_CRM_DIFF_MARKER        "__crm_diff_marker__"
 
 // @COMPAT Deprecated since 2.1.5
 #define PCMK__XA_FIRST_INSTANCE         "first-instance"
 
 // @COMPAT Deprecated since 2.1.7
 #define PCMK__XA_ORDERING               "ordering"
 
 // @COMPAT Deprecated alias for PCMK_XA_PROMOTED_MAX since 2.0.0
 #define PCMK__XA_PROMOTED_MAX_LEGACY    "masters"
 
 // @COMPAT Deprecated alias for PCMK_XA_PROMOTED_ONLY since 2.0.0
 #define PCMK__XA_PROMOTED_ONLY_LEGACY   "master_only"
 
 // @COMPAT Deprecated since 1.1.12
 #define PCMK__XA_REF                    "ref"
 
 // @COMPAT Deprecated since 2.1.6
 #define PCMK__XA_REPLACE                "replace"
 
 // @COMPAT Deprecated alias for \c PCMK_XA_AUTOMATIC since 1.1.14
 #define PCMK__XA_REQUIRED               "required"
 
 // @COMPAT Deprecated since 2.1.5
 #define PCMK__XA_RSC_INSTANCE           "rsc-instance"
 
 // @COMPAT Deprecated since 2.1.5
 #define PCMK__XA_THEN_INSTANCE          "then-instance"
 
 // @COMPAT Deprecated since 2.1.5
 #define PCMK__XA_WITH_RSC_INSTANCE      "with-rsc-instance"
 
 
 #ifdef __cplusplus
 }
 #endif
 
 #endif // PCMK__CRM_COMMON_XML_NAMES_INTERNAL__H
diff --git a/include/crm/fencing/internal.h b/include/crm/fencing/internal.h
index 6d9e8dfb2e..1470462941 100644
--- a/include/crm/fencing/internal.h
+++ b/include/crm/fencing/internal.h
@@ -1,191 +1,190 @@
 /*
  * Copyright 2011-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef STONITH_NG_INTERNAL__H
 #  define STONITH_NG_INTERNAL__H
 
 #  include <glib.h>
 #  include <crm/common/ipc.h>
 #  include <crm/common/xml.h>
 #  include <crm/common/output_internal.h>
 #  include <crm/stonith-ng.h>
 
 enum st_device_flags {
     st_device_supports_none             = (0 << 0),
     st_device_supports_list             = (1 << 0),
     st_device_supports_status           = (1 << 1),
     st_device_supports_reboot           = (1 << 2),
     st_device_supports_parameter_plug   = (1 << 3),
     st_device_supports_parameter_port   = (1 << 4),
     st_device_supports_on               = (1 << 5),
 };
 
 #define stonith__set_device_flags(device_flags, device_id, flags_to_set) do { \
         device_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,      \
                                           "Fence device", device_id,          \
                                           (device_flags), (flags_to_set),     \
                                           #flags_to_set);                     \
     } while (0)
 
 #define stonith__set_call_options(st_call_opts, call_for, flags_to_set) do { \
         st_call_opts = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                           "Fencer call", (call_for),         \
                                           (st_call_opts), (flags_to_set),    \
                                           #flags_to_set);                    \
     } while (0)
 
 #define stonith__clear_call_options(st_call_opts, call_for, flags_to_clear) do { \
         st_call_opts = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                             "Fencer call", (call_for),         \
                                             (st_call_opts), (flags_to_clear),  \
                                             #flags_to_clear);                  \
     } while (0)
 
 struct stonith_action_s;
 typedef struct stonith_action_s stonith_action_t;
 
 stonith_action_t *stonith__action_create(const char *agent,
                                          const char *action_name,
                                          const char *target,
                                          uint32_t target_nodeid,
                                          int timeout_sec,
                                          GHashTable *device_args,
                                          GHashTable *port_map,
                                          const char *host_arg);
 void stonith__destroy_action(stonith_action_t *action);
 pcmk__action_result_t *stonith__action_result(stonith_action_t *action);
 int stonith__result2rc(const pcmk__action_result_t *result);
 void stonith__xe_set_result(xmlNode *xml, const pcmk__action_result_t *result);
 void stonith__xe_get_result(const xmlNode *xml, pcmk__action_result_t *result);
 xmlNode *stonith__find_xe_with_result(xmlNode *xml);
 
 int stonith__execute_async(stonith_action_t *action, void *userdata,
                            void (*done) (int pid,
                                          const pcmk__action_result_t *result,
                                          void *user_data),
                            void (*fork_cb) (int pid, void *user_data));
 
 int stonith__metadata_async(const char *agent, int timeout_sec,
                             void (*callback)(int pid,
                                              const pcmk__action_result_t *result,
                                              void *user_data),
                             void *user_data);
 
 xmlNode *create_level_registration_xml(const char *node, const char *pattern,
                                        const char *attr, const char *value,
                                        int level,
                                        const stonith_key_value_t *device_list);
 
 xmlNode *create_device_registration_xml(const char *id,
                                         enum stonith_namespace namespace,
                                         const char *agent,
                                         const stonith_key_value_t *params,
                                         const char *rsc_provides);
 
 void stonith__register_messages(pcmk__output_t *out);
 
 GList *stonith__parse_targets(const char *hosts);
 
 const char *stonith__later_succeeded(const stonith_history_t *event,
                                      const stonith_history_t *top_history);
 stonith_history_t *stonith__sort_history(stonith_history_t *history);
 
 void stonith__device_parameter_flags(uint32_t *device_flags,
                                      const char *device_name,
                                      xmlNode *metadata);
 
 #  define ST_LEVEL_MAX 10
 
-#  define F_STONITH_CLIENTNODE        "st_clientnode"
 #  define F_STONITH_NOTIFY_ACTIVATE   "st_notify_activate"
 #  define F_STONITH_NOTIFY_DEACTIVATE "st_notify_deactivate"
 #  define F_STONITH_DELEGATE      "st_delegate"
 #  define F_STONITH_DEVICE_SUPPORT_FLAGS "st_device_support_flags"
 /*! The node initiating the stonith operation. If an operation is
  * relayed, this is the last node the operation lands on. In standalone
  * mode, origin is the ID of the client that originated the operation. */
 #  define F_STONITH_ORIGIN        "st_origin"
 #  define F_STONITH_HISTORY_LIST  "st_history"
 #  define F_STONITH_DATE          "st_date"
 #  define F_STONITH_DATE_NSEC     "st_date_nsec"
 #  define F_STONITH_STATE         "st_state"
 #  define F_STONITH_ACTIVE        "st_active"
 #  define F_STONITH_DIFFERENTIAL  "st_differential"
 
 #  define F_STONITH_DEVICE        "st_device_id"
 #  define F_STONITH_ACTION        "st_device_action"
 #  define F_STONITH_MERGED        "st_op_merged"
 
 #  define T_STONITH_NG        "stonith-ng"
 #  define T_STONITH_REPLY     "st-reply"
 /*! For async operations, the server returns an event to the client
  * containing the total amount of time it is allowing for the operation
  * to take place. */
 #  define T_STONITH_TIMEOUT_VALUE "st-async-timeout-value"
 #  define T_STONITH_NOTIFY    "st_notify"
 
 #  define STONITH_ATTR_ACTION_OP   "action"
 
 #  define STONITH_OP_EXEC        "st_execute"
 #  define STONITH_OP_TIMEOUT_UPDATE        "st_timeout_update"
 #  define STONITH_OP_QUERY       "st_query"
 #  define STONITH_OP_FENCE       "st_fence"
 #  define STONITH_OP_RELAY       "st_relay"
 #  define STONITH_OP_DEVICE_ADD      "st_device_register"
 #  define STONITH_OP_DEVICE_DEL      "st_device_remove"
 #  define STONITH_OP_FENCE_HISTORY   "st_fence_history"
 #  define STONITH_OP_LEVEL_ADD       "st_level_add"
 #  define STONITH_OP_LEVEL_DEL       "st_level_remove"
 
 #  define STONITH_WATCHDOG_AGENT          "fence_watchdog"
 /* Don't change the two values below, as that would break rolling upgrades */
 #  define STONITH_WATCHDOG_AGENT_INTERNAL "#watchdog"
 #  define STONITH_WATCHDOG_ID             "watchdog"
 
 stonith_history_t *stonith__first_matching_event(stonith_history_t *history,
                                                  bool (*matching_fn)(stonith_history_t *, void *),
                                                  void *user_data);
 bool stonith__event_state_pending(stonith_history_t *history, void *user_data);
 bool stonith__event_state_eq(stonith_history_t *history, void *user_data);
 bool stonith__event_state_neq(stonith_history_t *history, void *user_data);
 
 int stonith__legacy2status(int rc);
 
 int stonith__exit_status(const stonith_callback_data_t *data);
 int stonith__execution_status(const stonith_callback_data_t *data);
 const char *stonith__exit_reason(const stonith_callback_data_t *data);
 
 int stonith__event_exit_status(const stonith_event_t *event);
 int stonith__event_execution_status(const stonith_event_t *event);
 const char *stonith__event_exit_reason(const stonith_event_t *event);
 char *stonith__event_description(const stonith_event_t *event);
 gchar *stonith__history_description(const stonith_history_t *event,
                                     bool full_history,
                                     const char *later_succeeded,
                                     uint32_t show_opts);
 
 /*!
  * \internal
  * \brief Is a fencing operation in pending state?
  *
  * \param[in] state     State as enum op_state value
  *
  * \return \c true if the operation has neither succeeded nor failed yet,
  *         otherwise \c false
  */
 static inline bool
 stonith__op_state_pending(enum op_state state)
 {
     return state != st_failed && state != st_done;
 }
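 
 /* A minimal usage sketch (hypothetical caller; "op" stands in for any struct
  * that tracks an enum op_state value):
  *
  *     if (stonith__op_state_pending(op->state)) {
  *         // neither st_done nor st_failed yet, so keep waiting for a result
  *     }
  */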
 
 gboolean stonith__watchdog_fencing_enabled_for_node(const char *node);
 gboolean stonith__watchdog_fencing_enabled_for_node_api(stonith_t *st, const char *node);
 
 #endif