diff --git a/daemons/fenced/fenced_commands.c b/daemons/fenced/fenced_commands.c
index ea36910042..7e43130478 100644
--- a/daemons/fenced/fenced_commands.c
+++ b/daemons/fenced/fenced_commands.c
@@ -1,2729 +1,2733 @@
 /*
  * Copyright 2009-2019 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <ctype.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipcs.h>
 #include <crm/cluster/internal.h>
 #include <crm/common/mainloop.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 
 #if SUPPORT_CIBSECRETS
 #  include <crm/common/cib_secrets.h>
 #endif
 
 #include <pacemaker-fenced.h>
 
 GHashTable *device_list = NULL;
 GHashTable *topology = NULL;
 GList *cmd_list = NULL;
 
 struct device_search_s {
     /* target of fence action */
     char *host;
     /* requested fence action */
     char *action;
     /* timeout to use if a device is queried dynamically for possible targets */
     int per_device_timeout;
     /* number of registered fencing devices at time of request */
     int replies_needed;
     /* number of device replies received so far */
     int replies_received;
     /* whether self-fencing is allowed for the requested action (or remapped "off") */
     bool allow_suicide;
 
     /* private data to pass to search callback function */
     void *user_data;
     /* function to call when all replies have been received */
     void (*callback) (GList * devices, void *user_data);
     /* devices capable of performing requested action (or off if remapping) */
     GListPtr capable;
 };
 
 static gboolean stonith_device_dispatch(gpointer user_data);
 static void st_child_done(GPid pid, int rc, const char *output, gpointer user_data);
 static void stonith_send_reply(xmlNode * reply, int call_options, const char *remote_peer,
                                const char *client_id);
 
 static void search_devices_record_result(struct device_search_s *search, const char *device,
                                          gboolean can_fence);
 
 typedef struct async_command_s {
 
     int id;
     int pid;
     int fd_stdout;
     int options;
     int default_timeout; /* seconds */
     int timeout; /* seconds */
 
     int start_delay; /* milliseconds */
     int delay_id;
 
     char *op;
     char *origin;
     char *client;
     char *client_name;
     char *remote_op_id;
 
     char *victim;
     uint32_t victim_nodeid;
     char *action;
     char *device;
     char *mode;
 
     GListPtr device_list;
     GListPtr device_next;
 
     void *internal_user_data;
     void (*done_cb) (GPid pid, int rc, const char *output, gpointer user_data);
     guint timer_sigterm;
     guint timer_sigkill;
     /*! If the operation timed out, this is the last signal
      *  we sent to the process to get it to terminate */
     int last_timeout_signo;
 
     stonith_device_t *active_on;
     stonith_device_t *activating_on;
 } async_command_t;
 
 static xmlNode *stonith_construct_async_reply(async_command_t * cmd, const char *output,
                                               xmlNode * data, int rc);
 
 static gboolean
 is_action_required(const char *action, stonith_device_t *device)
 {
     return device && device->automatic_unfencing && safe_str_eq(action, "on");
 }
 
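 /*!
  * \internal
  * \brief Get a device's configured maximum random fencing delay for an action
  *
  * \param[in] device  Fencing device to check
  * \param[in] action  Fence action name
  *
  * \return Value of pcmk_delay_max in milliseconds for "off" and "reboot"
  *         actions, otherwise 0
  */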
 static int
 get_action_delay_max(stonith_device_t * device, const char * action)
 {
     const char *value = NULL;
     int delay_max_ms = 0;
 
     if (safe_str_neq(action, "off") && safe_str_neq(action, "reboot")) {
         return 0;
     }
 
     value = g_hash_table_lookup(device->params, STONITH_ATTR_DELAY_MAX);
     if (value) {
        delay_max_ms = crm_get_msec(value);
     }
 
     return delay_max_ms;
 }
 
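 /*!
  * \internal
  * \brief Get a device's configured static (base) fencing delay for an action
  *
  * \param[in] device  Fencing device to check
  * \param[in] action  Fence action name
  *
  * \return Value of pcmk_delay_base in milliseconds for "off" and "reboot"
  *         actions, otherwise 0
  */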
 static int
 get_action_delay_base(stonith_device_t * device, const char * action)
 {
     const char *value = NULL;
     int delay_base_ms = 0;
 
     if (safe_str_neq(action, "off") && safe_str_neq(action, "reboot")) {
         return 0;
     }
 
     value = g_hash_table_lookup(device->params, STONITH_ATTR_DELAY_BASE);
     if (value) {
        delay_base_ms = crm_get_msec(value);
     }
 
     return delay_base_ms;
 }
 
 /*!
  * \internal
  * \brief Override STONITH timeout with pcmk_*_timeout if available
  *
  * \param[in] device           STONITH device to use
  * \param[in] action           STONITH action name
  * \param[in] default_timeout  Timeout to use if device does not have
  *                             a pcmk_*_timeout parameter for action
  *
  * \return Value of pcmk_(action)_timeout if available, otherwise default_timeout
  * \note For consistency, it would be nice if reboot/off/on timeouts could be
  *       set the same way as start/stop/monitor timeouts, i.e. with an
  *       <operation> entry in the fencing resource configuration. However that
  *       is insufficient because fencing devices may be registered directly via
  *       the fencer's register_device() API instead of going through the CIB
  *       (e.g. stonith_admin uses it for its -R option, and the executor uses it
  *       to ensure a device is registered when a command is issued). As device
  *       properties, pcmk_*_timeout parameters can be grabbed by the fencer when
  *       the device is registered, whether by CIB change or API call.
  */
 static int
 get_action_timeout(stonith_device_t * device, const char *action, int default_timeout)
 {
     if (action && device && device->params) {
         char buffer[64] = { 0, };
         const char *value = NULL;
 
         /* If "reboot" was requested but the device does not support it,
          * we will remap to "off", so check timeout for "off" instead
          */
         if (safe_str_eq(action, "reboot")
             && is_not_set(device->flags, st_device_supports_reboot)) {
             crm_trace("%s doesn't support reboot, using timeout for off instead",
                       device->id);
             action = "off";
         }
 
         /* If the device config specified an action-specific timeout, use it */
         snprintf(buffer, sizeof(buffer), "pcmk_%s_timeout", action);
         value = g_hash_table_lookup(device->params, buffer);
         if (value) {
             return atoi(value);
         }
     }
     return default_timeout;
 }
 
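 /*!
  * \internal
  * \brief Free all memory used by an asynchronous fencing command
  *
  * Cancel any pending start-delay timer, remove the command from the global
  * command list, and free its members.
  *
  * \param[in] cmd  Command to free (NULL is a no-op)
  */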
 static void
 free_async_command(async_command_t * cmd)
 {
     if (!cmd) {
         return;
     }
 
     if (cmd->delay_id) {
         g_source_remove(cmd->delay_id);
     }
 
     cmd_list = g_list_remove(cmd_list, cmd);
 
     g_list_free_full(cmd->device_list, free);
     free(cmd->device);
     free(cmd->action);
     free(cmd->victim);
     free(cmd->remote_op_id);
     free(cmd->client);
     free(cmd->client_name);
     free(cmd->origin);
     free(cmd->mode);
     free(cmd->op);
     free(cmd);
 }
 
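 /*!
  * \internal
  * \brief Create a new asynchronous fencing command from a request XML
  *
  * \param[in] msg  Request XML to parse
  *
  * \return Newly allocated command (appended to the global command list) on
  *         success, otherwise NULL
  */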
 static async_command_t *
 create_async_command(xmlNode * msg)
 {
     async_command_t *cmd = NULL;
     xmlNode *op = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_ERR);
     const char *action = crm_element_value(op, F_STONITH_ACTION);
 
     CRM_CHECK(action != NULL, crm_log_xml_warn(msg, "NoAction"); return NULL);
 
     crm_log_xml_trace(msg, "Command");
     cmd = calloc(1, sizeof(async_command_t));
     crm_element_value_int(msg, F_STONITH_CALLID, &(cmd->id));
     crm_element_value_int(msg, F_STONITH_CALLOPTS, &(cmd->options));
     crm_element_value_int(msg, F_STONITH_TIMEOUT, &(cmd->default_timeout));
     cmd->timeout = cmd->default_timeout;
 
     cmd->origin = crm_element_value_copy(msg, F_ORIG);
     cmd->remote_op_id = crm_element_value_copy(msg, F_STONITH_REMOTE_OP_ID);
     cmd->client = crm_element_value_copy(msg, F_STONITH_CLIENTID);
     cmd->client_name = crm_element_value_copy(msg, F_STONITH_CLIENTNAME);
     cmd->op = crm_element_value_copy(msg, F_STONITH_OPERATION);
     cmd->action = strdup(action);
     cmd->victim = crm_element_value_copy(op, F_STONITH_TARGET);
     cmd->mode = crm_element_value_copy(op, F_STONITH_MODE);
     cmd->device = crm_element_value_copy(op, F_STONITH_DEVICE);
 
     CRM_CHECK(cmd->op != NULL, crm_log_xml_warn(msg, "NoOp"); free_async_command(cmd); return NULL);
     CRM_CHECK(cmd->client != NULL, crm_log_xml_warn(msg, "NoClient"));
 
     cmd->done_cb = st_child_done;
     cmd_list = g_list_append(cmd_list, cmd);
     return cmd;
 }
 
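 /*!
  * \internal
  * \brief Get the number of actions a device may execute concurrently
  *
  * \param[in] device  Fencing device to check
  *
  * \return Value of pcmk_action_limit if set (with 0 coerced to 1, and
  *         negative values meaning "no limit"), otherwise 1
  */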
 static int
 get_action_limit(stonith_device_t * device)
 {
     const char *value = NULL;
     int action_limit = 1;
 
     value = g_hash_table_lookup(device->params, STONITH_ATTR_ACTION_LIMIT);
     if (value) {
        action_limit = crm_parse_int(value, "1");
        if (action_limit == 0) {
            /* pcmk_action_limit should not be 0. Enforce it to be 1. */
            action_limit = 1;
        }
     }
 
     return action_limit;
 }
 
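 /*!
  * \internal
  * \brief Get the number of commands currently active on a device
  *
  * \param[in] device  Fencing device to check
  *
  * \return Number of commands in the global command list that are executing
  *         on \p device
  */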
 static int
 get_active_cmds(stonith_device_t * device)
 {
     int counter = 0;
     GListPtr gIter = NULL;
     GListPtr gIterNext = NULL;
 
     CRM_CHECK(device != NULL, return 0);
 
     for (gIter = cmd_list; gIter != NULL; gIter = gIterNext) {
         async_command_t *cmd = gIter->data;
 
         gIterNext = gIter->next;
 
         if (cmd->active_on == device) {
             counter++;
         }
     }
 
     return counter;
 }
 
 static void
 fork_cb(GPid pid, gpointer user_data)
 {
     async_command_t *cmd = (async_command_t *) user_data;
     stonith_device_t * device =
         /* In case of a retry, we've already moved the command from
          * activating_on to active_on.
          */
         cmd->activating_on?cmd->activating_on:cmd->active_on;
 
     CRM_ASSERT(device);
     crm_debug("Operation '%s'%s%s on %s now running with pid=%d, timeout=%ds",
                   cmd->action, cmd->victim ? " targeting " : "", cmd->victim ? cmd->victim : "",
                   device->id, pid, cmd->timeout);
     cmd->active_on = device;
     cmd->activating_on = NULL;
 }
 
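 /*!
  * \internal
  * \brief Execute the next pending fencing command on a device
  *
  * If the device is not over its action limit, dequeue the first pending
  * operation whose start delay has expired and execute it via the device's
  * fencing agent (the watchdog agent is handled specially).
  *
  * \param[in] device  Fencing device to run a command on
  *
  * \return TRUE if \p device is valid, otherwise FALSE
  */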
 static gboolean
 stonith_device_execute(stonith_device_t * device)
 {
     int exec_rc = 0;
     const char *action_str = NULL;
     async_command_t *cmd = NULL;
     stonith_action_t *action = NULL;
     int active_cmds = 0;
     int action_limit = 0;
     GListPtr gIter = NULL;
     GListPtr gIterNext = NULL;
 
     CRM_CHECK(device != NULL, return FALSE);
 
     active_cmds = get_active_cmds(device);
     action_limit = get_action_limit(device);
     if (action_limit > -1 && active_cmds >= action_limit) {
         crm_trace("%s is over its action limit of %d (%u active action%s)",
                   device->id, action_limit, active_cmds, active_cmds > 1 ? "s" : "");
         return TRUE;
     }
 
     for (gIter = device->pending_ops; gIter != NULL; gIter = gIterNext) {
         async_command_t *pending_op = gIter->data;
 
         gIterNext = gIter->next;
 
         if (pending_op && pending_op->delay_id) {
             crm_trace
                 ("Operation '%s'%s%s on %s was asked to run too early, waiting for start_delay timeout of %dms",
                  pending_op->action, pending_op->victim ? " targeting " : "",
                  pending_op->victim ? pending_op->victim : "",
                  device->id, pending_op->start_delay);
             continue;
         }
 
         device->pending_ops = g_list_remove_link(device->pending_ops, gIter);
         g_list_free_1(gIter);
 
         cmd = pending_op;
         break;
     }
 
     if (cmd == NULL) {
         crm_trace("Nothing further to do for %s for now", device->id);
         return TRUE;
     }
 
     if(safe_str_eq(device->agent, STONITH_WATCHDOG_AGENT)) {
         if(safe_str_eq(cmd->action, "reboot")) {
             pcmk_panic(__FUNCTION__);
             goto done;
 
         } else if(safe_str_eq(cmd->action, "off")) {
             pcmk_panic(__FUNCTION__);
             goto done;
 
         } else {
             crm_info("Faking success for %s watchdog operation", cmd->action);
             cmd->done_cb(0, 0, NULL, cmd);
             goto done;
         }
     }
 
 #if SUPPORT_CIBSECRETS
     if (replace_secret_params(device->id, device->params) < 0) {
         /* replacing secrets failed! */
         if (safe_str_eq(cmd->action,"stop")) {
             /* don't fail on stop! */
             crm_info("proceeding with the stop operation for %s", device->id);
 
         } else {
             crm_err("failed to get secrets for %s, "
                     "considering resource not configured", device->id);
             exec_rc = PCMK_OCF_NOT_CONFIGURED;
             cmd->done_cb(0, exec_rc, NULL, cmd);
             goto done;
         }
     }
 #endif
 
     action_str = cmd->action;
     if (safe_str_eq(cmd->action, "reboot") && is_not_set(device->flags, st_device_supports_reboot)) {
         crm_warn("Agent '%s' does not advertise support for 'reboot', performing 'off' action instead", device->agent);
         action_str = "off";
     }
 
     action = stonith_action_create(device->agent,
                                    action_str,
                                    cmd->victim,
                                    cmd->victim_nodeid,
                                    cmd->timeout, device->params, device->aliases);
 
     /* For async execution, exec_rc is negative only on an early error exit;
      * otherwise success or failure is handled via the callbacks. */
     cmd->activating_on = device;
     exec_rc = stonith_action_execute_async(action, (void *)cmd,
                                            cmd->done_cb, fork_cb);
 
     if (exec_rc < 0) {
         crm_warn("Operation '%s'%s%s on %s failed: %s (%d)",
                  cmd->action, cmd->victim ? " targeting " : "", cmd->victim ? cmd->victim : "",
                  device->id, pcmk_strerror(exec_rc), exec_rc);
         cmd->activating_on = NULL;
         cmd->done_cb(0, exec_rc, NULL, cmd);
     }
 
 done:
     /* Device might get triggered to work by multiple fencing commands
      * simultaneously. Trigger the device again to make sure any
      * remaining concurrent commands get executed. */
     if (device->pending_ops) {
         mainloop_set_trigger(device->work);
     }
     return TRUE;
 }
 
 static gboolean
 stonith_device_dispatch(gpointer user_data)
 {
     return stonith_device_execute(user_data);
 }
 
 static gboolean
 start_delay_helper(gpointer data)
 {
     async_command_t *cmd = data;
     stonith_device_t *device = NULL;
 
     cmd->delay_id = 0;
     device = cmd->device ? g_hash_table_lookup(device_list, cmd->device) : NULL;
 
     if (device) {
         mainloop_set_trigger(device->work);
     }
 
     return FALSE;
 }
 
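 /*!
  * \internal
  * \brief Queue a fencing command for execution on a device
  *
  * Add the command to the device's list of pending operations and trigger the
  * device to process its queue, delaying execution first if a base and/or
  * random delay is configured for the action.
  *
  * \param[in] cmd     Command to schedule
  * \param[in] device  Device to execute the command on
  */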
 static void
 schedule_stonith_command(async_command_t * cmd, stonith_device_t * device)
 {
     int delay_max = 0;
     int delay_base = 0;
 
     CRM_CHECK(cmd != NULL, return);
     CRM_CHECK(device != NULL, return);
 
     if (cmd->device) {
         free(cmd->device);
     }
 
     if (device->include_nodeid && cmd->victim) {
         crm_node_t *node = crm_get_peer(0, cmd->victim);
 
         cmd->victim_nodeid = node->id;
     }
 
     cmd->device = strdup(device->id);
     cmd->timeout = get_action_timeout(device, cmd->action, cmd->default_timeout);
 
     if (cmd->remote_op_id) {
         crm_debug("Scheduling '%s' action%s%s on %s for remote peer %s with op id (%s) (timeout=%ds)",
                   cmd->action,
                   cmd->victim ? " targeting " : "", cmd->victim ? cmd->victim : "",
                   device->id, cmd->origin, cmd->remote_op_id, cmd->timeout);
     } else {
         crm_debug("Scheduling '%s' action%s%s on %s for %s (timeout=%ds)",
                   cmd->action,
                   cmd->victim ? " targeting " : "", cmd->victim ? cmd->victim : "",
                   device->id, cmd->client, cmd->timeout);
     }
 
     device->pending_ops = g_list_append(device->pending_ops, cmd);
     mainloop_set_trigger(device->work);
 
     delay_max = get_action_delay_max(device, cmd->action);
     delay_base = get_action_delay_base(device, cmd->action);
     if (delay_max == 0) {
         delay_max = delay_base;
     }
     if (delay_max < delay_base) {
         crm_warn("Base-delay (%dms) is larger than max-delay (%dms) "
                  "for %s on %s - limiting to max-delay",
                  delay_base, delay_max, cmd->action, device->id);
         delay_base = delay_max;
     }
     if (delay_max > 0) {
         // coverity[dont_call] We're not using rand() for security
         cmd->start_delay =
             ((delay_max != delay_base)?(rand() % (delay_max - delay_base)):0)
             + delay_base;
         crm_notice("Delaying '%s' action%s%s on %s for %dms (timeout=%ds, base=%dms, "
                    "max=%dms)",
                     cmd->action,
                     cmd->victim ? " targeting " : "", cmd->victim ? cmd->victim : "",
                     device->id, cmd->start_delay, cmd->timeout,
                     delay_base, delay_max);
         cmd->delay_id =
             g_timeout_add(cmd->start_delay, start_delay_helper, cmd);
     }
 }
 
 static void
 free_device(gpointer data)
 {
     GListPtr gIter = NULL;
     stonith_device_t *device = data;
 
     g_hash_table_destroy(device->params);
     g_hash_table_destroy(device->aliases);
 
     for (gIter = device->pending_ops; gIter != NULL; gIter = gIter->next) {
         async_command_t *cmd = gIter->data;
 
         crm_warn("Removal of device '%s' purged operation '%s'", device->id, cmd->action);
         cmd->done_cb(0, -ENODEV, NULL, cmd);
     }
     g_list_free(device->pending_ops);
 
     g_list_free_full(device->targets, free);
 
     mainloop_destroy_trigger(device->work);
 
     free_xml(device->agent_metadata);
     free(device->namespace);
     free(device->on_target_actions);
     free(device->agent);
     free(device->id);
     free(device);
 }
 
 void free_device_list()
 {
     if (device_list != NULL) {
         g_hash_table_destroy(device_list);
         device_list = NULL;
     }
 }
 
 void
 init_device_list()
 {
     if (device_list == NULL) {
         device_list = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL,
                                             free_device);
     }
 }
 
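 /*!
  * \internal
  * \brief Parse a pcmk_host_map string into a table of name-to-port aliases
  *
  * \param[in]  hostmap  Host map string ("node=port" entries separated by
  *                      semicolons or whitespace)
  * \param[out] targets  If not NULL, list to which parsed port names are
  *                      appended
  *
  * \return Newly created hash table mapping node names to port names
  */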
 static GHashTable *
 build_port_aliases(const char *hostmap, GListPtr * targets)
 {
     char *name = NULL;
     int last = 0, lpc = 0, max = 0, added = 0;
     GHashTable *aliases = crm_strcase_table_new();
 
     if (hostmap == NULL) {
         return aliases;
     }
 
     max = strlen(hostmap);
     for (; lpc <= max; lpc++) {
         switch (hostmap[lpc]) {
                 /* Assignment chars */
             case '=':
             case ':':
                 if (lpc > last) {
                     free(name);
                     name = calloc(1, 1 + lpc - last);
                     memcpy(name, hostmap + last, lpc - last);
                 }
                 last = lpc + 1;
                 break;
 
                 /* Delimiter chars */
                 /* case ',': Potentially used to specify multiple ports */
             case 0:
             case ';':
             case ' ':
             case '\t':
                 if (name) {
                     char *value = NULL;
 
                     value = calloc(1, 1 + lpc - last);
                     memcpy(value, hostmap + last, lpc - last);
 
                     crm_debug("Adding alias '%s'='%s'", name, value);
                     g_hash_table_replace(aliases, name, value);
                     if (targets) {
                         *targets = g_list_append(*targets, strdup(value));
                     }
                     value = NULL;
                     name = NULL;
                     added++;
 
                 } else if (lpc > last) {
                     crm_debug("Parse error at offset %d near '%s'", lpc - last, hostmap + last);
                 }
 
                 last = lpc + 1;
                 break;
         }
 
         if (hostmap[lpc] == 0) {
             break;
         }
     }
 
     if (added == 0) {
         crm_info("No host mappings detected in '%s'", hostmap);
     }
 
     free(name);
     return aliases;
 }
 
 GHashTable *metadata_cache = NULL;
 
 void
 free_metadata_cache() {
     if (metadata_cache != NULL) {
         g_hash_table_destroy(metadata_cache);
         metadata_cache = NULL;
     }
 }
 
 static void
 init_metadata_cache() {
     if (metadata_cache == NULL) {
         metadata_cache = crm_str_table_new();
     }
 }
 
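 /*!
  * \internal
  * \brief Retrieve a fence agent's metadata, using a local cache
  *
  * \param[in] agent  Fence agent name
  *
  * \return Parsed agent metadata XML on success, otherwise NULL (always NULL
  *         for the watchdog agent)
  * \note The caller is responsible for freeing the result with free_xml().
  */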
 static xmlNode *
 get_agent_metadata(const char *agent)
 {
     xmlNode *xml = NULL;
     char *buffer = NULL;
 
     init_metadata_cache();
     buffer = g_hash_table_lookup(metadata_cache, agent);
     if(safe_str_eq(agent, STONITH_WATCHDOG_AGENT)) {
         return NULL;
 
     } else if(buffer == NULL) {
         stonith_t *st = stonith_api_new();
         int rc;
 
         if (st == NULL) {
             crm_warn("Could not get agent meta-data: "
                      "API memory allocation failed");
             return NULL;
         }
         rc = st->cmds->metadata(st, st_opt_sync_call, agent, NULL, &buffer, 10);
         stonith_api_delete(st);
         if (rc || !buffer) {
             crm_err("Could not retrieve metadata for fencing agent %s", agent);
             return NULL;
         }
         g_hash_table_replace(metadata_cache, strdup(agent), buffer);
     }
 
     xml = string2xml(buffer);
 
     return xml;
 }
 
 static gboolean
 is_nodeid_required(xmlNode * xml)
 {
     xmlXPathObjectPtr xpath = NULL;
 
     if (stand_alone) {
         return FALSE;
     }
 
     if (!xml) {
         return FALSE;
     }
 
     xpath = xpath_search(xml, "//parameter[@name='nodeid']");
     if (numXpathResults(xpath)  <= 0) {
         freeXpathObject(xpath);
         return FALSE;
     }
 
     freeXpathObject(xpath);
     return TRUE;
 }
 
 #define MAX_ACTION_LEN 256
 
 static char *
 add_action(char *actions, const char *action)
 {
     int offset = 0;
 
     if (actions == NULL) {
         actions = calloc(1, MAX_ACTION_LEN);
     } else {
         offset = strlen(actions);
     }
 
     if (offset > 0) {
         offset += snprintf(actions+offset, MAX_ACTION_LEN - offset, " ");
     }
     offset += snprintf(actions+offset, MAX_ACTION_LEN - offset, "%s", action);
 
     return actions;
 }
 
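 /*!
  * \internal
  * \brief Parse useful action information from a device's agent metadata
  *
  * Set the device's flags according to the actions the agent advertises
  * (list, status, reboot), detect whether unfencing is automatic, and record
  * any actions that must be executed on the target node itself.
  *
  * \param[in,out] device  Device whose agent metadata should be parsed
  */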
 static void
 read_action_metadata(stonith_device_t *device)
 {
     xmlXPathObjectPtr xpath = NULL;
     int max = 0;
     int lpc = 0;
 
     if (device->agent_metadata == NULL) {
         return;
     }
 
     xpath = xpath_search(device->agent_metadata, "//action");
     max = numXpathResults(xpath);
 
     if (max <= 0) {
         freeXpathObject(xpath);
         return;
     }
 
     for (lpc = 0; lpc < max; lpc++) {
         const char *on_target = NULL;
         const char *action = NULL;
         xmlNode *match = getXpathResult(xpath, lpc);
 
         CRM_LOG_ASSERT(match != NULL);
         if(match == NULL) { continue; };
 
         on_target = crm_element_value(match, "on_target");
         action = crm_element_value(match, "name");
 
         if(safe_str_eq(action, "list")) {
             set_bit(device->flags, st_device_supports_list);
         } else if(safe_str_eq(action, "status")) {
             set_bit(device->flags, st_device_supports_status);
         } else if(safe_str_eq(action, "reboot")) {
             set_bit(device->flags, st_device_supports_reboot);
         } else if (safe_str_eq(action, "on")) {
             /* "automatic" means the cluster will unfence node when it joins */
             const char *automatic = crm_element_value(match, "automatic");
 
             /* "required" is a deprecated synonym for "automatic" */
             const char *required = crm_element_value(match, "required");
 
             if (crm_is_true(automatic) || crm_is_true(required)) {
                 device->automatic_unfencing = TRUE;
             }
         }
 
         if (action && crm_is_true(on_target)) {
             device->on_target_actions = add_action(device->on_target_actions, action);
         }
     }
 
     freeXpathObject(xpath);
 }
 
 /*!
  * \internal
  * \brief Set a pcmk_*_action parameter if not already set
  *
  * \param[in,out] params  Device parameters
  * \param[in]     action  Name of action
  * \param[in]     value   Value to use if action is not already set
  */
 static void
 map_action(GHashTable *params, const char *action, const char *value)
 {
     char *key = crm_strdup_printf("pcmk_%s_action", action);
 
     if (g_hash_table_lookup(params, key)) {
         crm_warn("Ignoring %s='%s', see %s instead",
                  STONITH_ATTR_ACTION_OP, value, key);
         free(key);
     } else {
         crm_warn("Mapping %s='%s' to %s='%s'",
                  STONITH_ATTR_ACTION_OP, value, key, value);
         g_hash_table_insert(params, key, strdup(value));
     }
 }
 
 /*!
  * \internal
  * \brief Create device parameter table from XML
  *
  * \param[in]     name    Device name (used for logging only)
  * \param[in,out] params  Device parameters
  */
 static GHashTable *
 xml2device_params(const char *name, xmlNode *dev)
 {
     GHashTable *params = xml2list(dev);
     const char *value;
 
     /* Action should never be specified in the device configuration,
      * but we support it for users who are familiar with other software
      * that worked that way.
      */
     value = g_hash_table_lookup(params, STONITH_ATTR_ACTION_OP);
     if (value != NULL) {
         crm_warn("%s has '%s' parameter, which should never be specified in configuration",
                  name, STONITH_ATTR_ACTION_OP);
 
         if (*value == '\0') {
             crm_warn("Ignoring empty '%s' parameter", STONITH_ATTR_ACTION_OP);
 
         } else if (strcmp(value, "reboot") == 0) {
             crm_warn("Ignoring %s='reboot' (see stonith-action cluster property instead)",
                      STONITH_ATTR_ACTION_OP);
 
         } else if (strcmp(value, "off") == 0) {
             map_action(params, "reboot", value);
 
         } else {
             map_action(params, "off", value);
             map_action(params, "reboot", value);
         }
 
         g_hash_table_remove(params, STONITH_ATTR_ACTION_OP);
     }
 
     return params;
 }
 
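 /*!
  * \internal
  * \brief Create a new fencing device record from a registration request
  *
  * \param[in] msg  XML containing the device description
  *
  * \return Newly allocated device on success, otherwise NULL
  * \note The caller is responsible for freeing the result with free_device().
  */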
 static stonith_device_t *
 build_device_from_xml(xmlNode * msg)
 {
     const char *value;
     xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, msg, LOG_ERR);
     stonith_device_t *device = NULL;
     char *agent = crm_element_value_copy(dev, "agent");
 
     CRM_CHECK(agent != NULL, return device);
 
     device = calloc(1, sizeof(stonith_device_t));
 
     CRM_CHECK(device != NULL, {free(agent); return device;});
 
     device->id = crm_element_value_copy(dev, XML_ATTR_ID);
     device->agent = agent;
     device->namespace = crm_element_value_copy(dev, "namespace");
     device->params = xml2device_params(device->id, dev);
 
     value = g_hash_table_lookup(device->params, STONITH_ATTR_HOSTLIST);
     if (value) {
         device->targets = stonith__parse_targets(value);
     }
 
     value = g_hash_table_lookup(device->params, STONITH_ATTR_HOSTMAP);
     device->aliases = build_port_aliases(value, &(device->targets));
 
     device->agent_metadata = get_agent_metadata(device->agent);
     read_action_metadata(device);
 
     value = g_hash_table_lookup(device->params, "nodeid");
     if (!value) {
         device->include_nodeid = is_nodeid_required(device->agent_metadata);
     }
 
     value = crm_element_value(dev, "rsc_provides");
     if (safe_str_eq(value, "unfencing")) {
         device->automatic_unfencing = TRUE;
     }
 
     if (is_action_required("on", device)) {
         crm_info("The fencing device '%s' requires unfencing", device->id);
     }
 
     if (device->on_target_actions) {
         crm_info("The fencing device '%s' requires actions (%s) to be executed on the target node",
                  device->id, device->on_target_actions);
     }
 
     device->work = mainloop_add_trigger(G_PRIORITY_HIGH, stonith_device_dispatch, device);
     /* TODO: Hook up priority */
 
     return device;
 }
 
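 /*!
  * \internal
  * \brief Determine how a device's eligible targets should be checked
  *
  * \param[in] dev  Fencing device to check
  *
  * \return Value of pcmk_host_check if set; otherwise "static-list" if a host
  *         list or host map is configured, "dynamic-list" if the agent
  *         supports the list action, "status" if it supports the status
  *         action, or "none"
  */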
 static const char *
 target_list_type(stonith_device_t * dev)
 {
     const char *check_type = NULL;
 
     check_type = g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTCHECK);
 
     if (check_type == NULL) {
 
         if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTLIST)) {
             check_type = "static-list";
         } else if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTMAP)) {
             check_type = "static-list";
         } else if(is_set(dev->flags, st_device_supports_list)){
             check_type = "dynamic-list";
         } else if(is_set(dev->flags, st_device_supports_status)){
             check_type = "status";
         } else {
             check_type = "none";
         }
     }
 
     return check_type;
 }
 
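 /*!
  * \internal
  * \brief Schedule an internally generated command on a fencing device
  *
  * Used for agent actions (such as "list" or "status") that the fencer itself
  * initiates rather than a client requesting them.
  *
  * \param[in] origin              Name of calling function (for logging)
  * \param[in] device              Device to execute the command on
  * \param[in] action              Agent action to execute
  * \param[in] victim              Target of the action (may be NULL)
  * \param[in] timeout             Timeout in seconds (0 means the 60s default)
  * \param[in] internal_user_data  Data stored in the command for the callback
  * \param[in] done_cb             Function to call when the action completes
  *                                (the command itself is passed as user_data)
  */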
 static void
 schedule_internal_command(const char *origin,
                           stonith_device_t * device,
                           const char *action,
                           const char *victim,
                           int timeout,
                           void *internal_user_data,
                           void (*done_cb) (GPid pid, int rc, const char *output,
                                            gpointer user_data))
 {
     async_command_t *cmd = NULL;
 
     cmd = calloc(1, sizeof(async_command_t));
 
     cmd->id = -1;
     cmd->default_timeout = timeout ? timeout : 60;
     cmd->timeout = cmd->default_timeout;
     cmd->action = strdup(action);
     cmd->victim = victim ? strdup(victim) : NULL;
     cmd->device = strdup(device->id);
     cmd->origin = strdup(origin);
     cmd->client = strdup(crm_system_name);
     cmd->client_name = strdup(crm_system_name);
 
     cmd->internal_user_data = internal_user_data;
     cmd->done_cb = done_cb; /* cmd, not internal_user_data, is passed to 'done_cb' as the userdata */
 
     schedule_stonith_command(cmd, device);
 }
 
 gboolean
 string_in_list(GListPtr list, const char *item)
 {
     int lpc = 0;
     int max = g_list_length(list);
 
     for (lpc = 0; lpc < max; lpc++) {
         const char *value = g_list_nth_data(list, lpc);
 
         if (safe_str_eq(item, value)) {
             return TRUE;
         } else {
             crm_trace("%d: '%s' != '%s'", lpc, item, value);
         }
     }
     return FALSE;
 }
 
 static void
 status_search_cb(GPid pid, int rc, const char *output, gpointer user_data)
 {
     async_command_t *cmd = user_data;
     struct device_search_s *search = cmd->internal_user_data;
     stonith_device_t *dev = cmd->device ? g_hash_table_lookup(device_list, cmd->device) : NULL;
     gboolean can = FALSE;
 
     free_async_command(cmd);
 
     if (!dev) {
         search_devices_record_result(search, NULL, FALSE);
         return;
     }
 
     mainloop_set_trigger(dev->work);
 
     if (rc == 1 /* unknown */ ) {
         crm_trace("Host %s is not known by %s", search->host, dev->id);
 
     } else if (rc == 0 /* active */  || rc == 2 /* inactive */ ) {
         crm_trace("Host %s is known by %s", search->host, dev->id);
         can = TRUE;
 
     } else {
         crm_notice("Unknown result when testing if %s can fence %s: rc=%d", dev->id, search->host,
                    rc);
     }
     search_devices_record_result(search, dev->id, can);
 }
 
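 /*!
  * \internal
  * \brief Callback for a "list" action run as part of a device search
  *
  * Refresh the device's cached target list from the agent output (or fall
  * back to status checks on failure), then record whether the search's host
  * appears in the list.
  *
  * \param[in] pid        Process ID of completed agent action (unused)
  * \param[in] rc         Exit status of agent action
  * \param[in] output     Agent output listing fenceable targets
  * \param[in] user_data  Command whose internal_user_data is the search
  */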
 static void
 dynamic_list_search_cb(GPid pid, int rc, const char *output, gpointer user_data)
 {
     async_command_t *cmd = user_data;
     struct device_search_s *search = cmd->internal_user_data;
     stonith_device_t *dev = cmd->device ? g_hash_table_lookup(device_list, cmd->device) : NULL;
     gboolean can_fence = FALSE;
 
     free_async_command(cmd);
 
     /* The host (or an alias for it) must appear in the agent's list output
      * to be eligible to be fenced.
      *
      * This causes problems if downed nodes aren't listed, or (for virtual
      * nodes) if a guest is still listed despite having been moved to another
      * machine.
      */
     if (!dev) {
         search_devices_record_result(search, NULL, FALSE);
         return;
     }
 
     mainloop_set_trigger(dev->work);
 
     /* If we successfully got the targets earlier, don't disable. */
     if (rc != 0 && !dev->targets) {
         crm_notice("Disabling port list queries for %s (%d): %s", dev->id, rc, output);
         /* Fall back to status */
         g_hash_table_replace(dev->params, strdup(STONITH_ATTR_HOSTCHECK), strdup("status"));
 
         g_list_free_full(dev->targets, free);
         dev->targets = NULL;
     } else if (!rc) {
         crm_info("Refreshing port list for %s", dev->id);
         g_list_free_full(dev->targets, free);
         dev->targets = stonith__parse_targets(output);
         dev->targets_age = time(NULL);
     }
 
     if (dev->targets) {
         const char *alias = g_hash_table_lookup(dev->aliases, search->host);
 
         if (!alias) {
             alias = search->host;
         }
         if (string_in_list(dev->targets, alias)) {
             can_fence = TRUE;
         }
     }
     search_devices_record_result(search, dev->id, can_fence);
 }
 
 /*!
  * \internal
  * \brief Return 1 if any key in \p first is missing from \p second or has a
  *        different value there, otherwise 0
  */
 static int
 device_params_diff(GHashTable *first, GHashTable *second) {
     char *key = NULL;
     char *value = NULL;
     GHashTableIter gIter;
 
     g_hash_table_iter_init(&gIter, first);
     while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&value)) {
 
         if(strstr(key, "CRM_meta") == key) {
             continue;
         } else if(strcmp(key, "crm_feature_set") == 0) {
             continue;
         } else {
             char *other_value = g_hash_table_lookup(second, key);
 
             if (!other_value || safe_str_neq(other_value, value)) {
                 crm_trace("Different value for %s: %s != %s", key, other_value, value);
                 return 1;
             }
         }
     }
 
     return 0;
 }
 
 /*!
  * \internal
  * \brief Check whether an identical device is already registered in device_list
  */
 static stonith_device_t *
 device_has_duplicate(stonith_device_t * device)
 {
     stonith_device_t *dup = g_hash_table_lookup(device_list, device->id);
 
     if (!dup) {
         crm_trace("No match for %s", device->id);
         return NULL;
 
     } else if (safe_str_neq(dup->agent, device->agent)) {
         crm_trace("Different agent: %s != %s", dup->agent, device->agent);
         return NULL;
     }
 
     /* Use calculate_operation_digest() here? */
     if (device_params_diff(device->params, dup->params) ||
         device_params_diff(dup->params, device->params)) {
         return NULL;
     }
 
     crm_trace("Match");
     return dup;
 }
 
 int
 stonith_device_register(xmlNode * msg, const char **desc, gboolean from_cib)
 {
     stonith_device_t *dup = NULL;
     stonith_device_t *device = build_device_from_xml(msg);
 
     CRM_CHECK(device != NULL, return -ENOMEM);
 
     dup = device_has_duplicate(device);
     if (dup) {
         crm_debug("Device '%s' already existed in device list (%d active devices)", device->id,
                    g_hash_table_size(device_list));
         free_device(device);
         device = dup;
 
     } else {
         stonith_device_t *old = g_hash_table_lookup(device_list, device->id);
 
         if (from_cib && old && old->api_registered) {
             /* If the CIB is overwriting an entry that is shared with a stonith
              * client, copy any pending ops that currently exist on the old
              * entry to the new one. Otherwise the pending ops will be reported
              * as failures.
              */
             crm_info("Overwriting an existing entry for %s from the cib", device->id);
             device->pending_ops = old->pending_ops;
             device->api_registered = TRUE;
             old->pending_ops = NULL;
             if (device->pending_ops) {
                 mainloop_set_trigger(device->work);
             }
         }
         g_hash_table_replace(device_list, device->id, device);
 
         crm_notice("Added '%s' to the device list (%d active devices)", device->id,
                    g_hash_table_size(device_list));
     }
     if (desc) {
         *desc = device->id;
     }
 
     if (from_cib) {
         device->cib_registered = TRUE;
     } else {
         device->api_registered = TRUE;
     }
 
     return pcmk_ok;
 }
 
 int
 stonith_device_remove(const char *id, gboolean from_cib)
 {
     stonith_device_t *device = g_hash_table_lookup(device_list, id);
 
     if (!device) {
         crm_info("Device '%s' not found (%d active devices)", id, g_hash_table_size(device_list));
         return pcmk_ok;
     }
 
     if (from_cib) {
         device->cib_registered = FALSE;
     } else {
         device->verified = FALSE;
         device->api_registered = FALSE;
     }
 
     if (!device->cib_registered && !device->api_registered) {
         g_hash_table_remove(device_list, id);
         crm_info("Removed '%s' from the device list (%d active devices)",
                  id, g_hash_table_size(device_list));
     } else {
         crm_trace("Not removing '%s' from the device list (%d active devices) "
                   "- still %s%s_registered", id, g_hash_table_size(device_list),
                   device->cib_registered?"cib":"", device->api_registered?"api":"");
     }
     return pcmk_ok;
 }
 
 /*!
  * \internal
  * \brief Return the number of stonith levels registered for a node
  *
  * \param[in] tp  Node's topology table entry
  *
  * \return Number of non-NULL levels in topology entry
  * \note This function is used only for log messages.
  */
 static int
 count_active_levels(stonith_topology_t * tp)
 {
     int lpc = 0;
     int count = 0;
 
     for (lpc = 0; lpc < ST_LEVEL_MAX; lpc++) {
         if (tp->levels[lpc] != NULL) {
             count++;
         }
     }
     return count;
 }
 
 static void
 free_topology_entry(gpointer data)
 {
     stonith_topology_t *tp = data;
 
     int lpc = 0;
 
     for (lpc = 0; lpc < ST_LEVEL_MAX; lpc++) {
         if (tp->levels[lpc] != NULL) {
             g_list_free_full(tp->levels[lpc], free);
         }
     }
     free(tp->target);
     free(tp->target_value);
     free(tp->target_pattern);
     free(tp->target_attribute);
     free(tp);
 }
 
 void
 free_topology_list()
 {
     if (topology != NULL) {
         g_hash_table_destroy(topology);
         topology = NULL;
     }
 }
 
 void
 init_topology_list()
 {
     if (topology == NULL) {
         topology = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL,
                                          free_topology_entry);
     }
 }
 
 char *stonith_level_key(xmlNode *level, int mode)
 {
     if(mode == -1) {
         mode = stonith_level_kind(level);
     }
 
     switch(mode) {
         case 0:
             return crm_element_value_copy(level, XML_ATTR_STONITH_TARGET);
         case 1:
             return crm_element_value_copy(level, XML_ATTR_STONITH_TARGET_PATTERN);
         case 2:
             {
                 const char *name = crm_element_value(level, XML_ATTR_STONITH_TARGET_ATTRIBUTE);
                 const char *value = crm_element_value(level, XML_ATTR_STONITH_TARGET_VALUE);
 
                 if(name && value) {
                     return crm_strdup_printf("%s=%s", name, value);
                 }
             }
         default:
             return crm_strdup_printf("Unknown-%d-%s", mode, ID(level));
     }
 }
 
 int stonith_level_kind(xmlNode * level)
 {
     int mode = 0;
     const char *target = crm_element_value(level, XML_ATTR_STONITH_TARGET);
 
     if(target == NULL) {
         mode++;
         target = crm_element_value(level, XML_ATTR_STONITH_TARGET_PATTERN);
     }
 
     if(stand_alone == FALSE && target == NULL) {
 
         mode++;
 
         if(crm_element_value(level, XML_ATTR_STONITH_TARGET_ATTRIBUTE) == NULL) {
             mode++;
 
         } else if(crm_element_value(level, XML_ATTR_STONITH_TARGET_VALUE) == NULL) {
             mode++;
         }
     }
 
     return mode;
 }
 
 static stonith_key_value_t *
 parse_device_list(const char *devices)
 {
     int lpc = 0;
     int max = 0;
     int last = 0;
     stonith_key_value_t *output = NULL;
 
     if (devices == NULL) {
         return output;
     }
 
     max = strlen(devices);
     for (lpc = 0; lpc <= max; lpc++) {
         if (devices[lpc] == ',' || devices[lpc] == 0) {
             char *line = strndup(devices + last, lpc - last);
 
             output = stonith_key_value_add(output, NULL, line);
             free(line);
 
             last = lpc + 1;
         }
     }
 
     return output;
 }
 
 /*!
  * \internal
  * \brief Register a STONITH level for a target
  *
  * Given an XML request specifying the target name, level index, and device IDs
  * for the level, this will create an entry for the target in the global topology
  * table if one does not already exist, then append the specified device IDs to
  * the entry's device list for the specified level.
  *
  * \param[in]  msg   XML request for STONITH level registration
  * \param[out] desc  If not NULL, will be set to string representation ("TARGET[LEVEL]")
  *
  * \return pcmk_ok on success, -EINVAL if XML does not specify valid level index
  */
 int
 stonith_level_register(xmlNode *msg, char **desc)
 {
     int id = 0;
     xmlNode *level;
     int mode;
     char *target;
 
     stonith_topology_t *tp;
     stonith_key_value_t *dIter = NULL;
     stonith_key_value_t *devices = NULL;
 
     /* Allow the XML here to point to the level tag directly, or wrapped in
      * another tag. If directly, don't search by xpath, because it might give
      * multiple hits (e.g. if the XML is the CIB).
      */
     if (safe_str_eq(TYPE(msg), XML_TAG_FENCING_LEVEL)) {
         level = msg;
     } else {
         level = get_xpath_object("//" XML_TAG_FENCING_LEVEL, msg, LOG_ERR);
     }
     CRM_CHECK(level != NULL, return -EINVAL);
 
     mode = stonith_level_kind(level);
     target = stonith_level_key(level, mode);
     crm_element_value_int(level, XML_ATTR_STONITH_INDEX, &id);
 
     if (desc) {
         *desc = crm_strdup_printf("%s[%d]", target, id);
     }
 
     /* Sanity-check arguments */
     if (mode >= 3 || (id <= 0) || (id >= ST_LEVEL_MAX)) {
         crm_trace("Could not add %s[%d] (%d) to the topology (%d active entries)", target, id, mode, g_hash_table_size(topology));
         free(target);
         crm_log_xml_err(level, "Bad topology");
         return -EINVAL;
     }
 
     /* Find or create topology table entry */
     tp = g_hash_table_lookup(topology, target);
     if (tp == NULL) {
         tp = calloc(1, sizeof(stonith_topology_t));
         tp->kind = mode;
         tp->target = target;
         tp->target_value = crm_element_value_copy(level, XML_ATTR_STONITH_TARGET_VALUE);
         tp->target_pattern = crm_element_value_copy(level, XML_ATTR_STONITH_TARGET_PATTERN);
         tp->target_attribute = crm_element_value_copy(level, XML_ATTR_STONITH_TARGET_ATTRIBUTE);
 
         g_hash_table_replace(topology, tp->target, tp);
         crm_trace("Added %s (%d) to the topology (%d active entries)",
                   target, mode, g_hash_table_size(topology));
     } else {
         free(target);
     }
 
     if (tp->levels[id] != NULL) {
         crm_info("Adding to the existing %s[%d] topology entry",
                  tp->target, id);
     }
 
     devices = parse_device_list(crm_element_value(level, XML_ATTR_STONITH_DEVICES));
     for (dIter = devices; dIter; dIter = dIter->next) {
         const char *device = dIter->value;
 
         crm_trace("Adding device '%s' for %s[%d]", device, tp->target, id);
         tp->levels[id] = g_list_append(tp->levels[id], strdup(device));
     }
     stonith_key_value_freeall(devices, 1, 1);
 
     crm_info("Target %s has %d active fencing levels",
              tp->target, count_active_levels(tp));
     return pcmk_ok;
 }
 
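 /*!
  * \internal
  * \brief Unregister a STONITH level for a target
  *
  * Given an XML request specifying the target name and level index, this will
  * remove the requested level from the target's topology entry, or remove the
  * target's entire entry if the level index is 0.
  *
  * \param[in]  msg   XML request for STONITH level removal
  * \param[out] desc  If not NULL, will be set to string representation ("TARGET[LEVEL]")
  *
  * \return pcmk_ok on success, -EINVAL if XML does not specify valid level index
  */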
 int
 stonith_level_remove(xmlNode *msg, char **desc)
 {
     int id = 0;
     stonith_topology_t *tp;
     char *target;
 
     /* Unlike additions, removal requests should always have one level tag */
     xmlNode *level = get_xpath_object("//" XML_TAG_FENCING_LEVEL, msg, LOG_ERR);
 
     CRM_CHECK(level != NULL, return -EINVAL);
 
     target = stonith_level_key(level, -1);
     crm_element_value_int(level, XML_ATTR_STONITH_INDEX, &id);
     if (desc) {
         *desc = crm_strdup_printf("%s[%d]", target, id);
     }
 
     /* Sanity-check arguments */
     if (id >= ST_LEVEL_MAX) {
         free(target);
         return -EINVAL;
     }
 
     tp = g_hash_table_lookup(topology, target);
     if (tp == NULL) {
         crm_info("Topology for %s not found (%d active entries)",
                  target, g_hash_table_size(topology));
 
     } else if (id == 0 && g_hash_table_remove(topology, target)) {
         crm_info("Removed all %s related entries from the topology (%d active entries)",
                  target, g_hash_table_size(topology));
 
     } else if (id > 0 && tp->levels[id] != NULL) {
         g_list_free_full(tp->levels[id], free);
         tp->levels[id] = NULL;
 
         crm_info("Removed level '%d' from topology for %s (%d active levels remaining)",
                  id, target, count_active_levels(tp));
     }
 
     free(target);
     return pcmk_ok;
 }
 
 /*!
  * \internal
  * \brief Schedule an (asynchronous) action directly on a stonith device
  *
  * Handle a STONITH_OP_EXEC API message by scheduling a requested agent action
  * directly on a specified device. Only list, monitor, and status actions are
  * expected to use this call, though it should work with any agent command.
  *
  * \param[in]  msg     API message XML with desired action
  * \param[out] output  Unused
  *
  * \return -EINPROGRESS on success, -errno otherwise
  * \note If the action is monitor, the device must be registered via the API
  *       (CIB registration is not sufficient), because monitor should not be
  *       possible unless the device is "started" (API registered).
  */
 static int
 stonith_device_action(xmlNode * msg, char **output)
 {
     xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, msg, LOG_ERR);
     xmlNode *op = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_ERR);
     const char *id = crm_element_value(dev, F_STONITH_DEVICE);
     const char *action = crm_element_value(op, F_STONITH_ACTION);
     async_command_t *cmd = NULL;
     stonith_device_t *device = NULL;
 
     if ((id == NULL) || (action == NULL)) {
         crm_info("Malformed API action request: device %s, action %s",
                  (id? id : "not specified"),
                  (action? action : "not specified"));
         return -EPROTO;
     }
 
     device = g_hash_table_lookup(device_list, id);
     if ((device == NULL)
         || (!device->api_registered && !strcmp(action, "monitor"))) {
 
         // Monitors may run only on "started" (API-registered) devices
         crm_info("Ignoring API '%s' action request because device %s not found",
                  action, id);
         return -ENODEV;
     }
 
     cmd = create_async_command(msg);
     if (cmd == NULL) {
         return -EPROTO;
     }
 
     schedule_stonith_command(cmd, device);
     return -EINPROGRESS;
 }
 
 static void
 search_devices_record_result(struct device_search_s *search, const char *device, gboolean can_fence)
 {
     search->replies_received++;
 
     if (can_fence && device) {
         search->capable = g_list_append(search->capable, strdup(device));
     }
 
     if (search->replies_needed == search->replies_received) {
 
         crm_debug("Finished Search. %d devices can perform action (%s) on node %s",
                   g_list_length(search->capable),
                   search->action ? search->action : "<unknown>",
                   search->host ? search->host : "<anyone>");
 
         search->callback(search->capable, search->user_data);
         free(search->host);
         free(search->action);
         free(search);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether the local host is allowed to execute a fencing action
  *
  * \param[in] device         Fence device to check
  * \param[in] action         Fence action to check
  * \param[in] target         Hostname of fence target
  * \param[in] allow_suicide  Whether self-fencing is allowed for this operation
  *
  * \return TRUE if local host is allowed to execute action, FALSE otherwise
  */
 static gboolean
 localhost_is_eligible(const stonith_device_t *device, const char *action,
                       const char *target, gboolean allow_suicide)
 {
     gboolean localhost_is_target = safe_str_eq(target, stonith_our_uname);
 
     if (device && action && device->on_target_actions
         && strstr(device->on_target_actions, action)) {
         if (!localhost_is_target) {
             crm_trace("'%s' operation with %s can only be executed for localhost not %s",
                       action, device->id, target);
             return FALSE;
         }
 
     } else if (localhost_is_target && !allow_suicide) {
         crm_trace("'%s' operation does not support self-fencing", action);
         return FALSE;
     }
     return TRUE;
 }
 
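 /*!
  * \internal
  * \brief Check whether a device can fence a search's target, recording result
  *
  * Depending on the device's pcmk_host_check method, either record the result
  * immediately, or schedule an asynchronous "list" or "status" agent action
  * whose callback will record the result.
  *
  * \param[in] dev     Fencing device to check
  * \param[in] search  Device search this check is part of
  */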
 static void
 can_fence_host_with_device(stonith_device_t * dev, struct device_search_s *search)
 {
     gboolean can = FALSE;
     const char *check_type = NULL;
     const char *host = search->host;
     const char *alias = NULL;
 
     CRM_LOG_ASSERT(dev != NULL);
 
     if (dev == NULL) {
         goto search_report_results;
     } else if (host == NULL) {
         can = TRUE;
         goto search_report_results;
     }
 
     /* Short-circuit the query if the local host is not allowed to perform
      * the action
      */
     if (safe_str_eq(search->action, "reboot")) {
         /* A "reboot" *might* get remapped to "off" then "on", so short-circuit
          * only if all three are disallowed. If only one or two are disallowed,
          * we'll report that with the results. We never allow suicide for
          * remapped "on" operations because the host is off at that point.
          */
         if (!localhost_is_eligible(dev, "reboot", host, search->allow_suicide)
             && !localhost_is_eligible(dev, "off", host, search->allow_suicide)
             && !localhost_is_eligible(dev, "on", host, FALSE)) {
             goto search_report_results;
         }
     } else if (!localhost_is_eligible(dev, search->action, host,
                                       search->allow_suicide)) {
         goto search_report_results;
     }
 
     alias = g_hash_table_lookup(dev->aliases, host);
     if (alias == NULL) {
         alias = host;
     }
 
     check_type = target_list_type(dev);
 
     if (safe_str_eq(check_type, "none")) {
         can = TRUE;
 
     } else if (safe_str_eq(check_type, "static-list")) {
 
         /* Presence in the host map is sufficient. Use this only if all hosts
          * on which the device can be active can always fence all of the
          * listed hosts.
          */
 
         if (string_in_list(dev->targets, host)) {
             can = TRUE;
         } else if (g_hash_table_lookup(dev->params, STONITH_ATTR_HOSTMAP)
                    && g_hash_table_lookup(dev->aliases, host)) {
             can = TRUE;
         }
 
     } else if (safe_str_eq(check_type, "dynamic-list")) {
         time_t now = time(NULL);
 
         if (dev->targets == NULL || dev->targets_age + 60 < now) {
             crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)",
                       check_type, dev->id, search->host, search->action);
 
             schedule_internal_command(__FUNCTION__, dev, "list", NULL,
                                       search->per_device_timeout, search, dynamic_list_search_cb);
 
             /* we'll respond to this search request asynchronously in the callback */
             return;
         }
 
         if (string_in_list(dev->targets, alias)) {
             can = TRUE;
         }
 
     } else if (safe_str_eq(check_type, "status")) {
         crm_trace("Running '%s' to check whether %s is eligible to fence %s (%s)",
                   check_type, dev->id, search->host, search->action);
         schedule_internal_command(__FUNCTION__, dev, "status", search->host,
                                   search->per_device_timeout, search, status_search_cb);
         /* we'll respond to this search request asynchronously in the callback */
         return;
     } else {
         crm_err("Invalid value for " STONITH_ATTR_HOSTCHECK ": %s", check_type);
         check_type = "Invalid " STONITH_ATTR_HOSTCHECK;
     }
 
     if (safe_str_eq(host, alias)) {
         crm_notice("%s is%s eligible to fence (%s) %s: %s",
                    dev->id, (can? "" : " not"), search->action, host,
                    check_type);
     } else {
         crm_notice("%s is%s eligible to fence (%s) %s (aka. '%s'): %s",
                    dev->id, (can? "" : " not"), search->action, host, alias,
                    check_type);
     }
 
   search_report_results:
     search_devices_record_result(search, dev ? dev->id : NULL, can);
 }
 
 static void
 search_devices(gpointer key, gpointer value, gpointer user_data)
 {
     stonith_device_t *dev = value;
     struct device_search_s *search = user_data;
 
     can_fence_host_with_device(dev, search);
 }
 
 #define DEFAULT_QUERY_TIMEOUT 20
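 /*!
  * \internal
  * \brief Search all registered devices for those capable of an action
  *
  * Check every registered device for whether it can perform \p action on
  * \p host. Devices using dynamic host lists or status checks are queried
  * asynchronously; \p callback is invoked once all replies have been received.
  *
  * \param[in] host       Name of node to be fenced (may be NULL)
  * \param[in] action     Fence action to check
  * \param[in] timeout    Total timeout in seconds to divide among any
  *                       asynchronous device queries
  * \param[in] suicide    Whether self-fencing is allowed for this operation
  * \param[in] user_data  Data to pass to \p callback
  * \param[in] callback   Function to call with the list of capable devices
  */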
 static void
 get_capable_devices(const char *host, const char *action, int timeout, bool suicide, void *user_data,
                     void (*callback) (GList * devices, void *user_data))
 {
     struct device_search_s *search;
     int per_device_timeout = DEFAULT_QUERY_TIMEOUT;
     int devices_needing_async_query = 0;
     char *key = NULL;
     const char *check_type = NULL;
     GHashTableIter gIter;
     stonith_device_t *device = NULL;
 
     if (!g_hash_table_size(device_list)) {
         callback(NULL, user_data);
         return;
     }
 
     search = calloc(1, sizeof(struct device_search_s));
     if (!search) {
         callback(NULL, user_data);
         return;
     }
 
     g_hash_table_iter_init(&gIter, device_list);
     while (g_hash_table_iter_next(&gIter, (void **)&key, (void **)&device)) {
         check_type = target_list_type(device);
         if (safe_str_eq(check_type, "status") || safe_str_eq(check_type, "dynamic-list")) {
             devices_needing_async_query++;
         }
     }
 
     /* If we have devices that require an async event in order to know what
      * nodes they can fence, we have to give the events a timeout. The total
      * query timeout is divided among those events. */
     if (devices_needing_async_query) {
         per_device_timeout = timeout / devices_needing_async_query;
         if (!per_device_timeout) {
             crm_err("STONITH timeout %ds is too low; using %ds, but consider raising to at least %ds",
                     timeout, DEFAULT_QUERY_TIMEOUT,
                     DEFAULT_QUERY_TIMEOUT * devices_needing_async_query);
             per_device_timeout = DEFAULT_QUERY_TIMEOUT;
         } else if (per_device_timeout < DEFAULT_QUERY_TIMEOUT) {
             crm_notice("STONITH timeout %ds is low for the current configuration;"
                        " consider raising to at least %ds",
                        timeout, DEFAULT_QUERY_TIMEOUT * devices_needing_async_query);
         }
     }
 
     search->host = host ? strdup(host) : NULL;
     search->action = action ? strdup(action) : NULL;
     search->per_device_timeout = per_device_timeout;
     /* We are guaranteed this many replies. Even if a device somehow gets
      * unregistered during the async search, we will still get the correct
      * number of replies. */
     search->replies_needed = g_hash_table_size(device_list);
     search->allow_suicide = suicide;
     search->callback = callback;
     search->user_data = user_data;
     /* kick off the search */
 
     crm_debug("Searching through %d devices to see what is capable of action (%s) for target %s",
               search->replies_needed,
               search->action ? search->action : "<unknown>",
               search->host ? search->host : "<anyone>");
     g_hash_table_foreach(device_list, search_devices, search);
 }
 
 struct st_query_data {
     xmlNode *reply;
     char *remote_peer;
     char *client_id;
     char *target;
     char *action;
     int call_options;
 };
 
 /*!
  * \internal
  * \brief Add action-specific attributes to query reply XML
  *
  * \param[in,out] xml     XML to add attributes to
  * \param[in]     action  Fence action
  * \param[in]     device  Fence device
  */
 static void
 add_action_specific_attributes(xmlNode *xml, const char *action,
                                stonith_device_t *device)
 {
     int action_specific_timeout;
     int delay_max;
     int delay_base;
 
     CRM_CHECK(xml && action && device, return);
 
     if (is_action_required(action, device)) {
         crm_trace("Action '%s' is required on %s", action, device->id);
         crm_xml_add_int(xml, F_STONITH_DEVICE_REQUIRED, 1);
     }
 
     action_specific_timeout = get_action_timeout(device, action, 0);
     if (action_specific_timeout) {
         crm_trace("Action '%s' has timeout %dms on %s",
                   action, action_specific_timeout, device->id);
         crm_xml_add_int(xml, F_STONITH_ACTION_TIMEOUT, action_specific_timeout);
     }
 
     delay_max = get_action_delay_max(device, action);
     if (delay_max > 0) {
         crm_trace("Action '%s' has maximum random delay %dms on %s",
                   action, delay_max, device->id);
         crm_xml_add_int(xml, F_STONITH_DELAY_MAX, delay_max / 1000);
     }
 
     delay_base = get_action_delay_base(device, action);
     if (delay_base > 0) {
         crm_xml_add_int(xml, F_STONITH_DELAY_BASE, delay_base / 1000);
     }
 
     if ((delay_max > 0) && (delay_base == 0)) {
         crm_trace("Action '%s' has maximum random delay %dms on %s",
                   action, delay_max, device->id);
     } else if ((delay_max == 0) && (delay_base > 0)) {
         crm_trace("Action '%s' has a static delay of %dms on %s",
                   action, delay_base, device->id);
     } else if ((delay_max > 0) && (delay_base > 0)) {
         crm_trace("Action '%s' has a minimum delay of %dms and a randomly chosen "
                   "maximum delay of %dms on %s",
                   action, delay_base, delay_max, device->id);
     }
 }
 
 /*!
  * \internal
  * \brief Add "disallowed" attribute to query reply XML if appropriate
  *
  * \param[in,out] xml            XML to add attribute to
  * \param[in]     action         Fence action
  * \param[in]     device         Fence device
  * \param[in]     target         Fence target
  * \param[in]     allow_suicide  Whether self-fencing is allowed
  */
 static void
 add_disallowed(xmlNode *xml, const char *action, stonith_device_t *device,
                const char *target, gboolean allow_suicide)
 {
     if (!localhost_is_eligible(device, action, target, allow_suicide)) {
         crm_trace("Action '%s' on %s is disallowed for local host",
                   action, device->id);
         crm_xml_add(xml, F_STONITH_ACTION_DISALLOWED, XML_BOOLEAN_TRUE);
     }
 }
 
 /*!
  * \internal
  * \brief Add child element with action-specific values to query reply XML
  *
  * \param[in,out] xml            XML to add attribute to
  * \param[in]     action         Fence action
  * \param[in]     device         Fence device
  * \param[in]     target         Fence target
  * \param[in]     allow_suicide  Whether self-fencing is allowed
  */
 static void
 add_action_reply(xmlNode *xml, const char *action, stonith_device_t *device,
                const char *target, gboolean allow_suicide)
 {
     xmlNode *child = create_xml_node(xml, F_STONITH_ACTION);
 
     crm_xml_add(child, XML_ATTR_ID, action);
     add_action_specific_attributes(child, action, device);
     add_disallowed(child, action, device, target, allow_suicide);
 }
 
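 /*!
  * \internal
  * \brief Pack the result of a capable-device search into XML and send the
  *        query reply
  *
  * \param[in] devices    List of IDs of devices capable of the requested action
  * \param[in] user_data  Query data (struct st_query_data) to reply to
  */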
 static void
 stonith_query_capable_device_cb(GList * devices, void *user_data)
 {
     struct st_query_data *query = user_data;
     int available_devices = 0;
     xmlNode *dev = NULL;
     xmlNode *list = NULL;
     GListPtr lpc = NULL;
 
     /* Pack the results into XML */
     list = create_xml_node(NULL, __FUNCTION__);
     crm_xml_add(list, F_STONITH_TARGET, query->target);
     for (lpc = devices; lpc != NULL; lpc = lpc->next) {
         stonith_device_t *device = g_hash_table_lookup(device_list, lpc->data);
         const char *action = query->action;
 
         if (!device) {
             /* It is possible the device got unregistered while
              * determining who can fence the target */
             continue;
         }
 
         available_devices++;
 
         dev = create_xml_node(list, F_STONITH_DEVICE);
         crm_xml_add(dev, XML_ATTR_ID, device->id);
         crm_xml_add(dev, "namespace", device->namespace);
         crm_xml_add(dev, "agent", device->agent);
         crm_xml_add_int(dev, F_STONITH_DEVICE_VERIFIED, device->verified);
 
         /* If the originating fencer wants to reboot the node, and we have a
          * capable device that doesn't support "reboot", remap to "off" instead.
          */
         if (is_not_set(device->flags, st_device_supports_reboot)
             && safe_str_eq(query->action, "reboot")) {
             crm_trace("%s doesn't support reboot, using values for off instead",
                       device->id);
             action = "off";
         }
 
         /* Add action-specific values if available */
         add_action_specific_attributes(dev, action, device);
         if (safe_str_eq(query->action, "reboot")) {
             /* A "reboot" *might* get remapped to "off" then "on", so after
              * sending the "reboot"-specific values in the main element, we add
              * sub-elements for "off" and "on" values.
              *
              * We short-circuited earlier if "reboot", "off" and "on" are all
              * disallowed for the local host. However if only one or two are
              * disallowed, we send back the results and mark which ones are
              * disallowed. If "reboot" is disallowed, this might cause problems
              * with older fencer versions, which won't check for it. Older
              * versions will ignore "off" and "on", so they are not a problem.
              */
             add_disallowed(dev, action, device, query->target,
                            is_set(query->call_options, st_opt_allow_suicide));
             add_action_reply(dev, "off", device, query->target,
                              is_set(query->call_options, st_opt_allow_suicide));
             add_action_reply(dev, "on", device, query->target, FALSE);
         }
 
         /* A query without a target wants device parameters */
         if (query->target == NULL) {
             xmlNode *attrs = create_xml_node(dev, XML_TAG_ATTRS);
 
             g_hash_table_foreach(device->params, hash2field, attrs);
         }
     }
 
     crm_xml_add_int(list, F_STONITH_AVAILABLE_DEVICES, available_devices);
     if (query->target) {
         crm_debug("Found %d matching devices for '%s'", available_devices, query->target);
     } else {
         crm_debug("%d devices installed", available_devices);
     }
 
     if (list != NULL) {
         crm_log_xml_trace(list, "Add query results");
         add_message_xml(query->reply, F_STONITH_CALLDATA, list);
     }
     stonith_send_reply(query->reply, query->call_options, query->remote_peer, query->client_id);
 
     free_xml(query->reply);
     free(query->remote_peer);
     free(query->client_id);
     free(query->target);
     free(query->action);
     free(query);
     free_xml(list);
     g_list_free_full(devices, free);
 }
 
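 /*!
  * \internal
  * \brief Handle a fencer query by searching for capable devices
  *
  * The reply is sent asynchronously from stonith_query_capable_device_cb()
  * once all device replies have been received.
  */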
 static void
 stonith_query(xmlNode * msg, const char *remote_peer, const char *client_id, int call_options)
 {
     struct st_query_data *query = NULL;
     const char *action = NULL;
     const char *target = NULL;
     int timeout = 0;
     xmlNode *dev = get_xpath_object("//@" F_STONITH_ACTION, msg, LOG_NEVER);
 
     crm_element_value_int(msg, F_STONITH_TIMEOUT, &timeout);
     if (dev) {
         const char *device = crm_element_value(dev, F_STONITH_DEVICE);
 
         target = crm_element_value(dev, F_STONITH_TARGET);
         action = crm_element_value(dev, F_STONITH_ACTION);
         if (device && safe_str_eq(device, "manual_ack")) {
             /* No query or reply necessary */
             return;
         }
     }
 
     crm_log_xml_debug(msg, "Query");
     query = calloc(1, sizeof(struct st_query_data));
 
     query->reply = stonith_construct_reply(msg, NULL, NULL, pcmk_ok);
     query->remote_peer = remote_peer ? strdup(remote_peer) : NULL;
     query->client_id = client_id ? strdup(client_id) : NULL;
     query->target = target ? strdup(target) : NULL;
     query->action = action ? strdup(action) : NULL;
     query->call_options = call_options;
 
     get_capable_devices(target, action, timeout,
                         is_set(call_options, st_opt_allow_suicide),
                         query, stonith_query_capable_device_cb);
 }
 
 #define ST_LOG_OUTPUT_MAX 512
 static void
-log_operation(async_command_t * cmd, int rc, int pid, const char *next, const char *output)
+log_operation(async_command_t * cmd, int rc, int pid, const char *next, const char *output, gboolean op_merged)
 {
     if (rc == 0) {
         next = NULL;
     }
 
     if (cmd->victim != NULL) {
         do_crm_log(rc == 0 ? LOG_NOTICE : LOG_ERR,
-                   "Operation '%s' [%d] (call %d from %s) for host '%s' with device '%s' returned: %d (%s)%s%s",
+                   "Operation '%s' [%d] (call %d from %s) for host '%s' with device '%s' returned %s: %d (%s)%s%s",
                    cmd->action, pid, cmd->id, cmd->client_name, cmd->victim,
-                   cmd->device, rc, pcmk_strerror(rc),
+                   cmd->device, (op_merged? "(merged)" : ""), rc, pcmk_strerror(rc),
                    (next? ", retrying with " : ""), (next ? next : ""));
     } else {
         do_crm_log_unlikely(rc == 0 ? LOG_DEBUG : LOG_NOTICE,
-                            "Operation '%s' [%d] for device '%s' returned: %d (%s)%s%s",
-                            cmd->action, pid, cmd->device, rc, pcmk_strerror(rc),
+                            "Operation '%s' [%d] for device '%s' returned %s: %d (%s)%s%s",
+                            cmd->action, pid, cmd->device, (op_merged? "(merged)" : ""), rc, pcmk_strerror(rc),
                             (next? ", retrying with " : ""), (next ? next : ""));
     }
 
     if (output) {
         /* Logging the whole string confuses syslog when the string is xml */
         char *prefix = crm_strdup_printf("%s:%d", cmd->device, pid);
 
         crm_log_output(rc == 0 ? LOG_DEBUG : LOG_WARNING, prefix, output);
         free(prefix);
     }
 }
 
 static void
-stonith_send_async_reply(async_command_t * cmd, const char *output, int rc, GPid pid)
+stonith_send_async_reply(async_command_t * cmd, const char *output, int rc, GPid pid, int options)
 {
     xmlNode *reply = NULL;
     gboolean bcast = FALSE;
 
     reply = stonith_construct_async_reply(cmd, output, NULL, rc);
 
     if (safe_str_eq(cmd->action, "metadata")) {
         /* Too verbose to log */
         crm_trace("Metadata query for %s", cmd->device);
         output = NULL;
 
     } else if (crm_str_eq(cmd->action, "monitor", TRUE) ||
                crm_str_eq(cmd->action, "list", TRUE) || crm_str_eq(cmd->action, "status", TRUE)) {
         crm_trace("Never broadcast '%s' replies", cmd->action);
 
     } else if (!stand_alone && safe_str_eq(cmd->origin, cmd->victim) && safe_str_neq(cmd->action, "on")) {
         crm_trace("Broadcast '%s' reply for %s", cmd->action, cmd->victim);
         crm_xml_add(reply, F_SUBTYPE, "broadcast");
         bcast = TRUE;
     }
 
-    log_operation(cmd, rc, pid, NULL, output);
+    log_operation(cmd, rc, pid, NULL, output, (options & st_reply_opt_merged ? TRUE : FALSE));
     crm_log_xml_trace(reply, "Reply");
 
+    if (options & st_reply_opt_merged) {
+        crm_xml_add(reply, F_STONITH_MERGED, "true");
+    }
+
     if (bcast) {
         crm_xml_add(reply, F_STONITH_OPERATION, T_STONITH_NOTIFY);
         send_cluster_message(NULL, crm_msg_stonith_ng, reply, FALSE);
 
     } else if (cmd->origin) {
         crm_trace("Directed reply to %s", cmd->origin);
         send_cluster_message(crm_get_peer(0, cmd->origin), crm_msg_stonith_ng, reply, FALSE);
 
     } else {
         crm_trace("Directed local %ssync reply to %s",
                   (cmd->options & st_opt_sync_call) ? "" : "a-", cmd->client_name);
         do_local_reply(reply, cmd->client, cmd->options & st_opt_sync_call, FALSE);
     }
 
     if (stand_alone) {
         /* Do notification with a clean data object */
         xmlNode *notify_data = create_xml_node(NULL, T_STONITH_NOTIFY_FENCE);
 
         crm_xml_add_int(notify_data, F_STONITH_RC, rc);
         crm_xml_add(notify_data, F_STONITH_TARGET, cmd->victim);
         crm_xml_add(notify_data, F_STONITH_OPERATION, cmd->op);
         crm_xml_add(notify_data, F_STONITH_DELEGATE, "localhost");
         crm_xml_add(notify_data, F_STONITH_DEVICE, cmd->device);
         crm_xml_add(notify_data, F_STONITH_REMOTE_OP_ID, cmd->remote_op_id);
         crm_xml_add(notify_data, F_STONITH_ORIGIN, cmd->client);
 
         do_stonith_notify(0, T_STONITH_NOTIFY_FENCE, rc, notify_data);
         do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY, 0, NULL);
     }
 
     free_xml(reply);
 }
 
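 /*!
  * \internal
  * \brief Remove a command from its device's list of pending operations
  *
  * \param[in] cmd  Fencing command to cancel
  */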
 static void
 cancel_stonith_command(async_command_t * cmd)
 {
     stonith_device_t *device;
 
     CRM_CHECK(cmd != NULL, return);
 
     if (!cmd->device) {
         return;
     }
 
     device = g_hash_table_lookup(device_list, cmd->device);
 
     if (device) {
         crm_trace("Cancel scheduled '%s' action on %s", cmd->action, device->id);
         device->pending_ops = g_list_remove(device->pending_ops, cmd);
     }
 }
 
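 /*!
  * \internal
  * \brief Handle the completion of a fence agent invocation
  *
  * Mark the device as verified where appropriate, move on to the next device
  * if more fencing is required (or another device can be tried after a
  * failure), and otherwise send the reply and merge the result into any
  * identical pending commands.
  */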
 static void
 st_child_done(GPid pid, int rc, const char *output, gpointer user_data)
 {
     stonith_device_t *device = NULL;
     stonith_device_t *next_device = NULL;
     async_command_t *cmd = user_data;
 
     GListPtr gIter = NULL;
     GListPtr gIterNext = NULL;
 
     CRM_CHECK(cmd != NULL, return);
 
     cmd->active_on = NULL;
 
     /* The device is ready to do something else now */
     device = g_hash_table_lookup(device_list, cmd->device);
     if (device) {
         if (!device->verified && (rc == pcmk_ok) &&
             (safe_str_eq(cmd->action, "list") ||
              safe_str_eq(cmd->action, "monitor") || safe_str_eq(cmd->action, "status"))) {
 
             device->verified = TRUE;
         }
 
         mainloop_set_trigger(device->work);
     }
 
     crm_debug("Operation '%s' on '%s' completed with rc=%d (%d remaining)",
               cmd->action, cmd->device, rc, g_list_length(cmd->device_next));
 
     if (rc == 0) {
         GListPtr iter;
         /* see if there are any required devices left to execute for this op */
         for (iter = cmd->device_next; iter != NULL; iter = iter->next) {
             next_device = g_hash_table_lookup(device_list, iter->data);
 
             if (next_device != NULL && is_action_required(cmd->action, next_device)) {
                 cmd->device_next = iter->next;
                 break;
             }
             next_device = NULL;
         }
 
     } else if (rc != 0 && cmd->device_next && (is_action_required(cmd->action, device) == FALSE)) {
         /* If this device didn't work out, see if there are any others we can try;
          * if the failed device was 'required', we can't pick another device. */
         next_device = g_hash_table_lookup(device_list, cmd->device_next->data);
         cmd->device_next = cmd->device_next->next;
     }
 
     /* this operation requires more fencing, hooray! */
     if (next_device) {
-        log_operation(cmd, rc, pid, next_device->id, output);
+        log_operation(cmd, rc, pid, next_device->id, output, FALSE);
 
         schedule_stonith_command(cmd, next_device);
         /* Prevent cmd from being freed */
         cmd = NULL;
         goto done;
     }
 
-    stonith_send_async_reply(cmd, output, rc, pid);
+    stonith_send_async_reply(cmd, output, rc, pid, st_reply_opt_none);
 
     if (rc != 0) {
         goto done;
     }
 
     /* Check to see if any operations are scheduled to do the exact
      * same thing that just completed.  If so, rather than
      * performing the same fencing operation twice, return the result
      * of this operation for all pending commands it matches. */
     for (gIter = cmd_list; gIter != NULL; gIter = gIterNext) {
         async_command_t *cmd_other = gIter->data;
 
         gIterNext = gIter->next;
 
         if (cmd == cmd_other) {
             continue;
         }
 
         /* A pending scheduled command matches the command that just finished if:
          * 1. The client connections are different.
          * 2. The node victim is the same.
          * 3. The fencing action is the same.
          * 4. The device scheduled to execute the action is the same.
          */
         if (safe_str_eq(cmd->client, cmd_other->client) ||
             safe_str_neq(cmd->victim, cmd_other->victim) ||
             safe_str_neq(cmd->action, cmd_other->action) ||
             safe_str_neq(cmd->device, cmd_other->device)) {
 
             continue;
         }
 
         /* Duplicate merging will do the right thing for either type of remapped
          * reboot. If the executing fencer remapped an unsupported reboot to
          * off, then cmd->action will be reboot and will be merged with any
          * other reboot requests. If the originating fencer remapped a
          * topology reboot to off then on, we will get here once with
          * cmd->action "off" and once with "on", and they will be merged
          * separately with similar requests.
          */
         crm_notice
             ("Merging stonith action '%s' targeting %s originating from client %s with identical stonith request from client %s",
              cmd_other->action, cmd_other->victim, cmd_other->client_name, cmd->client_name);
 
         cmd_list = g_list_remove_link(cmd_list, gIter);
 
-        stonith_send_async_reply(cmd_other, output, rc, pid);
+        stonith_send_async_reply(cmd_other, output, rc, pid, st_reply_opt_merged);
         cancel_stonith_command(cmd_other);
 
         free_async_command(cmd_other);
         g_list_free_1(gIter);
     }
 
   done:
     free_async_command(cmd);
 }
 
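 /*!
  * \internal
  * \brief Compare two fence devices by priority (higher priority sorts first)
  */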
 static gint
 sort_device_priority(gconstpointer a, gconstpointer b)
 {
     const stonith_device_t *dev_a = a;
     const stonith_device_t *dev_b = b;
 
     if (dev_a->priority > dev_b->priority) {
         return -1;
     } else if (dev_a->priority < dev_b->priority) {
         return 1;
     }
     return 0;
 }
 
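 /*!
  * \internal
  * \brief Schedule a fencing command on the first capable device found
  *        (ordering by priority), or reply with -ENODEV if none is available
  *
  * \param[in] devices    List of IDs of devices capable of fencing the target
  * \param[in] user_data  Fencing command to schedule
  */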
 static void
 stonith_fence_get_devices_cb(GList * devices, void *user_data)
 {
     async_command_t *cmd = user_data;
     stonith_device_t *device = NULL;
 
     crm_info("Found %d matching devices for '%s'", g_list_length(devices), cmd->victim);
 
     if (g_list_length(devices) > 0) {
         /* Order based on priority */
         devices = g_list_sort(devices, sort_device_priority);
         device = g_hash_table_lookup(device_list, devices->data);
 
         if (device) {
             cmd->device_list = devices;
             cmd->device_next = devices->next;
             devices = NULL;     /* list owned by cmd now */
         }
     }
 
     /* we have a device, schedule it for fencing. */
     if (device) {
         schedule_stonith_command(cmd, device);
         /* in progress */
         return;
     }
 
     /* no device found! */
-    stonith_send_async_reply(cmd, NULL, -ENODEV, 0);
+    stonith_send_async_reply(cmd, NULL, -ENODEV, 0, st_reply_opt_none);
 
     free_async_command(cmd);
     g_list_free_full(devices, free);
 }
 
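 /*!
  * \internal
  * \brief Execute a fencing request locally, on a specific or any capable device
  *
  * \return -EINPROGRESS on success (the reply is sent asynchronously),
  *         or -EPROTO/-ENODEV on error
  */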
 static int
 stonith_fence(xmlNode * msg)
 {
     const char *device_id = NULL;
     stonith_device_t *device = NULL;
     async_command_t *cmd = create_async_command(msg);
     xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_ERR);
 
     if (cmd == NULL) {
         return -EPROTO;
     }
 
     device_id = crm_element_value(dev, F_STONITH_DEVICE);
     if (device_id) {
         device = g_hash_table_lookup(device_list, device_id);
         if (device == NULL) {
             crm_err("Requested device '%s' is not available", device_id);
             return -ENODEV;
         }
         schedule_stonith_command(cmd, device);
 
     } else {
         const char *host = crm_element_value(dev, F_STONITH_TARGET);
 
         if (cmd->options & st_opt_cs_nodeid) {
             int nodeid = crm_atoi(host, NULL);
             crm_node_t *node = crm_find_known_peer_full(nodeid, NULL, CRM_GET_PEER_ANY);
 
             if (node) {
                 host = node->uname;
             }
         }
 
         /* If we get to here, then self-fencing is implicitly allowed */
         get_capable_devices(host, cmd->action, cmd->default_timeout,
                             TRUE, cmd, stonith_fence_get_devices_cb);
     }
 
     return -EINPROGRESS;
 }
 
 xmlNode *
 stonith_construct_reply(xmlNode * request, const char *output, xmlNode * data, int rc)
 {
     int lpc = 0;
     xmlNode *reply = NULL;
 
     const char *name = NULL;
     const char *value = NULL;
 
     const char *names[] = {
         F_STONITH_OPERATION,
         F_STONITH_CALLID,
         F_STONITH_CLIENTID,
         F_STONITH_CLIENTNAME,
         F_STONITH_REMOTE_OP_ID,
         F_STONITH_CALLOPTS
     };
 
     crm_trace("Creating a basic reply");
     reply = create_xml_node(NULL, T_STONITH_REPLY);
 
     crm_xml_add(reply, "st_origin", __FUNCTION__);
     crm_xml_add(reply, F_TYPE, T_STONITH_NG);
     crm_xml_add(reply, "st_output", output);
     crm_xml_add_int(reply, F_STONITH_RC, rc);
 
     CRM_CHECK(request != NULL, crm_warn("Can't create a sane reply"); return reply);
     for (lpc = 0; lpc < DIMOF(names); lpc++) {
         name = names[lpc];
         value = crm_element_value(request, name);
         crm_xml_add(reply, name, value);
     }
 
     if (data != NULL) {
         crm_trace("Attaching reply output");
         add_message_xml(reply, F_STONITH_CALLDATA, data);
     }
     return reply;
 }
 
 static xmlNode *
 stonith_construct_async_reply(async_command_t * cmd, const char *output, xmlNode * data, int rc)
 {
     xmlNode *reply = NULL;
 
     crm_trace("Creating a basic reply");
     reply = create_xml_node(NULL, T_STONITH_REPLY);
 
     crm_xml_add(reply, "st_origin", __FUNCTION__);
     crm_xml_add(reply, F_TYPE, T_STONITH_NG);
 
     crm_xml_add(reply, F_STONITH_OPERATION, cmd->op);
     crm_xml_add(reply, F_STONITH_DEVICE, cmd->device);
     crm_xml_add(reply, F_STONITH_REMOTE_OP_ID, cmd->remote_op_id);
     crm_xml_add(reply, F_STONITH_CLIENTID, cmd->client);
     crm_xml_add(reply, F_STONITH_CLIENTNAME, cmd->client_name);
     crm_xml_add(reply, F_STONITH_TARGET, cmd->victim);
     crm_xml_add(reply, F_STONITH_ACTION, cmd->op);
     crm_xml_add(reply, F_STONITH_ORIGIN, cmd->origin);
     crm_xml_add_int(reply, F_STONITH_CALLID, cmd->id);
     crm_xml_add_int(reply, F_STONITH_CALLOPTS, cmd->options);
 
     crm_xml_add_int(reply, F_STONITH_RC, rc);
 
     crm_xml_add(reply, "st_output", output);
 
     if (data != NULL) {
         crm_info("Attaching reply output");
         add_message_xml(reply, F_STONITH_CALLDATA, data);
     }
     return reply;
 }
 
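 /*!
  * \internal
  * \brief Check whether a peer can be relied on for fencing
  *
  * \param[in] peer  Peer node to check
  *
  * \return TRUE if the peer has a known name and its cluster process is
  *         active, FALSE otherwise
  */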
 bool fencing_peer_active(crm_node_t *peer)
 {
     if (peer == NULL) {
         return FALSE;
     } else if (peer->uname == NULL) {
         return FALSE;
     } else if (is_set(peer->processes, crm_get_cluster_proc())) {
         return TRUE;
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Determine if we need to use an alternate node to
  *        fence the target, and if so, return that node's uname
  *
  * \retval NULL, no alternate host
  * \retval uname, uname of alternate host to use
  */
 static const char *
 check_alternate_host(const char *target)
 {
     const char *alternate_host = NULL;
 
     crm_trace("Checking if we (%s) can fence %s", stonith_our_uname, target);
     if (find_topology_for_host(target) && safe_str_eq(target, stonith_our_uname)) {
         GHashTableIter gIter;
         crm_node_t *entry = NULL;
 
         g_hash_table_iter_init(&gIter, crm_peer_cache);
         while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
             crm_trace("Checking for %s.%d != %s", entry->uname, entry->id, target);
             if (fencing_peer_active(entry)
                 && safe_str_neq(entry->uname, target)) {
                 alternate_host = entry->uname;
                 break;
             }
         }
         if (alternate_host == NULL) {
             crm_err("No alternate host available to handle complex self fencing request");
             g_hash_table_iter_init(&gIter, crm_peer_cache);
             while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
                 crm_notice("Peer[%d] %s", entry->id, entry->uname);
             }
         }
     }
 
     return alternate_host;
 }
 
 static void
 stonith_send_reply(xmlNode * reply, int call_options, const char *remote_peer,
                    const char *client_id)
 {
     if (remote_peer) {
         send_cluster_message(crm_get_peer(0, remote_peer), crm_msg_stonith_ng, reply, FALSE);
     } else {
         do_local_reply(reply, client_id, is_set(call_options, st_opt_sync_call), remote_peer != NULL);
     }
 }
 
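 /*!
  * \internal
  * \brief Dispatch a fencer request from a local client or a cluster peer
  *
  * \return pcmk_ok on success, -EINPROGRESS if the reply will be sent
  *         asynchronously, or a negative errno on error
  */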
 static int
 handle_request(crm_client_t * client, uint32_t id, uint32_t flags, xmlNode * request,
                const char *remote_peer)
 {
     int call_options = 0;
     int rc = -EOPNOTSUPP;
 
     xmlNode *data = NULL;
     xmlNode *reply = NULL;
 
     char *output = NULL;
     const char *op = crm_element_value(request, F_STONITH_OPERATION);
     const char *client_id = crm_element_value(request, F_STONITH_CLIENTID);
 
     crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
 
     if (is_set(call_options, st_opt_sync_call)) {
         CRM_ASSERT(client == NULL || client->request_id == id);
     }
 
     if (crm_str_eq(op, CRM_OP_REGISTER, TRUE)) {
         xmlNode *reply = create_xml_node(NULL, "reply");
 
         CRM_ASSERT(client);
         crm_xml_add(reply, F_STONITH_OPERATION, CRM_OP_REGISTER);
         crm_xml_add(reply, F_STONITH_CLIENTID, client->id);
         crm_ipcs_send(client, id, reply, flags);
         client->request_id = 0;
         free_xml(reply);
         return 0;
 
     } else if (crm_str_eq(op, STONITH_OP_EXEC, TRUE)) {
         rc = stonith_device_action(request, &output);
 
     } else if (crm_str_eq(op, STONITH_OP_TIMEOUT_UPDATE, TRUE)) {
         const char *call_id = crm_element_value(request, F_STONITH_CALLID);
         const char *client_id = crm_element_value(request, F_STONITH_CLIENTID);
         int op_timeout = 0;
 
         crm_element_value_int(request, F_STONITH_TIMEOUT, &op_timeout);
         do_stonith_async_timeout_update(client_id, call_id, op_timeout);
         return 0;
 
     } else if (crm_str_eq(op, STONITH_OP_QUERY, TRUE)) {
         if (remote_peer) {
             create_remote_stonith_op(client_id, request, TRUE); /* Record it for the future notification */
         }
         stonith_query(request, remote_peer, client_id, call_options);
         return 0;
 
     } else if (crm_str_eq(op, T_STONITH_NOTIFY, TRUE)) {
         const char *flag_name = NULL;
 
         CRM_ASSERT(client);
         flag_name = crm_element_value(request, F_STONITH_NOTIFY_ACTIVATE);
         if (flag_name) {
             crm_debug("Setting %s callbacks for %s (%s): ON", flag_name, client->name, client->id);
             client->options |= get_stonith_flag(flag_name);
         }
 
         flag_name = crm_element_value(request, F_STONITH_NOTIFY_DEACTIVATE);
         if (flag_name) {
             crm_debug("Setting %s callbacks for %s (%s): off", flag_name, client->name, client->id);
             client->options |= get_stonith_flag(flag_name);
         }
 
         if (flags & crm_ipc_client_response) {
             crm_ipcs_send_ack(client, id, flags, "ack", __FUNCTION__, __LINE__);
         }
         return 0;
 
     } else if (crm_str_eq(op, STONITH_OP_RELAY, TRUE)) {
         xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE);
 
         crm_notice("Peer %s has received a forwarded fencing request from %s to fence (%s) peer %s",
                    stonith_our_uname,
                    client ? client->name : remote_peer,
                    crm_element_value(dev, F_STONITH_ACTION),
                    crm_element_value(dev, F_STONITH_TARGET));
 
         if (initiate_remote_stonith_op(NULL, request, FALSE) != NULL) {
             rc = -EINPROGRESS;
         }
 
     } else if (crm_str_eq(op, STONITH_OP_FENCE, TRUE)) {
 
         if (remote_peer || stand_alone) {
             rc = stonith_fence(request);
 
         } else if (call_options & st_opt_manual_ack) {
             remote_fencing_op_t *rop = NULL;
             xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE);
             const char *target = crm_element_value(dev, F_STONITH_TARGET);
 
             crm_notice("Received manual confirmation that %s is fenced", target);
             rop = initiate_remote_stonith_op(client, request, TRUE);
             rc = stonith_manual_ack(request, rop);
 
         } else {
             const char *alternate_host = NULL;
             xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_TRACE);
             const char *target = crm_element_value(dev, F_STONITH_TARGET);
             const char *action = crm_element_value(dev, F_STONITH_ACTION);
             const char *device = crm_element_value(dev, F_STONITH_DEVICE);
 
             if (client) {
                 int tolerance = 0;
 
                 crm_notice("Client %s.%.8s wants to fence (%s) '%s' with device '%s'",
                            client->name, client->id, action, target, device ? device : "(any)");
 
                 crm_element_value_int(dev, F_STONITH_TOLERANCE, &tolerance);
 
                 if (stonith_check_fence_tolerance(tolerance, target, action)) {
                     rc = 0;
                     goto done;
                 }
 
             } else {
                 crm_notice("Peer %s wants to fence (%s) '%s' with device '%s'",
                            remote_peer, action, target, device ? device : "(any)");
             }
 
             alternate_host = check_alternate_host(target);
 
             if (alternate_host && client) {
                 const char *client_id = NULL;
 
                 crm_notice("Forwarding complex self fencing request to peer %s", alternate_host);
 
                 if (client->id) {
                     client_id = client->id;
                 } else {
                     client_id = crm_element_value(request, F_STONITH_CLIENTID);
                 }
 
                 /* Create a record of it, otherwise call_id will be 0 if we need to notify of failures */
                 create_remote_stonith_op(client_id, request, FALSE);
 
                 crm_xml_add(request, F_STONITH_OPERATION, STONITH_OP_RELAY);
                 crm_xml_add(request, F_STONITH_CLIENTID, client->id);
                 send_cluster_message(crm_get_peer(0, alternate_host), crm_msg_stonith_ng, request,
                                      FALSE);
                 rc = -EINPROGRESS;
 
             } else if (initiate_remote_stonith_op(client, request, FALSE) != NULL) {
                 rc = -EINPROGRESS;
             }
         }
 
     } else if (crm_str_eq(op, STONITH_OP_FENCE_HISTORY, TRUE)) {
         rc = stonith_fence_history(request, &data, remote_peer, call_options);
         if (call_options & st_opt_discard_reply) {
             /* we don't expect answers to the broadcast
              * we might have sent out
              */
             free_xml(data);
             return pcmk_ok;
         }
 
     } else if (crm_str_eq(op, STONITH_OP_DEVICE_ADD, TRUE)) {
         const char *device_id = NULL;
 
         rc = stonith_device_register(request, &device_id, FALSE);
         do_stonith_notify_device(call_options, op, rc, device_id);
 
     } else if (crm_str_eq(op, STONITH_OP_DEVICE_DEL, TRUE)) {
         xmlNode *dev = get_xpath_object("//" F_STONITH_DEVICE, request, LOG_ERR);
         const char *device_id = crm_element_value(dev, XML_ATTR_ID);
 
         rc = stonith_device_remove(device_id, FALSE);
         do_stonith_notify_device(call_options, op, rc, device_id);
 
     } else if (crm_str_eq(op, STONITH_OP_LEVEL_ADD, TRUE)) {
         char *device_id = NULL;
 
         rc = stonith_level_register(request, &device_id);
         do_stonith_notify_level(call_options, op, rc, device_id);
         free(device_id);
 
     } else if (crm_str_eq(op, STONITH_OP_LEVEL_DEL, TRUE)) {
         char *device_id = NULL;
 
         rc = stonith_level_remove(request, &device_id);
         do_stonith_notify_level(call_options, op, rc, device_id);
 
     } else if(safe_str_eq(op, CRM_OP_RM_NODE_CACHE)) {
         int node_id = 0;
         const char *name = NULL;
 
         crm_element_value_int(request, XML_ATTR_ID, &node_id);
         name = crm_element_value(request, XML_ATTR_UNAME);
         reap_crm_member(node_id, name);
 
         return pcmk_ok;
 
     } else {
         crm_err("Unknown %s from %s", op, client ? client->name : remote_peer);
         crm_log_xml_warn(request, "UnknownOp");
     }
 
   done:
 
     /* Always reply unless the request is still in progress;
      * if so, the reply will be sent asynchronously after the request
      * processing is finished */
     if (rc != -EINPROGRESS) {
         crm_trace("Reply handling: %p %u %u %d %d %s", client, client?client->request_id:0,
                   id, is_set(call_options, st_opt_sync_call), call_options,
                   crm_element_value(request, F_STONITH_CALLOPTS));
 
         if (is_set(call_options, st_opt_sync_call)) {
             CRM_ASSERT(client == NULL || client->request_id == id);
         }
         reply = stonith_construct_reply(request, output, data, rc);
         stonith_send_reply(reply, call_options, remote_peer, client_id);
     }
 
     free(output);
     free_xml(data);
     free_xml(reply);
 
     return rc;
 }
 
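 /*!
  * \internal
  * \brief Dispatch a reply (to a query, notification, or fencing request)
  *        to the remote-operation handlers
  */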
 static void
 handle_reply(crm_client_t * client, xmlNode * request, const char *remote_peer)
 {
     const char *op = crm_element_value(request, F_STONITH_OPERATION);
 
     if (crm_str_eq(op, STONITH_OP_QUERY, TRUE)) {
         process_remote_stonith_query(request);
     } else if (crm_str_eq(op, T_STONITH_NOTIFY, TRUE)) {
         process_remote_stonith_exec(request);
     } else if (crm_str_eq(op, STONITH_OP_FENCE, TRUE)) {
         /* Reply to a complex fencing op */
         process_remote_stonith_exec(request);
     } else {
         crm_err("Unknown %s reply from %s", op, client ? client->name : remote_peer);
         crm_log_xml_warn(request, "UnknownOp");
     }
 }
 
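 /*!
  * \internal
  * \brief Handle an incoming fencer message, either a request or a reply
  */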
 void
 stonith_command(crm_client_t * client, uint32_t id, uint32_t flags, xmlNode * request,
                 const char *remote_peer)
 {
     int call_options = 0;
     int rc = 0;
     gboolean is_reply = FALSE;
 
     /* Copy op for reporting. The original might get freed by handle_reply()
      * before we use it in crm_debug():
      *     handle_reply()
      *     |- process_remote_stonith_exec()
      *     |-- remote_op_done()
      *     |--- handle_local_reply_and_notify()
      *     |---- crm_xml_add(...F_STONITH_OPERATION...)
      *     |--- free_xml(op->request)
      */
     char *op = crm_element_value_copy(request, F_STONITH_OPERATION);
 
     if (get_xpath_object("//" T_STONITH_REPLY, request, LOG_NEVER)) {
         is_reply = TRUE;
     }
 
     crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
     crm_debug("Processing %s%s %u from %s (%16x)", op, is_reply ? " reply" : "",
               id, client ? client->name : remote_peer, call_options);
 
     if (is_set(call_options, st_opt_sync_call)) {
         CRM_ASSERT(client == NULL || client->request_id == id);
     }
 
     if (is_reply) {
         handle_reply(client, request, remote_peer);
     } else {
         rc = handle_request(client, id, flags, request, remote_peer);
     }
 
     crm_debug("Processed %s%s from %s: %s (%d)", op,
               is_reply ? " reply" : "", client ? client->name : remote_peer,
               rc > 0 ? "" : pcmk_strerror(rc), rc);
 
     free(op);
 }
diff --git a/daemons/fenced/fenced_history.c b/daemons/fenced/fenced_history.c
index a1f363b38b..fb4f9be13e 100644
--- a/daemons/fenced/fenced_history.c
+++ b/daemons/fenced/fenced_history.c
@@ -1,489 +1,489 @@
 /*
  * Copyright 2009-2019 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdio.h>
 #include <unistd.h>
 #include <stdlib.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipcs.h>
 #include <crm/cluster/internal.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 
 #include <pacemaker-fenced.h>
 
 #define MAX_STONITH_HISTORY 500
 
 /*!
  * \internal
  * \brief Send a broadcast to all nodes to trigger cleanup or
  *        history synchronisation
  *
  * \param[in] history   Optional history to be attached
  * \param[in] callopts  We control cleanup via a flag in the callopts
  * \param[in] target    Cleanup can be limited to certain fence-targets
  */
 static void
 stonith_send_broadcast_history(xmlNode *history,
                                int callopts,
                                const char *target)
 {
     xmlNode *bcast = create_xml_node(NULL, "stonith_command");
     xmlNode *data = create_xml_node(NULL, __FUNCTION__);
 
     if (target) {
         crm_xml_add(data, F_STONITH_TARGET, target);
     }
     crm_xml_add(bcast, F_TYPE, T_STONITH_NG);
     crm_xml_add(bcast, F_SUBTYPE, "broadcast");
     crm_xml_add(bcast, F_STONITH_OPERATION, STONITH_OP_FENCE_HISTORY);
     crm_xml_add_int(bcast, F_STONITH_CALLOPTS, callopts);
     if (history) {
         add_node_copy(data, history);
     }
     add_message_xml(bcast, F_STONITH_CALLDATA, data);
     send_cluster_message(NULL, crm_msg_stonith_ng, bcast, FALSE);
 
     free_xml(data);
     free_xml(bcast);
 }
 
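 /*!
  * \internal
  * \brief Decide whether a history entry may be removed (GHRFunc)
  *
  * \return TRUE for completed (done or failed) entries matching the optional
  *         target, FALSE for pending entries
  */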
 static gboolean
 stonith_remove_history_entry (gpointer key,
                               gpointer value,
                               gpointer user_data)
 {
     remote_fencing_op_t *op = value;
     const char *target = (const char *) user_data;
 
     if ((op->state == st_failed) || (op->state == st_done)) {
         if ((target) && (strcmp(op->target, target) != 0)) {
             return FALSE;
         }
         return TRUE;
     }
 
     return FALSE; /* don't clean pending operations */
 }
 
 /*!
  * \internal
  * \brief Send out a cleanup broadcast or do a local history-cleanup
  *
  * \param[in] target    Cleanup can be limited to certain fence-targets
  * \param[in] broadcast Send out a cleanup broadcast
  */
 static void
 stonith_fence_history_cleanup(const char *target,
                               gboolean broadcast)
 {
     if (broadcast) {
         stonith_send_broadcast_history(NULL,
                                        st_opt_cleanup | st_opt_discard_reply,
                                        target);
         /* we'll do the local clean when we receive back our own broadcast */
     } else if (stonith_remote_op_list) {
         g_hash_table_foreach_remove(stonith_remote_op_list,
                              stonith_remove_history_entry,
                              (gpointer) target);
         do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY, 0, NULL);
     }
 }
 
 /* keeping the length of fence-history within bounds
  * =================================================
  *
  * If things are really running wild, a lot of fencing attempts
  * can fill up the hash table, eventually using up a lot
  * of memory and creating huge history-sync messages.
  * Before the history was synced across nodes, at least
  * the reboot of a cluster node helped keep the
  * history within bounds, even though not in a reliable
  * manner.
  *
  * stonith_remote_op_list isn't sorted by time-stamp,
  * so it would be fairly expensive to delete e.g.
  * just the oldest entry whenever the list grows past
  * MAX_STONITH_HISTORY entries.
  * It is more efficient to purge MAX_STONITH_HISTORY/2
  * entries whenever the list grows beyond MAX_STONITH_HISTORY
  * (sort by age + purge the MAX_STONITH_HISTORY/2 oldest).
  * Doing that on a per-node basis might raise the
  * probability of large syncs occurring.
  * Alternatives such as introducing a broadcast to purge
  * MAX_STONITH_HISTORY/2 entries, or not syncing above a certain
  * threshold, come to mind ...
  * The simplest approach, though, is to purge the full history
  * throughout the cluster once MAX_STONITH_HISTORY is reached.
  * On the other hand, this leads to purging the history in
  * situations where it would probably be handy to have it.
  */
 
 
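 /*!
  * \internal
  * \brief Compare fence-history entries so that pending operations sort first,
  *        followed by completed operations from most recent to oldest
  */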
 static int
 op_time_sort(const void *a_voidp, const void *b_voidp)
 {
     const remote_fencing_op_t **a = (const remote_fencing_op_t **) a_voidp;
     const remote_fencing_op_t **b = (const remote_fencing_op_t **) b_voidp;
     gboolean a_pending = ((*a)->state != st_failed) && ((*a)->state != st_done);
     gboolean b_pending = ((*b)->state != st_failed) && ((*b)->state != st_done);
 
     if (a_pending && b_pending) {
         return 0;
     } else if (a_pending) {
         return -1;
     } else if (b_pending) {
         return 1;
     } else if ((*b)->completed == (*a)->completed) {
         return 0;
     } else if ((*b)->completed > (*a)->completed) {
         return 1;
     }
 
     return -1;
 }
 
 
 /*!
  * \internal
  * \brief Trim the local fence history to MAX_STONITH_HISTORY / 2 entries
  *        once it grows beyond MAX_STONITH_HISTORY
  */
 void
 stonith_fence_history_trim(void)
 {
     guint num_ops;
 
     if (!stonith_remote_op_list) {
         return;
     }
     num_ops = g_hash_table_size(stonith_remote_op_list);
     if (num_ops > MAX_STONITH_HISTORY) {
         remote_fencing_op_t *ops[num_ops];
         remote_fencing_op_t *op = NULL;
         GHashTableIter iter;
         int i;
 
         crm_trace("Fencing History growing beyond limit of %d so purge "
                   "half of failed/successful attempts", MAX_STONITH_HISTORY);
 
         /* write all ops into an array */
         i = 0;
         g_hash_table_iter_init(&iter, stonith_remote_op_list);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&op)) {
             ops[i++] = op;
         }
         /* run quicksort over the array so that we get pending ops
          * first and then sorted most recent to oldest
          */
         qsort(ops, num_ops, sizeof(remote_fencing_op_t *), op_time_sort);
         /* purge the oldest half of the history entries */
         for (i = MAX_STONITH_HISTORY / 2; i < num_ops; i++) {
             /* keep pending ops even if they shouldn't fill more than
              * half of our buffer
              */
             if ((ops[i]->state == st_failed) || (ops[i]->state == st_done)) {
                 g_hash_table_remove(stonith_remote_op_list, ops[i]->id);
             }
         }
         /* we've just purged valid data from the list so there is no need
          * to create a notification - if displayed it can stay
          */
     }
 }
 
 /*!
  * \internal
  * \brief Convert xml fence-history to a hash-table like stonith_remote_op_list
  *
  * \param[in] history   Fence-history in xml
  *
  * \return Fence-history as hash-table
  */
 static GHashTable *
 stonith_xml_history_to_list(xmlNode *history)
 {
     xmlNode *xml_op = NULL;
     GHashTable *rv = NULL;
 
     init_stonith_remote_op_hash_table(&rv);
 
     CRM_LOG_ASSERT(rv != NULL);
 
     for (xml_op = __xml_first_child(history); xml_op != NULL;
          xml_op = __xml_next(xml_op)) {
         remote_fencing_op_t *op = NULL;
         char *id = crm_element_value_copy(xml_op, F_STONITH_REMOTE_OP_ID);
         int completed, state;
 
         if (!id) {
             crm_warn("History to convert to hashtable has no id in entry");
             continue;
         }
 
         crm_trace("Attaching op %s to hashtable", id);
 
         op = calloc(1, sizeof(remote_fencing_op_t));
 
         op->id = id;
         op->target = crm_element_value_copy(xml_op, F_STONITH_TARGET);
         op->action = crm_element_value_copy(xml_op, F_STONITH_ACTION);
         op->originator = crm_element_value_copy(xml_op, F_STONITH_ORIGIN);
         op->delegate = crm_element_value_copy(xml_op, F_STONITH_DELEGATE);
         op->client_name = crm_element_value_copy(xml_op, F_STONITH_CLIENTNAME);
         crm_element_value_int(xml_op, F_STONITH_DATE, &completed);
         op->completed = (time_t) completed;
         crm_element_value_int(xml_op, F_STONITH_STATE, &state);
         op->state = (enum op_state) state;
 
         g_hash_table_replace(rv, id, op);
         CRM_LOG_ASSERT(g_hash_table_lookup(rv, id) != NULL);
     }
 
     return rv;
 }
 
 /*!
  * \internal
  * \brief Craft xml difference between local fence-history and a history
  *        coming from remote
  *
  * \param[in] remote_history    Fence-history as hash-table (may be NULL)
  * \param[in] add_id            Whether to include the remote op ID in each
  *                              entry (not needed when crafting the answer to
  *                              an API history request)
  * \param[in] target            Optionally limit to certain fence-target
  *
  * \return The fence-history as xml
  */
 static xmlNode *
 stonith_local_history_diff(GHashTable *remote_history,
                            gboolean add_id,
                            const char *target)
 {
     xmlNode *history = NULL;
     int cnt = 0;
 
     if (stonith_remote_op_list) {
             GHashTableIter iter;
             remote_fencing_op_t *op = NULL;
 
             history = create_xml_node(NULL, F_STONITH_HISTORY_LIST);
 
             g_hash_table_iter_init(&iter, stonith_remote_op_list);
             while (g_hash_table_iter_next(&iter, NULL, (void **)&op)) {
                 xmlNode *entry = NULL;
 
                 if (remote_history &&
                     g_hash_table_lookup(remote_history, op->id)) {
                     continue; /* skip entries broadcasted already */
                 }
 
                 if (target && strcmp(op->target, target) != 0) {
                     continue;
                 }
 
                 cnt++;
                 crm_trace("Attaching op %s", op->id);
                 entry = create_xml_node(history, STONITH_OP_EXEC);
                 if (add_id) {
                     crm_xml_add(entry, F_STONITH_REMOTE_OP_ID, op->id);
                 }
                 crm_xml_add(entry, F_STONITH_TARGET, op->target);
                 crm_xml_add(entry, F_STONITH_ACTION, op->action);
                 crm_xml_add(entry, F_STONITH_ORIGIN, op->originator);
                 crm_xml_add(entry, F_STONITH_DELEGATE, op->delegate);
                 crm_xml_add(entry, F_STONITH_CLIENTNAME, op->client_name);
                 crm_xml_add_int(entry, F_STONITH_DATE, op->completed);
                 crm_xml_add_int(entry, F_STONITH_STATE, op->state);
             }
     }
 
     if (cnt == 0) {
         free_xml(history);
         return NULL;
     } else {
         return history;
     }
 }
 
 /*!
  * \internal
  * \brief Merge fence-history coming from remote into local history
  *
  * \param[in] history   Hash-table holding remote history to be merged in
  */
 static void
 stonith_merge_in_history_list(GHashTable *history)
 {
     GHashTableIter iter;
     remote_fencing_op_t *op = NULL;
     gboolean updated = FALSE;
 
     if (!history) {
         return;
     }
 
     init_stonith_remote_op_hash_table(&stonith_remote_op_list);
 
     g_hash_table_iter_init(&iter, history);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&op)) {
         remote_fencing_op_t *stored_op =
             g_hash_table_lookup(stonith_remote_op_list, op->id);
 
         if (stored_op) {
             continue; // Skip existing entries (@TODO state-merging might be desirable)
         }
 
         updated = TRUE;
         g_hash_table_iter_steal(&iter);
 
         if ((op->state != st_failed) &&
             (op->state != st_done) &&
             safe_str_eq(op->originator, stonith_our_uname)) {
             crm_warn("received pending action we are supposed to be the "
                      "owner but it's not in our records -> fail it");
             op->state = st_failed;
             op->completed = time(NULL);
             /* use -EHOSTUNREACH to not introduce a new return-code that might
                trigger unexpected results at other places and to prevent
                remote_op_done from setting the delegate if not present
              */
-            stonith_bcast_result_to_peers(op, -EHOSTUNREACH);
+            stonith_bcast_result_to_peers(op, -EHOSTUNREACH, FALSE);
         }
 
         g_hash_table_insert(stonith_remote_op_list, op->id, op);
         /* We could trim the history here, but if we bailed out after
          * trimming we might miss more recent entries that are still in
          * the list. If we don't bail out, trimming once at the end is
          * more efficient, and the memory overhead is minimal since we
          * are just moving pointers from one hash table to another.
          */
     }
     stonith_fence_history_trim();
     if (updated) {
         do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY, 0, NULL);
     }
     g_hash_table_destroy(history); /* remove what is left */
 }
 
 /*!
  * \internal
  * \brief Handle fence-history messages (either from the API or coming in as
  *        broadcasts)
  *
  * \param[in]  msg          Request message
  * \param[out] output       Where to store XML history when crafting a reply
  *                          to an API request
  * \param[in]  remote_peer  Name of peer the request came from, if any
  * \param[in]  options      Call options from the request
  *
  * \return Always success, as there is actually nothing that can really go wrong
  */
 int
 stonith_fence_history(xmlNode *msg, xmlNode **output,
                       const char *remote_peer, int options)
 {
     int rc = 0;
     const char *target = NULL;
     xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_NEVER);
     xmlNode *out_history = NULL;
 
     if (dev) {
         target = crm_element_value(dev, F_STONITH_TARGET);
         if (target && (options & st_opt_cs_nodeid)) {
             int nodeid = crm_atoi(target, NULL);
             crm_node_t *node = crm_find_known_peer_full(nodeid, NULL, CRM_GET_PEER_ANY);
 
             if (node) {
                 target = node->uname;
             }
         }
     }
 
     if (options & st_opt_cleanup) {
         crm_trace("Cleaning up operations on %s in %p", target,
                   stonith_remote_op_list);
 
         stonith_fence_history_cleanup(target,
             crm_element_value(msg, F_STONITH_CALLID) != NULL);
     } else if (options & st_opt_broadcast) {
         /* There is currently no clear sign of when a history sync
          * is done, so send a notification for anything that looks
          * like a history sync
          */
         do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY_SYNCED, 0, NULL);
         if (crm_element_value(msg, F_STONITH_CALLID)) {
             /* This is coming from the stonith API.
              *
              * Craft a broadcast with this node's history so that
              * every node can merge in and broadcast what it has
              * on top of it.
              */
             out_history = stonith_local_history_diff(NULL, TRUE, NULL);
             crm_trace("Broadcasting history to peers");
             stonith_send_broadcast_history(out_history,
                                         st_opt_broadcast | st_opt_discard_reply,
                                         NULL);
         } else if (remote_peer &&
                    !safe_str_eq(remote_peer, stonith_our_uname)) {
             xmlNode *history = get_xpath_object("//" F_STONITH_HISTORY_LIST,
                                                 msg, LOG_NEVER);
             GHashTable *received_history =
                 history?stonith_xml_history_to_list(history):NULL;
 
             /* This is either a broadcast created directly upon a stonith-API
              * request, or a diff sent in response to such a broadcast.
              *
              * In both cases it may or may not carry a history. If we have
              * differential data, merge in what we've received and stop.
              * Otherwise, broadcast what we have on top of it, marked as
              * differential, and merge it in afterwards.
              */
             if (!history ||
                 !crm_is_true(crm_element_value(history,
                                                F_STONITH_DIFFERENTIAL))) {
                 out_history =
                     stonith_local_history_diff(received_history, TRUE, NULL);
                 if (out_history) {
                     crm_trace("Broadcasting history-diff to peers");
                     crm_xml_add(out_history, F_STONITH_DIFFERENTIAL,
                                 XML_BOOLEAN_TRUE);
                     stonith_send_broadcast_history(out_history,
                         st_opt_broadcast | st_opt_discard_reply,
                         NULL);
                 } else {
                     crm_trace("History-diff is empty - skip broadcast");
                 }
             }
             stonith_merge_in_history_list(received_history);
         } else {
             crm_trace("Skipping history-query-broadcast (%s%s)"
                       " we sent ourselves",
                       remote_peer?"remote-peer=":"local-ipc",
                       remote_peer?remote_peer:"");
         }
     } else {
         /* plain history request */
         crm_trace("Looking for operations on %s in %p", target,
                   stonith_remote_op_list);
         *output = stonith_local_history_diff(NULL, FALSE, target);
     }
     free_xml(out_history);
     return rc;
 }
diff --git a/daemons/fenced/fenced_remote.c b/daemons/fenced/fenced_remote.c
index c2f0a16e33..430120b312 100644
--- a/daemons/fenced/fenced_remote.c
+++ b/daemons/fenced/fenced_remote.c
@@ -1,2087 +1,2099 @@
 /*
  * Copyright 2009-2019 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <ctype.h>
 #include <regex.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipcs.h>
 #include <crm/cluster/internal.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 
 #include <crm/common/util.h>
 #include <pacemaker-fenced.h>
 
 #define TIMEOUT_MULTIPLY_FACTOR 1.2
 
 /* When one fencer queries its peers for devices able to handle a fencing
  * request, each peer will reply with a list of such devices available to it.
  * Each reply will be parsed into a st_query_result_t, with each device's
  * information kept in a device_properties_t.
  */
 
 typedef struct device_properties_s {
     /* Whether access to this device has been verified */
     gboolean verified;
 
     /* The remaining members are indexed by the operation's "phase" */
 
     /* Whether this device has been executed in each phase */
     gboolean executed[st_phase_max];
     /* Whether this device is disallowed from executing in each phase */
     gboolean disallowed[st_phase_max];
     /* Action-specific timeout for each phase */
     int custom_action_timeout[st_phase_max];
     /* Action-specific maximum random delay for each phase */
     int delay_max[st_phase_max];
     /* Action-specific base delay for each phase */
     int delay_base[st_phase_max];
 } device_properties_t;
 
 typedef struct st_query_result_s {
     /* Name of peer that sent this result */
     char *host;
     /* Only try peers for non-topology based operations once */
     gboolean tried;
     /* Number of entries in the devices table */
     int ndevices;
     /* Devices available to this host that are capable of fencing the target */
     GHashTable *devices;
 } st_query_result_t;
 
 GHashTable *stonith_remote_op_list = NULL;
 
 void call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer);
 static void remote_op_done(remote_fencing_op_t * op, xmlNode * data, int rc, int dup);
 extern xmlNode *stonith_create_op(int call_id, const char *token, const char *op, xmlNode * data,
                                   int call_options);
 
 static void report_timeout_period(remote_fencing_op_t * op, int op_timeout);
 static int get_op_total_timeout(const remote_fencing_op_t *op,
                                 const st_query_result_t *chosen_peer);
 
 static gint
 sort_strings(gconstpointer a, gconstpointer b)
 {
     return strcmp(a, b);
 }
 
 static void
 free_remote_query(gpointer data)
 {
     if (data) {
         st_query_result_t *query = data;
 
         crm_trace("Free'ing query result from %s", query->host);
         g_hash_table_destroy(query->devices);
         free(query->host);
         free(query);
     }
 }
 
 void
 free_stonith_remote_op_list()
 {
     if (stonith_remote_op_list != NULL) {
         g_hash_table_destroy(stonith_remote_op_list);
         stonith_remote_op_list = NULL;
     }
 }
 
 struct peer_count_data {
     const remote_fencing_op_t *op;
     gboolean verified_only;
     int count;
 };
 
 /*!
  * \internal
  * \brief Increment a counter if a device has not been executed yet
  *
  * \param[in] key        Device ID (ignored)
  * \param[in] value      Device properties
  * \param[in] user_data  Peer count data
  */
 static void
 count_peer_device(gpointer key, gpointer value, gpointer user_data)
 {
     device_properties_t *props = (device_properties_t*)value;
     struct peer_count_data *data = user_data;
 
     if (!props->executed[data->op->phase]
         && (!data->verified_only || props->verified)) {
         ++(data->count);
     }
 }
 
 /*!
  * \internal
  * \brief Check the number of available devices in a peer's query results
  *
  * \param[in] op             Operation that results are for
  * \param[in] peer           Peer to count
  * \param[in] verified_only  Whether to count only verified devices
  *
  * \return Number of devices available to peer that were not already executed
  */
 static int
 count_peer_devices(const remote_fencing_op_t *op, const st_query_result_t *peer,
                    gboolean verified_only)
 {
     struct peer_count_data data;
 
     data.op = op;
     data.verified_only = verified_only;
     data.count = 0;
     if (peer) {
         g_hash_table_foreach(peer->devices, count_peer_device, &data);
     }
     return data.count;
 }
 
 /*!
  * \internal
  * \brief Search for a device in a query result
  *
  * \param[in] op      Operation that result is for
  * \param[in] peer    Query result for a peer
  * \param[in] device  Device ID to search for
  *
  * \return Device properties if found, NULL otherwise
  */
 static device_properties_t *
 find_peer_device(const remote_fencing_op_t *op, const st_query_result_t *peer,
                  const char *device)
 {
     device_properties_t *props = g_hash_table_lookup(peer->devices, device);
 
     return (props && !props->executed[op->phase]
            && !props->disallowed[op->phase])? props : NULL;
 }
 
 /*!
  * \internal
  * \brief Find a device in a peer's device list and mark it as executed
  *
  * \param[in]     op                     Operation that peer result is for
  * \param[in,out] peer                   Peer with results to search
  * \param[in]     device                 ID of device to mark as done
  * \param[in]     verified_devices_only  Only consider verified devices
  *
  * \return TRUE if device was found and marked, FALSE otherwise
  */
 static gboolean
 grab_peer_device(const remote_fencing_op_t *op, st_query_result_t *peer,
                  const char *device, gboolean verified_devices_only)
 {
     device_properties_t *props = find_peer_device(op, peer, device);
 
     if ((props == NULL) || (verified_devices_only && !props->verified)) {
         return FALSE;
     }
 
     crm_trace("Removing %s from %s (%d remaining)",
               device, peer->host, count_peer_devices(op, peer, FALSE));
     props->executed[op->phase] = TRUE;
     return TRUE;
 }
 
 static void
 clear_remote_op_timers(remote_fencing_op_t * op)
 {
     if (op->query_timer) {
         g_source_remove(op->query_timer);
         op->query_timer = 0;
     }
     if (op->op_timer_total) {
         g_source_remove(op->op_timer_total);
         op->op_timer_total = 0;
     }
     if (op->op_timer_one) {
         g_source_remove(op->op_timer_one);
         op->op_timer_one = 0;
     }
 }
 
 static void
 free_remote_op(gpointer data)
 {
     remote_fencing_op_t *op = data;
 
     crm_trace("Free'ing op %s for %s", op->id, op->target);
     crm_log_xml_debug(op->request, "Destroying");
 
     clear_remote_op_timers(op);
 
     free(op->id);
     free(op->action);
     free(op->delegate);
     free(op->target);
     free(op->client_id);
     free(op->client_name);
     free(op->originator);
 
     if (op->query_results) {
         g_list_free_full(op->query_results, free_remote_query);
     }
     if (op->request) {
         free_xml(op->request);
         op->request = NULL;
     }
     if (op->devices_list) {
         g_list_free_full(op->devices_list, free);
         op->devices_list = NULL;
     }
     g_list_free_full(op->automatic_list, free);
     g_list_free(op->duplicates);
     free(op);
 }
 
 void
 init_stonith_remote_op_hash_table(GHashTable **table)
 {
     if (*table == NULL) {
         *table = g_hash_table_new_full(crm_str_hash, g_str_equal, NULL, free_remote_op);
     }
 }
 
 /*!
  * \internal
  * \brief Return an operation's originally requested action (before any remap)
  *
  * \param[in] op  Operation to check
  *
  * \return Operation's original action
  */
 static const char *
 op_requested_action(const remote_fencing_op_t *op)
 {
     return ((op->phase > st_phase_requested)? "reboot" : op->action);
 }
 
 /*!
  * \internal
  * \brief Remap a "reboot" operation to the "off" phase
  *
  * \param[in,out] op      Operation to remap
  */
 static void
 op_phase_off(remote_fencing_op_t *op)
 {
     crm_info("Remapping multiple-device reboot targeting %s (%s) to 'off'",
              op->target, op->id);
     op->phase = st_phase_off;
 
     /* Happily, "off" and "on" are shorter than "reboot", so we can reuse the
      * memory allocation at each phase.
      */
     strcpy(op->action, "off");
 }
 
 /*!
  * \internal
  * \brief Advance a remapped reboot operation to the "on" phase
  *
  * \param[in,out] op  Operation to remap
  */
 static void
 op_phase_on(remote_fencing_op_t *op)
 {
     GListPtr iter = NULL;
 
     crm_info("Remapped 'off' targeting %s complete, "
              "remapping to 'on' for %s.%.8s",
              op->target, op->client_name, op->id);
     op->phase = st_phase_on;
     strcpy(op->action, "on");
 
     /* Skip devices with automatic unfencing, because the cluster will handle it
      * when the node rejoins.
      */
     for (iter = op->automatic_list; iter != NULL; iter = iter->next) {
         GListPtr match = g_list_find_custom(op->devices_list, iter->data,
                                             sort_strings);
 
         if (match) {
             op->devices_list = g_list_remove(op->devices_list, match->data);
         }
     }
     g_list_free_full(op->automatic_list, free);
     op->automatic_list = NULL;
 
     /* Rewind device list pointer */
     op->devices = op->devices_list;
 }
 
 /*!
  * \internal
  * \brief Reset a remapped reboot operation
  *
  * \param[in,out] op  Operation to reset
  */
 static void
 undo_op_remap(remote_fencing_op_t *op)
 {
     if (op->phase > 0) {
         crm_info("Undoing remap of reboot targeting %s for %s.%.8s",
                  op->target, op->client_name, op->id);
         op->phase = st_phase_requested;
         strcpy(op->action, "reboot");
     }
 }
 
 static xmlNode *
 create_op_done_notify(remote_fencing_op_t * op, int rc)
 {
     xmlNode *notify_data = create_xml_node(NULL, T_STONITH_NOTIFY_FENCE);
 
     crm_xml_add_int(notify_data, "state", op->state);
     crm_xml_add_int(notify_data, F_STONITH_RC, rc);
     crm_xml_add(notify_data, F_STONITH_TARGET, op->target);
     crm_xml_add(notify_data, F_STONITH_ACTION, op->action);
     crm_xml_add(notify_data, F_STONITH_DELEGATE, op->delegate);
     crm_xml_add(notify_data, F_STONITH_REMOTE_OP_ID, op->id);
     crm_xml_add(notify_data, F_STONITH_ORIGIN, op->originator);
     crm_xml_add(notify_data, F_STONITH_CLIENTID, op->client_id);
     crm_xml_add(notify_data, F_STONITH_CLIENTNAME, op->client_name);
 
     return notify_data;
 }
 
 void
-stonith_bcast_result_to_peers(remote_fencing_op_t * op, int rc)
+stonith_bcast_result_to_peers(remote_fencing_op_t * op, int rc, gboolean op_merged)
 {
     static int count = 0;
     xmlNode *bcast = create_xml_node(NULL, T_STONITH_REPLY);
     xmlNode *notify_data = create_op_done_notify(op, rc);
 
     count++;
     crm_trace("Broadcasting result to peers");
     crm_xml_add(bcast, F_TYPE, T_STONITH_NOTIFY);
     crm_xml_add(bcast, F_SUBTYPE, "broadcast");
     crm_xml_add(bcast, F_STONITH_OPERATION, T_STONITH_NOTIFY);
     crm_xml_add_int(bcast, "count", count);
+
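+    /* Flag results of merged (duplicate) operations so that peers receiving
+     * the broadcast can log them as such
+     */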
+    if (op_merged) {
+        crm_xml_add(bcast, F_STONITH_MERGED, "true");
+    }
+
     add_message_xml(bcast, F_STONITH_CALLDATA, notify_data);
     send_cluster_message(NULL, crm_msg_stonith_ng, bcast, FALSE);
     free_xml(notify_data);
     free_xml(bcast);
 
     return;
 }
 
 static void
 handle_local_reply_and_notify(remote_fencing_op_t * op, xmlNode * data, int rc)
 {
     xmlNode *notify_data = NULL;
     xmlNode *reply = NULL;
 
     if (op->notify_sent == TRUE) {
         /* nothing to do */
         return;
     }
 
     /* Do notification with a clean data object */
     notify_data = create_op_done_notify(op, rc);
     crm_xml_add_int(data, "state", op->state);
     crm_xml_add(data, F_STONITH_TARGET, op->target);
     crm_xml_add(data, F_STONITH_OPERATION, op->action);
 
     reply = stonith_construct_reply(op->request, NULL, data, rc);
     crm_xml_add(reply, F_STONITH_DELEGATE, op->delegate);
 
     /* Send fencing OP reply to local client that initiated fencing */
     do_local_reply(reply, op->client_id, op->call_options & st_opt_sync_call, FALSE);
 
     /* broadcast to all local clients that the fencing operation happened */
     do_stonith_notify(0, T_STONITH_NOTIFY_FENCE, rc, notify_data);
     do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY, 0, NULL);
 
     /* mark this op as having its notifications already sent */
     op->notify_sent = TRUE;
     free_xml(reply);
     free_xml(notify_data);
 }
 
 static void
 handle_duplicates(remote_fencing_op_t * op, xmlNode * data, int rc)
 {
     GListPtr iter = NULL;
 
     for (iter = op->duplicates; iter != NULL; iter = iter->next) {
         remote_fencing_op_t *other = iter->data;
 
         if (other->state == st_duplicate) {
             other->state = op->state;
             crm_debug("Performing duplicate notification for %s@%s.%.8s = %s",
                       other->client_name, other->originator, other->id,
                       pcmk_strerror(rc));
             remote_op_done(other, data, rc, TRUE);
 
         } else {
             // Possible if (for example) it timed out already
             crm_err("Skipping duplicate notification for %s@%s - %d", other->client_name,
                     other->originator, other->state);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Finalize a remote fencing operation
  *
  * This function has two code paths.
  *
  * Path 1. This node is the owner of the operation and needs to notify the
  *         cpg group via a broadcast of the operation's results.
  *
  * Path 2. The cpg broadcast is received. All nodes notify their local
  *         stonith clients of the operation's results.
  *
  * So, the owner of the operation first notifies the cluster of the result,
  * and once that cpg notification is received back, it notifies all of its
  * local clients.
  *
  * Nodes that are passive watchers of the operation will receive the
  * broadcast and only need to notify their local clients that the operation
  * finished.
  *
  * \param[in,out] op    Fencing operation to finalize
  * \param[in]     data  XML reply (if any) from the last delegated fencing
  *                      operation
  * \param[in]     rc    Return code of the operation
  * \param[in]     dup   Whether this operation is a duplicate; if so, handle
  *                      it slightly differently, making sure the broadcast is
  *                      not sent out
  */
 static void
 remote_op_done(remote_fencing_op_t * op, xmlNode * data, int rc, int dup)
 {
     int level = LOG_ERR;
     const char *subt = NULL;
     xmlNode *local_data = NULL;
+    gboolean op_merged = FALSE;
 
     op->completed = time(NULL);
     clear_remote_op_timers(op);
     undo_op_remap(op);
 
     if (op->notify_sent == TRUE) {
         crm_err("Already sent notifications for '%s' targeting %s on %s for "
                 "client %s@%s.%.8s: %s " CRM_XS " rc=%d state=%d",
                 op->action, op->target,
                 (op->delegate? op->delegate : "unknown node"),
                 op->client_name, op->originator, op->id, pcmk_strerror(rc),
                 rc, op->state);
         goto remote_op_done_cleanup;
     }
 
     if (!op->delegate && data && rc != -ENODEV && rc != -EHOSTUNREACH) {
         xmlNode *ndata = get_xpath_object("//@" F_STONITH_DELEGATE, data,
                                           LOG_NEVER);
         if(ndata) {
             op->delegate = crm_element_value_copy(ndata, F_STONITH_DELEGATE);
         } else { 
             op->delegate = crm_element_value_copy(data, F_ORIG);
         }
     }
 
     if (data == NULL) {
         data = create_xml_node(NULL, "remote-op");
         local_data = data;
     }
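+
+    /* The result counts as merged if this is a local duplicate, or if the
+     * broadcast from the operation's owner carries the merged flag
+     */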
 
+    if (dup) {
+        op_merged = TRUE;
+    } else if (crm_element_value(data, F_STONITH_MERGED)) {
+        op_merged = TRUE;
+    }
+
     /* Tell everyone the operation is done, we will continue
      * with doing the local notifications once we receive
      * the broadcast back. */
     subt = crm_element_value(data, F_SUBTYPE);
     if (dup == FALSE && safe_str_neq(subt, "broadcast")) {
         /* Defer notification until the bcast message arrives */
-        stonith_bcast_result_to_peers(op, rc);
+        stonith_bcast_result_to_peers(op, rc, op_merged);
         goto remote_op_done_cleanup;
     }
 
     if (rc == pcmk_ok || dup) {
         level = LOG_NOTICE;
     } else if (safe_str_neq(op->originator, stonith_our_uname)) {
         level = LOG_NOTICE;
     }
 
-    do_crm_log(level, "Operation '%s'%s%s on %s for %s@%s.%.8s: %s",
+    do_crm_log(level, "Operation '%s'%s%s on %s for %s@%s.%.8s%s: %s",
                op->action, (op->target? " targeting " : ""),
                (op->target? op->target : ""),
                (op->delegate? op->delegate : "<no-one>"),
-               op->client_name, op->originator, op->id, pcmk_strerror(rc));
+               op->client_name, op->originator, op->id,
+               (op_merged? " (merged)" : ""), pcmk_strerror(rc));
 
     handle_local_reply_and_notify(op, data, rc);
 
     if (dup == FALSE) {
         handle_duplicates(op, data, rc);
     }
 
     /* Free non-essential parts of the record
      * Keep the record around so we can query the history
      */
     if (op->query_results) {
         g_list_free_full(op->query_results, free_remote_query);
         op->query_results = NULL;
     }
 
     if (op->request) {
         free_xml(op->request);
         op->request = NULL;
     }
 
   remote_op_done_cleanup:
     free_xml(local_data);
 }
 
 static gboolean
 remote_op_watchdog_done(gpointer userdata)
 {
     remote_fencing_op_t *op = userdata;
 
     op->op_timer_one = 0;
 
     crm_notice("Self-fencing (%s) by %s for %s.%.8s assumed complete",
                op->action, op->target, op->client_name, op->id);
     op->state = st_done;
     remote_op_done(op, NULL, pcmk_ok, FALSE);
     return FALSE;
 }
 
 static gboolean
 remote_op_timeout_one(gpointer userdata)
 {
     remote_fencing_op_t *op = userdata;
 
     op->op_timer_one = 0;
 
     crm_notice("Peer's '%s' action targeting %s for client %s timed out " CRM_XS
                " id=%s", op->action, op->target, op->client_name, op->id);
     call_remote_stonith(op, NULL);
     return FALSE;
 }
 
 static gboolean
 remote_op_timeout(gpointer userdata)
 {
     remote_fencing_op_t *op = userdata;
 
     op->op_timer_total = 0;
 
     if (op->state == st_done) {
         crm_debug("Action '%s' targeting %s for client %s already completed "
                   CRM_XS " id=%s",
                   op->action, op->target, op->client_name, op->id);
         return FALSE;
     }
 
     crm_debug("Action '%s' targeting %s for client %s timed out "
               CRM_XS " id=%s",
               op->action, op->target, op->client_name, op->id);
 
     if (op->phase == st_phase_on) {
         /* A remapped reboot operation timed out in the "on" phase, but the
          * "off" phase completed successfully, so quit trying any further
          * devices, and return success.
          */
         remote_op_done(op, NULL, pcmk_ok, FALSE);
         return FALSE;
     }
 
     op->state = st_failed;
 
     remote_op_done(op, NULL, -ETIME, FALSE);
 
     return FALSE;
 }
 
 static gboolean
 remote_op_query_timeout(gpointer data)
 {
     remote_fencing_op_t *op = data;
 
     op->query_timer = 0;
     if (op->state == st_done) {
         crm_debug("Operation %s targeting %s already completed",
                   op->id, op->target);
     } else if (op->state == st_exec) {
         crm_debug("Operation %s targeting %s already in progress",
                   op->id, op->target);
     } else if (op->query_results) {
         crm_debug("Query %s targeting %s complete (state=%d)",
                   op->id, op->target, op->state);
         call_remote_stonith(op, NULL);
     } else {
         crm_debug("Query %s targeting %s timed out (state=%d)",
                   op->id, op->target, op->state);
         if (op->op_timer_total) {
             g_source_remove(op->op_timer_total);
             op->op_timer_total = 0;
         }
         remote_op_timeout(op);
     }
 
     return FALSE;
 }
 
 static gboolean
 topology_is_empty(stonith_topology_t *tp)
 {
     int i;
 
     if (tp == NULL) {
         return TRUE;
     }
 
     for (i = 0; i < ST_LEVEL_MAX; i++) {
         if (tp->levels[i] != NULL) {
             return FALSE;
         }
     }
     return TRUE;
 }
 
 /*!
  * \internal
  * \brief Add a device to an operation's automatic unfencing list
  *
  * \param[in,out] op      Operation to modify
  * \param[in]     device  Device ID to add
  */
 static void
 add_required_device(remote_fencing_op_t *op, const char *device)
 {
     GListPtr match  = g_list_find_custom(op->automatic_list, device,
                                          sort_strings);
 
     if (!match) {
         op->automatic_list = g_list_prepend(op->automatic_list, strdup(device));
     }
 }
 
 /*!
  * \internal
  * \brief Remove a device from the automatic unfencing list
  *
  * \param[in,out] op      Operation to modify
  * \param[in]     device  Device ID to remove
  */
 static void
 remove_required_device(remote_fencing_op_t *op, const char *device)
 {
     GListPtr match = g_list_find_custom(op->automatic_list, device,
                                         sort_strings);
 
     if (match) {
         op->automatic_list = g_list_remove(op->automatic_list, match->data);
     }
 }
 
 /* deep copy the device list */
 static void
 set_op_device_list(remote_fencing_op_t * op, GListPtr devices)
 {
     GListPtr lpc = NULL;
 
     if (op->devices_list) {
         g_list_free_full(op->devices_list, free);
         op->devices_list = NULL;
     }
     for (lpc = devices; lpc != NULL; lpc = lpc->next) {
         op->devices_list = g_list_append(op->devices_list, strdup(lpc->data));
     }
     op->devices = op->devices_list;
 }
 
 /*!
  * \internal
  * \brief Check whether a node matches a topology target
  *
  * \param[in] tp    Topology table entry to check
  * \param[in] node  Name of node to check
  *
  * \return TRUE if node matches topology target
  */
 static gboolean
 topology_matches(const stonith_topology_t *tp, const char *node)
 {
     regex_t r_patt;
 
     CRM_CHECK(node && tp && tp->target, return FALSE);
     switch(tp->kind) {
         case 2:
             /* This level targets by attribute, so tp->target is a NAME=VALUE pair
              * of a permanent attribute applied to targeted nodes. The test below
              * relies on the locally cached copy of the CIB, so if fencing needs to
              * be done before the initial CIB is received or after a malformed CIB
              * is received, then the topology cannot be used.
              */
             if (node_has_attr(node, tp->target_attribute, tp->target_value)) {
                 crm_notice("Matched %s with %s by attribute", node, tp->target);
                 return TRUE;
             }
             break;
         case 1:
             /* This level targets by name, so tp->target is a regular expression
              * matching names of nodes to be targeted.
              */
 
             if (regcomp(&r_patt, tp->target_pattern, REG_EXTENDED|REG_NOSUB)) {
                 crm_info("Bad regex '%s' for fencing level", tp->target);
             } else {
                 int status = regexec(&r_patt, node, 0, NULL, 0);
 
                 regfree(&r_patt);
                 if (status == 0) {
                     crm_notice("Matched %s with %s by name", node, tp->target);
                     return TRUE;
                 }
             }
             break;
         case 0:
             crm_trace("Testing %s against %s", node, tp->target);
             return safe_str_eq(tp->target, node);
     }
     crm_trace("No match for %s with %s", node, tp->target);
     return FALSE;
 }
 
 stonith_topology_t *
 find_topology_for_host(const char *host) 
 {
     GHashTableIter tIter;
     stonith_topology_t *tp = g_hash_table_lookup(topology, host);
 
     if(tp != NULL) {
         crm_trace("Found %s for %s in %d entries", tp->target, host, g_hash_table_size(topology));
         return tp;
     }
 
     g_hash_table_iter_init(&tIter, topology);
     while (g_hash_table_iter_next(&tIter, NULL, (gpointer *) & tp)) {
         if (topology_matches(tp, host)) {
             crm_trace("Found %s for %s in %d entries", tp->target, host, g_hash_table_size(topology));
             return tp;
         }
     }
 
     crm_trace("No matches for %s in %d topology entries", host, g_hash_table_size(topology));
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Set fencing operation's device list to target's next topology level
  *
  * \param[in,out] op  Remote fencing operation to modify
  *
  * \return pcmk_ok if successful, if the target was not specified (i.e. for
  *         queries), or if the target has no topology; -EINVAL if there are
  *         no more topology levels to try
  */
 static int
 stonith_topology_next(remote_fencing_op_t * op)
 {
     stonith_topology_t *tp = NULL;
 
     if (op->target) {
         /* Queries don't have a target set */
         tp = find_topology_for_host(op->target);
     }
     if (topology_is_empty(tp)) {
         return pcmk_ok;
     }
 
     set_bit(op->call_options, st_opt_topology);
 
     /* This is a new level, so undo any remapping left over from previous */
     undo_op_remap(op);
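 
     /* Advance to the next topology level that has any devices configured */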
 
     do {
         op->level++;
 
     } while (op->level < ST_LEVEL_MAX && tp->levels[op->level] == NULL);
 
     if (op->level < ST_LEVEL_MAX) {
         crm_trace("Attempting fencing level %d targeting %s (%d devices) "
                   "for client %s@%s.%.8s",
                   op->level, op->target, g_list_length(tp->levels[op->level]),
                   op->client_name, op->originator, op->id);
         set_op_device_list(op, tp->levels[op->level]);
 
         if (g_list_next(op->devices_list) && safe_str_eq(op->action, "reboot")) {
             /* A reboot has been requested for a topology level with multiple
              * devices. Instead of rebooting the devices sequentially, we will
              * turn them all off, then turn them all on again. (Think about
              * switched power outlets for redundant power supplies.)
              */
             op_phase_off(op);
         }
         return pcmk_ok;
     }
 
     crm_notice("All fencing options targeting %s for client %s@%s.%.8s failed",
                op->target, op->client_name, op->originator, op->id);
     return -EINVAL;
 }
 
 /*!
  * \brief Check whether this operation duplicates another in-flight operation
  *
  * If so, merge this operation into the in-flight operation and mark it as a
  * duplicate.
  */
 static void
 merge_duplicates(remote_fencing_op_t * op)
 {
     GHashTableIter iter;
     remote_fencing_op_t *other = NULL;
 
     time_t now = time(NULL);
 
     g_hash_table_iter_init(&iter, stonith_remote_op_list);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&other)) {
         crm_node_t *peer = NULL;
         const char *other_action = op_requested_action(other);
 
         if (other->state > st_exec) {
             /* Must be in-progress */
             continue;
         } else if (safe_str_neq(op->target, other->target)) {
             /* Must be for the same node */
             continue;
         } else if (safe_str_neq(op->action, other_action)) {
             crm_trace("Must be for the same action: %s vs. %s",
                       op->action, other_action);
             continue;
         } else if (safe_str_eq(op->client_name, other->client_name)) {
             crm_trace("Must be for different clients: %s", op->client_name);
             continue;
         } else if (safe_str_eq(other->target, other->originator)) {
             crm_trace("Can't be a suicide operation: %s", other->target);
             continue;
         }
 
         peer = crm_get_peer(0, other->originator);
         if(fencing_peer_active(peer) == FALSE) {
             crm_notice("Failing action '%s' targeting %s originating from "
                        "client %s@%s.%.8s: Originator is dead",
                        other->action, other->target, other->client_name, other->originator, other->id);
             other->state = st_failed;
             continue;
 
         } else if(other->total_timeout > 0 && now > (other->total_timeout + other->created)) {
             crm_info("Action '%s' targeting %s originating from client "
                      "%s@%s.%.8s is too old: %ld vs. %ld + %d",
                      other->action, other->target, other->client_name, other->originator, other->id,
                      now, other->created, other->total_timeout);
             continue;
         }
 
         /* There is another in-flight request to fence the same host
          * Piggyback on that instead.  If it fails, so do we.
          */
         other->duplicates = g_list_append(other->duplicates, op);
         if (other->total_timeout == 0) {
             crm_trace("Making a best-guess as to the timeout used");
             other->total_timeout = op->total_timeout =
                 TIMEOUT_MULTIPLY_FACTOR * get_op_total_timeout(op, NULL);
         }
         crm_notice("Merging stonith action '%s' targeting %s originating from "
                    "client %s.%.8s with identical request from %s@%s.%.8s (%ds)",
                    op->action, op->target, op->client_name, op->id,
                    other->client_name, other->originator, other->id,
                    other->total_timeout);
         report_timeout_period(op, other->total_timeout);
         op->state = st_duplicate;
     }
 }
 
 static uint32_t fencing_active_peers(void)
 {
     uint32_t count = 0;
     crm_node_t *entry;
     GHashTableIter gIter;
 
     g_hash_table_iter_init(&gIter, crm_peer_cache);
     while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
         if(fencing_peer_active(entry)) {
             count++;
         }
     }
     return count;
 }
 
 int
 stonith_manual_ack(xmlNode * msg, remote_fencing_op_t * op)
 {
     xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, msg, LOG_ERR);
 
     op->state = st_done;
     op->completed = time(NULL);
     op->delegate = strdup("a human");
 
     crm_notice("Injecting manual confirmation that %s is safely off/down",
                crm_element_value(dev, F_STONITH_TARGET));
 
     remote_op_done(op, msg, pcmk_ok, FALSE);
 
     /* Replies are sent via done_cb->stonith_send_async_reply()->do_local_reply() */
     return -EINPROGRESS;
 }
 
 /*!
  * \internal
  * \brief Create a new remote stonith operation
  *
  * \param[in] client   ID of local stonith client that initiated the operation
  * \param[in] request  The request from the client that started the operation
  * \param[in] peer     TRUE if this operation is owned by another stonith peer
  *                     (an operation owned by one peer is stored on all peers,
  *                     but only the owner executes it; all nodes get the results
  *                     once the owner finishes execution)
  */
 void *
 create_remote_stonith_op(const char *client, xmlNode * request, gboolean peer)
 {
     remote_fencing_op_t *op = NULL;
     xmlNode *dev = get_xpath_object("//@" F_STONITH_TARGET, request, LOG_NEVER);
     int call_options = 0;
 
     init_stonith_remote_op_hash_table(&stonith_remote_op_list);
 
     /* If this operation is owned by another node, check to make
      * sure we haven't already created this operation. */
     if (peer && dev) {
         const char *op_id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID);
 
         CRM_CHECK(op_id != NULL, return NULL);
 
         op = g_hash_table_lookup(stonith_remote_op_list, op_id);
         if (op) {
             crm_debug("%s already exists", op_id);
             return op;
         }
     }
 
     op = calloc(1, sizeof(remote_fencing_op_t));
 
     crm_element_value_int(request, F_STONITH_TIMEOUT, &(op->base_timeout));
 
     if (peer && dev) {
         op->id = crm_element_value_copy(dev, F_STONITH_REMOTE_OP_ID);
     } else {
         op->id = crm_generate_uuid();
     }
 
     g_hash_table_replace(stonith_remote_op_list, op->id, op);
     CRM_LOG_ASSERT(g_hash_table_lookup(stonith_remote_op_list, op->id) != NULL);
     crm_trace("Created %s", op->id);
 
     op->state = st_query;
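     /* Expect one query reply from each currently active fencer peer */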
     op->replies_expected = fencing_active_peers();
     op->action = crm_element_value_copy(dev, F_STONITH_ACTION);
     op->originator = crm_element_value_copy(dev, F_STONITH_ORIGIN);
     op->delegate = crm_element_value_copy(dev, F_STONITH_DELEGATE); /* May not be set */
     op->created = time(NULL);
 
     if (op->originator == NULL) {
         /* Local or relayed request */
         op->originator = strdup(stonith_our_uname);
     }
 
     CRM_LOG_ASSERT(client != NULL);
     if (client) {
         op->client_id = strdup(client);
     }
 
     op->client_name = crm_element_value_copy(request, F_STONITH_CLIENTNAME);
 
     op->target = crm_element_value_copy(dev, F_STONITH_TARGET);
     op->request = copy_xml(request);    /* TODO: Figure out how to avoid this */
     crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
     op->call_options = call_options;
 
     crm_element_value_int(request, F_STONITH_CALLID, &(op->client_callid));
 
     crm_trace("%s new stonith op %s ('%s' targeting %s for client %s)",
               (peer && dev)? "Recorded" : "Generated", op->id, op->action,
               op->target, op->client_name);
 
     if (op->call_options & st_opt_cs_nodeid) {
         int nodeid = crm_atoi(op->target, NULL);
         crm_node_t *node = crm_find_known_peer_full(nodeid, NULL, CRM_GET_PEER_ANY);
 
         /* Ensure the conversion only happens once */
         op->call_options &= ~st_opt_cs_nodeid;
 
         if (node && node->uname) {
             free(op->target);
             op->target = strdup(node->uname);
 
         } else {
             crm_warn("Could not expand nodeid '%s' into a host name", op->target);
         }
     }
 
     /* check to see if this is a duplicate operation of another in-flight operation */
     merge_duplicates(op);
 
     if (op->state != st_duplicate) {
         /* kick history readers */
         do_stonith_notify(0, T_STONITH_NOTIFY_HISTORY, 0, NULL);
     }
 
     /* safe to trim as long as that doesn't touch pending ops */
     stonith_fence_history_trim();
 
     return op;
 }
 
 remote_fencing_op_t *
 initiate_remote_stonith_op(crm_client_t * client, xmlNode * request, gboolean manual_ack)
 {
     int query_timeout = 0;
     xmlNode *query = NULL;
     const char *client_id = NULL;
     remote_fencing_op_t *op = NULL;
 
     if (client) {
         client_id = client->id;
     } else {
         client_id = crm_element_value(request, F_STONITH_CLIENTID);
     }
 
     CRM_LOG_ASSERT(client_id != NULL);
     op = create_remote_stonith_op(client_id, request, FALSE);
     op->owner = TRUE;
     if (manual_ack) {
         crm_notice("Initiating manual confirmation for %s: %s",
                    op->target, op->id);
         return op;
     }
 
     CRM_CHECK(op->action, return NULL);
 
     if (stonith_topology_next(op) != pcmk_ok) {
         op->state = st_failed;
     }
 
     switch (op->state) {
         case st_failed:
             crm_warn("Could not request peer fencing (%s) targeting %s "
                      CRM_XS " id=%s", op->action, op->target, op->id);
             remote_op_done(op, NULL, -EINVAL, FALSE);
             return op;
 
         case st_duplicate:
             crm_info("Requesting peer fencing (%s) targeting %s (duplicate) "
                      CRM_XS " id=%s", op->action, op->target, op->id);
             return op;
 
         default:
             crm_notice("Requesting peer fencing (%s) targeting %s "
                        CRM_XS " id=%s state=%d",
                        op->action, op->target, op->id, op->state);
     }
 
     query = stonith_create_op(op->client_callid, op->id, STONITH_OP_QUERY,
                               NULL, op->call_options);
 
     crm_xml_add(query, F_STONITH_REMOTE_OP_ID, op->id);
     crm_xml_add(query, F_STONITH_TARGET, op->target);
     crm_xml_add(query, F_STONITH_ACTION, op_requested_action(op));
     crm_xml_add(query, F_STONITH_ORIGIN, op->originator);
     crm_xml_add(query, F_STONITH_CLIENTID, op->client_id);
     crm_xml_add(query, F_STONITH_CLIENTNAME, op->client_name);
     crm_xml_add_int(query, F_STONITH_TIMEOUT, op->base_timeout);
 
     send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE);
     free_xml(query);
 
     query_timeout = op->base_timeout * TIMEOUT_MULTIPLY_FACTOR;
     op->query_timer = g_timeout_add((1000 * query_timeout), remote_op_query_timeout, op);
 
     return op;
 }
 
 enum find_best_peer_options {
     /*! Skip checking the target peer for capable fencing devices */
     FIND_PEER_SKIP_TARGET = 0x0001,
     /*! Only check the target peer for capable fencing devices */
     FIND_PEER_TARGET_ONLY = 0x0002,
     /*! Skip peers and devices that are not verified */
     FIND_PEER_VERIFIED_ONLY = 0x0004,
 };
 
 static st_query_result_t *
 find_best_peer(const char *device, remote_fencing_op_t * op, enum find_best_peer_options options)
 {
     GListPtr iter = NULL;
     gboolean verified_devices_only = (options & FIND_PEER_VERIFIED_ONLY) ? TRUE : FALSE;
 
     if (!device && is_set(op->call_options, st_opt_topology)) {
         return NULL;
     }
 
     for (iter = op->query_results; iter != NULL; iter = iter->next) {
         st_query_result_t *peer = iter->data;
 
         crm_trace("Testing result from %s targeting %s with %d devices: %d %x",
                   peer->host, op->target, peer->ndevices, peer->tried, options);
         if ((options & FIND_PEER_SKIP_TARGET) && safe_str_eq(peer->host, op->target)) {
             continue;
         }
         if ((options & FIND_PEER_TARGET_ONLY) && safe_str_neq(peer->host, op->target)) {
             continue;
         }
 
         if (is_set(op->call_options, st_opt_topology)) {
 
             if (grab_peer_device(op, peer, device, verified_devices_only)) {
                 return peer;
             }
 
         } else if ((peer->tried == FALSE)
                    && count_peer_devices(op, peer, verified_devices_only)) {
 
             /* No topology: Use the current best peer */
             crm_trace("Simple fencing");
             return peer;
         }
     }
 
     return NULL;
 }
 
 static st_query_result_t *
 stonith_choose_peer(remote_fencing_op_t * op)
 {
     const char *device = NULL;
     st_query_result_t *peer = NULL;
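     /* Number of active peers, used below to decide whether to keep waiting
      * for more query replies before settling for unverified devices
      */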
     uint32_t active = fencing_active_peers();
 
     do {
         if (op->devices) {
             device = op->devices->data;
             crm_trace("Checking for someone to fence (%s) %s with %s",
                       op->action, op->target, device);
         } else {
             crm_trace("Checking for someone to fence (%s) %s",
                       op->action, op->target);
         }
 
         /* Best choice is a peer other than the target with verified access */
         peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET|FIND_PEER_VERIFIED_ONLY);
         if (peer) {
             crm_trace("Found verified peer %s for %s", peer->host, device?device:"<any>");
             return peer;
         }
 
         if(op->query_timer != 0 && op->replies < QB_MIN(op->replies_expected, active)) {
             crm_trace("Waiting before looking for unverified devices to fence %s", op->target);
             return NULL;
         }
 
         /* If no other peer has verified access, next best is unverified access */
         peer = find_best_peer(device, op, FIND_PEER_SKIP_TARGET);
         if (peer) {
             crm_trace("Found best unverified peer %s", peer->host);
             return peer;
         }
 
         /* If no other peer can do it, last option is self-fencing
          * (which is never allowed for the "on" phase of a remapped reboot)
          */
         if (op->phase != st_phase_on) {
             peer = find_best_peer(device, op, FIND_PEER_TARGET_ONLY);
             if (peer) {
                 crm_trace("%s will fence itself", peer->host);
                 return peer;
             }
         }
 
         /* Try the next fencing level if there is one (unless we're in the "on"
          * phase of a remapped "reboot", because we ignore errors in that case)
          */
     } while ((op->phase != st_phase_on)
              && is_set(op->call_options, st_opt_topology)
              && stonith_topology_next(op) == pcmk_ok);
 
     crm_notice("Couldn't find anyone to fence (%s) %s with %s",
                op->action, op->target, (device? device : "any device"));
     return NULL;
 }
 
 static int
 get_device_timeout(const remote_fencing_op_t *op, const st_query_result_t *peer,
                    const char *device)
 {
     device_properties_t *props;
 
     if (!peer || !device) {
         return op->base_timeout;
     }
 
     props = g_hash_table_lookup(peer->devices, device);
     if (!props) {
         return op->base_timeout;
     }
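 
     /* Use the peer's reported action-specific timeout if any (otherwise the
      * base timeout), plus the device's maximum random delay
      */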
 
     return (props->custom_action_timeout[op->phase]?
            props->custom_action_timeout[op->phase] : op->base_timeout)
            + props->delay_max[op->phase];
 }
 
 struct timeout_data {
     const remote_fencing_op_t *op;
     const st_query_result_t *peer;
     int total_timeout;
 };
 
 /*!
  * \internal
  * \brief Add timeout to a total if device has not been executed yet
  *
  * \param[in] key        GHashTable key (device ID)
  * \param[in] value      GHashTable value (device properties)
  * \param[in] user_data  Timeout data
  */
 static void
 add_device_timeout(gpointer key, gpointer value, gpointer user_data)
 {
     const char *device_id = key;
     device_properties_t *props = value;
     struct timeout_data *timeout = user_data;
 
     if (!props->executed[timeout->op->phase]
         && !props->disallowed[timeout->op->phase]) {
         timeout->total_timeout += get_device_timeout(timeout->op,
                                                      timeout->peer, device_id);
     }
 }
 
 static int
 get_peer_timeout(const remote_fencing_op_t *op, const st_query_result_t *peer)
 {
     struct timeout_data timeout;
 
     timeout.op = op;
     timeout.peer = peer;
     timeout.total_timeout = 0;
 
     g_hash_table_foreach(peer->devices, add_device_timeout, &timeout);
 
     return (timeout.total_timeout? timeout.total_timeout : op->base_timeout);
 }
 
 static int
 get_op_total_timeout(const remote_fencing_op_t *op,
                      const st_query_result_t *chosen_peer)
 {
     int total_timeout = 0;
     stonith_topology_t *tp = find_topology_for_host(op->target);
 
     if (is_set(op->call_options, st_opt_topology) && tp) {
         int i;
         GListPtr device_list = NULL;
         GListPtr iter = NULL;
 
         /* Yep, this looks scary, nested loops all over the place.
          * Here is what is going on.
          * Loop1: Iterate through fencing levels.
          * Loop2: If a fencing level has devices, loop through each device
          * Loop3: For each device in a fencing level, see what peer owns it
          *        and what that peer has reported the timeout is for the device.
          */
         for (i = 0; i < ST_LEVEL_MAX; i++) {
             if (!tp->levels[i]) {
                 continue;
             }
             for (device_list = tp->levels[i]; device_list; device_list = device_list->next) {
                 for (iter = op->query_results; iter != NULL; iter = iter->next) {
                     const st_query_result_t *peer = iter->data;
 
                     if (find_peer_device(op, peer, device_list->data)) {
                         total_timeout += get_device_timeout(op, peer,
                                                             device_list->data);
                         break;
                     }
                 }               /* End Loop3: match device with peer that owns device, find device's timeout period */
             }                   /* End Loop2: iterate through devices at a specific level */
         }                       /*End Loop1: iterate through fencing levels */
 
     } else if (chosen_peer) {
         total_timeout = get_peer_timeout(op, chosen_peer);
     } else {
         total_timeout = op->base_timeout;
     }
 
     return total_timeout ? total_timeout : op->base_timeout;
 }
 
 static void
 report_timeout_period(remote_fencing_op_t * op, int op_timeout)
 {
     GListPtr iter = NULL;
     xmlNode *update = NULL;
     const char *client_node = NULL;
     const char *client_id = NULL;
     const char *call_id = NULL;
 
     if (op->call_options & st_opt_sync_call) {
         /* There is no reason to report the timeout for a synchronous call. It
          * is impossible to use the reported timeout to do anything when the
          * client is blocking for the response. This update is only important
          * for async calls that use a callback to report the results. */
         return;
     } else if (!op->request) {
         return;
     }
 
     crm_trace("Reporting timeout for %s.%.8s", op->client_name, op->id);
     client_node = crm_element_value(op->request, F_STONITH_CLIENTNODE);
     call_id = crm_element_value(op->request, F_STONITH_CALLID);
     client_id = crm_element_value(op->request, F_STONITH_CLIENTID);
     if (!client_node || !call_id || !client_id) {
         return;
     }
 
     if (safe_str_eq(client_node, stonith_our_uname)) {
         /* The client is connected to this node; send the update directly to them */
         do_stonith_async_timeout_update(client_id, call_id, op_timeout);
         return;
     }
 
     /* The client is connected to another node, relay this update to them */
     update = stonith_create_op(op->client_callid, op->id, STONITH_OP_TIMEOUT_UPDATE, NULL, 0);
     crm_xml_add(update, F_STONITH_REMOTE_OP_ID, op->id);
     crm_xml_add(update, F_STONITH_CLIENTID, client_id);
     crm_xml_add(update, F_STONITH_CALLID, call_id);
     crm_xml_add_int(update, F_STONITH_TIMEOUT, op_timeout);
 
     send_cluster_message(crm_get_peer(0, client_node), crm_msg_stonith_ng, update, FALSE);
 
     free_xml(update);
 
     for (iter = op->duplicates; iter != NULL; iter = iter->next) {
         remote_fencing_op_t *dup = iter->data;
 
         crm_trace("Reporting timeout for duplicate %s.%.8s", dup->client_name, dup->id);
         report_timeout_period(iter->data, op_timeout);
     }
 }
 
 /*!
  * \internal
  * \brief Advance an operation to the next device in its topology
  *
  * \param[in,out] op      Operation to advance
  * \param[in]     device  ID of device just completed
  * \param[in]     msg     XML reply that contained device result (if available)
  * \param[in]     rc      Return code of device's execution
  */
 static void
 advance_op_topology(remote_fencing_op_t *op, const char *device, xmlNode *msg,
                     int rc)
 {
     /* Advance to the next device at this topology level, if any */
     if (op->devices) {
         op->devices = op->devices->next;
     }
 
     /* Handle automatic unfencing if an "on" action was requested */
     if ((op->phase == st_phase_requested) && safe_str_eq(op->action, "on")) {
         /* If the device we just executed was required, it's not anymore */
         remove_required_device(op, device);
 
         /* If there are no more devices at this topology level, run through any
          * remaining devices with automatic unfencing
          */
         if (op->devices == NULL) {
             op->devices = op->automatic_list;
         }
     }
 
     if ((op->devices == NULL) && (op->phase == st_phase_off)) {
         /* We're done with this level and with required devices, but we had
          * remapped "reboot" to "off", so start over with "on". If any devices
          * need to be turned back on, op->devices will be non-NULL after this.
          */
         op_phase_on(op);
     }
 
     if (op->devices) {
         /* Necessary devices remain, so execute the next one */
         crm_trace("Next targeting %s on behalf of %s@%s (rc was %d)",
                   op->target, op->originator, op->client_name, rc);
         call_remote_stonith(op, NULL);
     } else {
         /* We're done with all devices and phases, so finalize operation */
         crm_trace("Marking complex fencing op targeting %s as complete",
                   op->target);
         op->state = st_done;
         remote_op_done(op, msg, rc, FALSE);
     }
 }
 
 void
 call_remote_stonith(remote_fencing_op_t * op, st_query_result_t * peer)
 {
     const char *device = NULL;
     int timeout = op->base_timeout;
 
     crm_trace("State for %s.%.8s: %s %d", op->target, op->client_name, op->id, op->state);
     if (peer == NULL && !is_set(op->call_options, st_opt_topology)) {
         peer = stonith_choose_peer(op);
     }
 
     if (!op->op_timer_total) {
         int total_timeout = get_op_total_timeout(op, peer);
 
         op->total_timeout = TIMEOUT_MULTIPLY_FACTOR * total_timeout;
         op->op_timer_total = g_timeout_add(1000 * op->total_timeout, remote_op_timeout, op);
         report_timeout_period(op, op->total_timeout);
         crm_info("Total timeout set to %d for peer's fencing targeting %s for %s "
                  CRM_XS " id=%s",
                  total_timeout, op->target, op->client_name, op->id);
     }
 
     if (is_set(op->call_options, st_opt_topology) && op->devices) {
         /* Ignore any peer preference, they might not have the device we need */
         /* When using topology, stonith_choose_peer() removes the device from
          * further consideration, so be sure to calculate timeout beforehand */
         peer = stonith_choose_peer(op);
 
         device = op->devices->data;
         timeout = get_device_timeout(op, peer, device);
     }
 
     if (peer) {
         int timeout_one = 0;
         xmlNode *remote_op = stonith_create_op(op->client_callid, op->id, STONITH_OP_FENCE, NULL, 0);
 
         crm_xml_add(remote_op, F_STONITH_REMOTE_OP_ID, op->id);
         crm_xml_add(remote_op, F_STONITH_TARGET, op->target);
         crm_xml_add(remote_op, F_STONITH_ACTION, op->action);
         crm_xml_add(remote_op, F_STONITH_ORIGIN, op->originator);
         crm_xml_add(remote_op, F_STONITH_CLIENTID, op->client_id);
         crm_xml_add(remote_op, F_STONITH_CLIENTNAME, op->client_name);
         crm_xml_add_int(remote_op, F_STONITH_TIMEOUT, timeout);
         crm_xml_add_int(remote_op, F_STONITH_CALLOPTS, op->call_options);
 
         if (device) {
             timeout_one = TIMEOUT_MULTIPLY_FACTOR *
                           get_device_timeout(op, peer, device);
             crm_notice("Requesting that %s perform '%s' action targeting %s "
                        "using '%s' " CRM_XS " for client %s (%ds)",
                        peer->host, op->action, op->target, device,
                        op->client_name, timeout_one);
             crm_xml_add(remote_op, F_STONITH_DEVICE, device);
             crm_xml_add(remote_op, F_STONITH_MODE, "slave");
 
         } else {
             timeout_one = TIMEOUT_MULTIPLY_FACTOR * get_peer_timeout(op, peer);
             crm_notice("Requesting that %s perform '%s' action targeting %s "
                        CRM_XS " for client %s (%ds, %lds)",
                        peer->host, op->action, op->target, op->client_name,
                        timeout_one, stonith_watchdog_timeout_ms);
             crm_xml_add(remote_op, F_STONITH_MODE, "smart");
         }
 
         op->state = st_exec;
         if (op->op_timer_one) {
             g_source_remove(op->op_timer_one);
         }
 
         if(stonith_watchdog_timeout_ms > 0 && device && safe_str_eq(device, "watchdog")) {
             crm_notice("Waiting %lds for %s to self-fence (%s) for client %s.%.8s",
                        stonith_watchdog_timeout_ms/1000, op->target, op->action,
                        op->client_name, op->id);
             op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op);
 
             /* TODO check devices to verify watchdog will be in use */
         } else if(stonith_watchdog_timeout_ms > 0
                   && safe_str_eq(peer->host, op->target)
                   && safe_str_neq(op->action, "on")) {
             crm_notice("Waiting %lds for %s to self-fence (%s) for client %s.%.8s",
                        stonith_watchdog_timeout_ms/1000, op->target, op->action,
                        op->client_name, op->id);
             op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op);
 
         } else {
             op->op_timer_one = g_timeout_add((1000 * timeout_one), remote_op_timeout_one, op);
         }
 
 
         send_cluster_message(crm_get_peer(0, peer->host), crm_msg_stonith_ng, remote_op, FALSE);
         peer->tried = TRUE;
         free_xml(remote_op);
         return;
 
     } else if (op->phase == st_phase_on) {
         /* A remapped "on" cannot be executed, but the node was already
          * turned off successfully, so ignore the error and continue.
          */
         crm_warn("Ignoring %s 'on' failure (no capable peers) targeting %s "
                  "after successful 'off'", device, op->target);
         advance_op_topology(op, device, NULL, pcmk_ok);
         return;
 
     } else if (op->owner == FALSE) {
         crm_err("Fencing (%s) targeting %s for client %s is not ours to control",
                 op->action, op->target, op->client_name);
 
     } else if (op->query_timer == 0) {
         /* We've exhausted all available peers */
         crm_info("No remaining peers capable of fencing (%s) %s for client %s "
                  CRM_XS " state=%d",
                  op->action, op->target, op->client_name, op->state);
         CRM_LOG_ASSERT(op->state < st_done);
         remote_op_timeout(op);
 
     } else if(op->replies >= op->replies_expected || op->replies >= fencing_active_peers()) {
         int rc = -EHOSTUNREACH;
 
         /* if the operation never left the query state,
          * but we have all the expected replies, then no devices
          * are available to execute the fencing operation. */
 
         if(stonith_watchdog_timeout_ms && (device == NULL || safe_str_eq(device, "watchdog"))) {
             crm_notice("Waiting %lds for %s to self-fence (%s) for client %s.%.8s",
                      stonith_watchdog_timeout_ms/1000, op->target,
                      op->action, op->client_name, op->id);
 
             op->op_timer_one = g_timeout_add(stonith_watchdog_timeout_ms, remote_op_watchdog_done, op);
             return;
         }
 
         if (op->state == st_query) {
            crm_info("No peers (out of %d) have devices capable of fencing "
                     "(%s) %s for client %s " CRM_XS " state=%d",
                     op->replies, op->action, op->target, op->client_name,
                     op->state);
 
             rc = -ENODEV;
         } else {
            crm_info("No peers (out of %d) are capable of fencing (%s) %s "
                     "for client %s " CRM_XS " state=%d",
                     op->replies, op->action, op->target, op->client_name,
                     op->state);
         }
 
         op->state = st_failed;
         remote_op_done(op, NULL, rc, FALSE);
 
     } else {
         crm_info("Waiting for additional peers capable of fencing (%s) %s%s%s "
                  "for client %s.%.8s",
                  op->action, op->target, (device? " with " : ""),
                  (device? device : ""), op->client_name, op->id);
     }
 }
 
 /*!
  * \internal
  * \brief Comparison function for sorting query results
  *
  * \param[in] a  GList item to compare
  * \param[in] b  GList item to compare
  *
  * \return Per the glib documentation, "a negative integer if the first value
  *         comes before the second, 0 if they are equal, or a positive integer
  *         if the first value comes after the second."
  */
 static gint
 sort_peers(gconstpointer a, gconstpointer b)
 {
     const st_query_result_t *peer_a = a;
     const st_query_result_t *peer_b = b;
 
     return (peer_b->ndevices - peer_a->ndevices);
 }
 
 /*!
  * \internal
  * \brief Determine whether all devices in the target's topology have been found
  */
 static gboolean
 all_topology_devices_found(remote_fencing_op_t * op)
 {
     GListPtr device = NULL;
     GListPtr iter = NULL;
     device_properties_t *match = NULL;
     stonith_topology_t *tp = NULL;
     gboolean skip_target = FALSE;
     int i;
 
     tp = find_topology_for_host(op->target);
     if (!tp) {
         return FALSE;
     }
     if (safe_str_eq(op->action, "off") || safe_str_eq(op->action, "reboot")) {
         /* Don't count the devices on the target node if we are killing
          * the target node. */
         skip_target = TRUE;
     }
 
     for (i = 0; i < ST_LEVEL_MAX; i++) {
         for (device = tp->levels[i]; device; device = device->next) {
             match = NULL;
             for (iter = op->query_results; iter && !match; iter = iter->next) {
                 st_query_result_t *peer = iter->data;
 
                 if (skip_target && safe_str_eq(peer->host, op->target)) {
                     continue;
                 }
                 match = find_peer_device(op, peer, device->data);
             }
             if (!match) {
                 return FALSE;
             }
         }
     }
 
     return TRUE;
 }
 
 /*!
  * \internal
  * \brief Parse action-specific device properties from XML
  *
  * \param[in]     xml     XML element containing the properties
  * \param[in]     peer    Name of peer that sent XML (for logs)
  * \param[in]     device  Device ID (for logs)
  * \param[in]     action  Action the properties relate to (for logs)
  * \param[in,out] op      Operation that the properties relate to
  * \param[in]     phase   Phase the properties relate to
  * \param[in,out] props   Device properties to update
  */
 static void
 parse_action_specific(xmlNode *xml, const char *peer, const char *device,
                       const char *action, remote_fencing_op_t *op,
                       enum st_remap_phase phase, device_properties_t *props)
 {
     props->custom_action_timeout[phase] = 0;
     crm_element_value_int(xml, F_STONITH_ACTION_TIMEOUT,
                           &props->custom_action_timeout[phase]);
     if (props->custom_action_timeout[phase]) {
         crm_trace("Peer %s with device %s returned %s action timeout %d",
                   peer, device, action, props->custom_action_timeout[phase]);
     }
 
     props->delay_max[phase] = 0;
     crm_element_value_int(xml, F_STONITH_DELAY_MAX, &props->delay_max[phase]);
     if (props->delay_max[phase]) {
         crm_trace("Peer %s with device %s returned maximum random delay %d for %s",
                   peer, device, props->delay_max[phase], action);
     }
 
     props->delay_base[phase] = 0;
     crm_element_value_int(xml, F_STONITH_DELAY_BASE, &props->delay_base[phase]);
     if (props->delay_base[phase]) {
         crm_trace("Peer %s with device %s returned base delay %d for %s",
                   peer, device, props->delay_base[phase], action);
     }
 
     /* Handle devices with automatic unfencing */
     if (safe_str_eq(action, "on")) {
         int required = 0;
 
         crm_element_value_int(xml, F_STONITH_DEVICE_REQUIRED, &required);
         if (required) {
             crm_trace("Peer %s requires device %s to execute for action %s",
                       peer, device, action);
             add_required_device(op, device);
         }
     }
 
     /* If a reboot is remapped to off+on, it's possible that a node is allowed
      * to perform one action but not another.
      */
     if (crm_is_true(crm_element_value(xml, F_STONITH_ACTION_DISALLOWED))) {
         props->disallowed[phase] = TRUE;
         crm_trace("Peer %s is disallowed from executing %s for device %s",
                   peer, action, device);
     }
 }
 
 /*!
  * \internal
  * \brief Parse one device's properties from peer's XML query reply
  *
  * \param[in]     xml       XML node containing device properties
  * \param[in,out] op        Operation that query and reply relate to
  * \param[in,out] result    Peer's results
  * \param[in]     device    ID of device being parsed
  */
 static void
 add_device_properties(xmlNode *xml, remote_fencing_op_t *op,
                       st_query_result_t *result, const char *device)
 {
     xmlNode *child;
     int verified = 0;
     device_properties_t *props = calloc(1, sizeof(device_properties_t));
 
     /* Add a new entry to this result's devices list */
     CRM_ASSERT(props != NULL);
     g_hash_table_insert(result->devices, strdup(device), props);
 
     /* Peers with verified (monitored) access will be preferred */
     crm_element_value_int(xml, F_STONITH_DEVICE_VERIFIED, &verified);
     if (verified) {
         crm_trace("Peer %s has confirmed a verified device %s",
                   result->host, device);
         props->verified = TRUE;
     }
 
     /* Parse action-specific device properties */
     parse_action_specific(xml, result->host, device, op_requested_action(op),
                           op, st_phase_requested, props);
     for (child = __xml_first_child(xml); child != NULL; child = __xml_next(child)) {
         /* Replies for "reboot" operations will include the action-specific
          * values for "off" and "on" in child elements, just in case the reboot
          * winds up getting remapped.
          */
         if (safe_str_eq(ID(child), "off")) {
             parse_action_specific(child, result->host, device, "off",
                                   op, st_phase_off, props);
         } else if (safe_str_eq(ID(child), "on")) {
             parse_action_specific(child, result->host, device, "on",
                                   op, st_phase_on, props);
         }
     }
 }
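+/* Illustrative sketch (not a normative schema) of the kind of device entry
+ * parsed above, inferred from the attribute names used in this file:
+ *
+ *   <device id="fence_ipmi_node1" st_monitor_verified="1"
+ *           st_action_timeout="20" st_delay_max="5" st_delay_base="0">
+ *     <off id="off" st_action_timeout="20"/>
+ *     <on id="on" st_required="1"/>
+ *   </device>
+ *
+ * The "off"/"on" children carry action-specific values in case a reboot is
+ * remapped, and ID() reads each element's "id" attribute; the element names
+ * and device name here are hypothetical.
+ */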
 
 /*!
  * \internal
  * \brief Parse a peer's XML query reply and add it to operation's results
  *
  * \param[in,out] op        Operation that query and reply relate to
  * \param[in]     host      Name of peer that sent this reply
  * \param[in]     ndevices  Number of devices expected in reply
  * \param[in]     xml       XML node containing device list
  *
  * \return Newly allocated result structure with parsed reply
  */
 static st_query_result_t *
 add_result(remote_fencing_op_t *op, const char *host, int ndevices, xmlNode *xml)
 {
     st_query_result_t *result = calloc(1, sizeof(st_query_result_t));
     xmlNode *child;
 
     CRM_CHECK(result != NULL, return NULL);
     result->host = strdup(host);
     result->devices = crm_str_table_new();
 
     /* Each child element describes one capable device available to the peer */
     for (child = __xml_first_child(xml); child != NULL; child = __xml_next(child)) {
         const char *device = ID(child);
 
         if (device) {
             add_device_properties(child, op, result, device);
         }
     }
 
     result->ndevices = g_hash_table_size(result->devices);
     CRM_CHECK(ndevices == result->ndevices,
               crm_err("Query claimed to have %d devices but %d found",
                       ndevices, result->ndevices));
 
     op->query_results = g_list_insert_sorted(op->query_results, result, sort_peers);
     return result;
 }
 
 /*!
  * \internal
  * \brief Handle a peer's reply to our fencing query
  *
  * Parse a query result from XML and store it in the remote operation
  * table, and when enough replies have been received, issue a fencing request.
  *
  * \param[in] msg  XML reply received
  *
  * \return pcmk_ok on success, -errno on error
  *
  * \note See initiate_remote_stonith_op() for how the XML query was initially
  *       formed, and stonith_query() for how the peer formed its XML reply.
  */
 int
 process_remote_stonith_query(xmlNode * msg)
 {
     int ndevices = 0;
     gboolean host_is_target = FALSE;
     gboolean have_all_replies = FALSE;
     const char *id = NULL;
     const char *host = NULL;
     remote_fencing_op_t *op = NULL;
     st_query_result_t *result = NULL;
     uint32_t replies_expected;
     xmlNode *dev = get_xpath_object("//@" F_STONITH_REMOTE_OP_ID, msg, LOG_ERR);
 
     CRM_CHECK(dev != NULL, return -EPROTO);
 
     id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID);
     CRM_CHECK(id != NULL, return -EPROTO);
 
     dev = get_xpath_object("//@" F_STONITH_AVAILABLE_DEVICES, msg, LOG_ERR);
     CRM_CHECK(dev != NULL, return -EPROTO);
     crm_element_value_int(dev, F_STONITH_AVAILABLE_DEVICES, &ndevices);
 
     op = g_hash_table_lookup(stonith_remote_op_list, id);
     if (op == NULL) {
         crm_debug("Received query reply for unknown or expired operation %s",
                   id);
         return -EOPNOTSUPP;
     }
 
     replies_expected = fencing_active_peers();
     if (op->replies_expected < replies_expected) {
         replies_expected = op->replies_expected;
     }
     if ((++op->replies >= replies_expected) && (op->state == st_query)) {
         have_all_replies = TRUE;
     }
     host = crm_element_value(msg, F_ORIG);
     host_is_target = safe_str_eq(host, op->target);
 
     crm_info("Query result %d of %d from %s for %s/%s (%d devices) %s",
              op->replies, replies_expected, host,
              op->target, op->action, ndevices, id);
     if (ndevices > 0) {
         result = add_result(op, host, ndevices, dev);
     }
 
     if (is_set(op->call_options, st_opt_topology)) {
         /* If we start the fencing before all the topology results are in,
          * it is possible fencing levels will be skipped because of the missing
          * query results. */
         if (op->state == st_query && all_topology_devices_found(op)) {
             /* All the query results are in for the topology, start the fencing ops. */
             crm_trace("All topology devices found");
             call_remote_stonith(op, result);
 
         } else if (have_all_replies) {
             crm_info("All topology query replies have arrived, continuing (%d expected/%d received) ",
                      replies_expected, op->replies);
             call_remote_stonith(op, NULL);
         }
 
     } else if (op->state == st_query) {
         int nverified = count_peer_devices(op, result, TRUE);
 
         /* We have a result for a non-topology fencing op that looks promising,
          * go ahead and start fencing before query timeout */
         if (result && (host_is_target == FALSE) && nverified) {
             /* we have a verified device living on a peer that is not the target */
             crm_trace("Found %d verified devices", nverified);
             call_remote_stonith(op, result);
 
         } else if (have_all_replies) {
             crm_info("All query replies have arrived, continuing (%d expected/%d received) ",
                      replies_expected, op->replies);
             call_remote_stonith(op, NULL);
 
         } else {
             crm_trace("Waiting for more peer results before launching fencing operation");
         }
 
     } else if (result && (op->state == st_done)) {
         crm_info("Discarding query result from %s (%d devices): Operation is in state %d",
                  result->host, result->ndevices, op->state);
     }
 
     return pcmk_ok;
 }
 
 /*!
  * \internal
  * \brief Handle a peer's reply to a fencing request
  *
  * Parse a fencing reply from XML, and either finalize the operation
  * or attempt another device as appropriate.
  *
  * \param[in] msg  XML reply received
  *
  * \return pcmk_ok on success, -errno on error
  */
 int
 process_remote_stonith_exec(xmlNode * msg)
 {
     int rc = 0;
     const char *id = NULL;
     const char *device = NULL;
     remote_fencing_op_t *op = NULL;
     xmlNode *dev = get_xpath_object("//@" F_STONITH_REMOTE_OP_ID, msg, LOG_ERR);
 
     CRM_CHECK(dev != NULL, return -EPROTO);
 
     id = crm_element_value(dev, F_STONITH_REMOTE_OP_ID);
     CRM_CHECK(id != NULL, return -EPROTO);
 
     dev = get_xpath_object("//@" F_STONITH_RC, msg, LOG_ERR);
     CRM_CHECK(dev != NULL, return -EPROTO);
 
     crm_element_value_int(dev, F_STONITH_RC, &rc);
 
     device = crm_element_value(dev, F_STONITH_DEVICE);
 
     if (stonith_remote_op_list) {
         op = g_hash_table_lookup(stonith_remote_op_list, id);
     }
 
     if (op == NULL && rc == pcmk_ok) {
         /* Record successful fencing operations */
         const char *client_id = crm_element_value(dev, F_STONITH_CLIENTID);
 
         op = create_remote_stonith_op(client_id, dev, TRUE);
     }
 
     if (op == NULL) {
         /* Could be for an event that began before we started */
         /* TODO: Record the op for later querying */
         crm_info("Received peer result of unknown or expired operation %s", id);
         return -EOPNOTSUPP;
     }
 
     if (op->devices && device && safe_str_neq(op->devices->data, device)) {
         crm_err("Received outdated reply for device %s (instead of %s) to "
                 "fence (%s) %s. Operation already timed out at peer level.",
                 device, (const char *) op->devices->data, op->action, op->target);
         return rc;
     }
 
     if (safe_str_eq(crm_element_value(msg, F_SUBTYPE), "broadcast")) {
         crm_debug("Marking call to %s for %s on behalf of %s@%s.%.8s: %s (%d)",
                   op->action, op->target, op->client_name, op->id, op->originator,
                   pcmk_strerror(rc), rc);
         if (rc == pcmk_ok) {
             op->state = st_done;
         } else {
             op->state = st_failed;
         }
         remote_op_done(op, msg, rc, FALSE);
         return pcmk_ok;
     } else if (safe_str_neq(op->originator, stonith_our_uname)) {
         /* If this isn't a remote level broadcast, and we are not the
          * originator of the operation, we should not be receiving this msg. */
        crm_err("%s received non-broadcast fencing result for operation it "
                "does not own (device %s targeting %s)",
                stonith_our_uname, device, op->target);
         return rc;
     }
 
     if (is_set(op->call_options, st_opt_topology)) {
         const char *device = crm_element_value(msg, F_STONITH_DEVICE);
 
         crm_notice("Action '%s' targeting %s using %s on behalf of %s@%s: %s "
                    CRM_XS " rc=%d",
                    op->action, op->target, device, op->client_name,
                    op->originator, pcmk_strerror(rc), rc);
 
        /* We own the op, and it is complete. Broadcast the result to all
         * nodes and notify our local clients. */
         if (op->state == st_done) {
             remote_op_done(op, msg, rc, FALSE);
             return rc;
         }
 
         if ((op->phase == 2) && (rc != pcmk_ok)) {
             /* A remapped "on" failed, but the node was already turned off
              * successfully, so ignore the error and continue.
              */
             crm_warn("Ignoring %s 'on' failure (exit code %d) targeting %s "
                      "after successful 'off'", device, rc, op->target);
             rc = pcmk_ok;
         }
 
         if (rc == pcmk_ok) {
             /* An operation completed successfully. Try another device if
              * necessary, otherwise mark the operation as done. */
             advance_op_topology(op, device, msg, rc);
             return rc;
         } else {
             /* This device failed, time to try another topology level. If no other
              * levels are available, mark this operation as failed and report results. */
             if (stonith_topology_next(op) != pcmk_ok) {
                 op->state = st_failed;
                 remote_op_done(op, msg, rc, FALSE);
                 return rc;
             }
         }
     } else if (rc == pcmk_ok && op->devices == NULL) {
         crm_trace("All done for %s", op->target);
 
         op->state = st_done;
         remote_op_done(op, msg, rc, FALSE);
         return rc;
     } else if (rc == -ETIME && op->devices == NULL) {
         /* If the operation timed out don't bother retrying other peers. */
         op->state = st_failed;
         remote_op_done(op, msg, rc, FALSE);
         return rc;
     } else {
         /* fall-through and attempt other fencing action using another peer */
     }
 
     /* Retry on failure */
     crm_trace("Next for %s on behalf of %s@%s (rc was %d)", op->target, op->originator,
               op->client_name, rc);
     call_remote_stonith(op, NULL);
     return rc;
 }
 
 gboolean
 stonith_check_fence_tolerance(int tolerance, const char *target, const char *action)
 {
     GHashTableIter iter;
     time_t now = time(NULL);
     remote_fencing_op_t *rop = NULL;
 
     crm_trace("tolerance=%d, stonith_remote_op_list=%p", tolerance,
               stonith_remote_op_list);
 
     if (tolerance <= 0 || !stonith_remote_op_list || target == NULL ||
         action == NULL) {
         return FALSE;
     }
 
     g_hash_table_iter_init(&iter, stonith_remote_op_list);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&rop)) {
         if (strcmp(rop->target, target) != 0) {
             continue;
         } else if (rop->state != st_done) {
             continue;
         /* We don't have to worry about remapped reboots here
          * because if state is done, any remapping has been undone
          */
         } else if (strcmp(rop->action, action) != 0) {
             continue;
         } else if ((rop->completed + tolerance) < now) {
             continue;
         }
 
         crm_notice("Target %s was fenced (%s) less than %ds ago by %s on behalf of %s",
                    target, action, tolerance, rop->delegate, rop->originator);
         return TRUE;
     }
     return FALSE;
 }
diff --git a/daemons/fenced/pacemaker-fenced.h b/daemons/fenced/pacemaker-fenced.h
index 583cb47a14..e2c74ea3c8 100644
--- a/daemons/fenced/pacemaker-fenced.h
+++ b/daemons/fenced/pacemaker-fenced.h
@@ -1,263 +1,271 @@
 /*
  * Copyright 2009-2019 the Pacemaker project contributors
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm/common/mainloop.h>
 
 /*!
  * \internal
  * \brief Check whether a target was fenced within the last few seconds
  * \param tolerance, The number of seconds to look back in time
  * \param target, The node to search for
  * \param action, The fencing action to match
  *
  * \retval FALSE, no matching fencing operation was found
  * \retval TRUE, a matching fencing operation took place within the last
  *               'tolerance' seconds
  */
 gboolean stonith_check_fence_tolerance(int tolerance, const char *target, const char *action);
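+/* Illustrative use (hypothetical caller): a new request can be treated as
+ * already satisfied when an equivalent one just completed, e.g.
+ *
+ *   if (stonith_check_fence_tolerance(tolerance, target, "reboot")) {
+ *       return pcmk_ok;   (hypothetical short-circuit)
+ *   }
+ */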
 
 enum st_device_flags
 {
     st_device_supports_list   = 0x0001,
     st_device_supports_status = 0x0002,
     st_device_supports_reboot = 0x0004,
 };
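+/* Illustrative: capabilities are tested as bit flags, for example
+ *   if (is_set(device->flags, st_device_supports_reboot)) { ... }
+ * using the same is_set() helper seen elsewhere in this daemon.
+ */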
 
 typedef struct stonith_device_s {
     char *id;
     char *agent;
     char *namespace;
 
     /*! list of actions that must execute on the target node. Used for unfencing */
     char *on_target_actions;
     GListPtr targets;
     time_t targets_age;
     gboolean has_attr_map;
     /* should nodeid parameter for victim be included in agent arguments */
     gboolean include_nodeid;
     /* whether the cluster should automatically unfence nodes with the device */
     gboolean automatic_unfencing;
     guint priority;
 
     enum st_device_flags flags;
 
     GHashTable *params;
     GHashTable *aliases;
     GList *pending_ops;
     crm_trigger_t *work;
     xmlNode *agent_metadata;
 
     /*! A verified device is one that has contacted the
      * agent successfully to perform a monitor operation */
     gboolean verified;
 
     gboolean cib_registered;
     gboolean api_registered;
 } stonith_device_t;
 
 /* These values are used to index certain arrays by "phase". Usually an
  * operation has only one "phase", so phase is always zero. However, some
  * reboots are remapped to "off" then "on", in which case "reboot" will be
  * phase 0, "off" will be phase 1 and "on" will be phase 2.
  */
 enum st_remap_phase {
     st_phase_requested = 0,
     st_phase_off = 1,
     st_phase_on = 2,
     st_phase_max = 3
 };
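+/* For example, parse_action_specific() stores per-phase values such as
+ * props->custom_action_timeout[st_phase_off], so a remapped reboot can later
+ * use the "off"-specific timeout instead of the one returned for the
+ * originally requested action.
+ */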
 
+/* These values carry additional information in an asynchronous fencing reply.
+ * st_reply_opt_merged indicates that the operation was merged with another
+ * in-flight operation and completed without being executed itself.
+ */
+enum st_replay_option {
+    st_reply_opt_none            = 0x00000000,
+    st_reply_opt_merged          = 0x00000001,
+};
+
 typedef struct remote_fencing_op_s {
     /* The unique id associated with this operation */
     char *id;
     /*! The node this operation will fence */
     char *target;
     /*! The fencing action to perform on the target. (reboot, on, off) */
     char *action;
 
     /*! When was the fencing action recorded (seconds since epoch) */
     time_t created;
 
     /*! Marks if the final notifications have been sent to local stonith clients. */
     gboolean notify_sent;
     /*! The number of query replies received */
     guint replies;
     /*! The number of query replies expected */
     guint replies_expected;
     /*! Does this node own control of this operation */
     gboolean owner;
    /*! After the query is complete, this is the high-level timer that expires the entire operation */
     guint op_timer_total;
     /*! This timer expires the current fencing request. Many fencing
      * requests may exist in a single operation */
     guint op_timer_one;
    /*! This timer expires the query request sent out to determine
     * which nodes contain which devices, and whom those devices can fence */
     guint query_timer;
     /*! This is the default timeout to use for each fencing device if no
      * custom timeout is received in the query. */
     gint base_timeout;
     /*! This is the calculated total timeout an operation can take before
      * expiring. This is calculated by adding together all the timeout
      * values associated with the devices this fencing operation may call */
     gint total_timeout;
 
     /*! Delegate is the node being asked to perform a fencing action
      * on behalf of the node that owns the remote operation. Some operations
      * will involve multiple delegates. This value represents the final delegate
      * that is used. */
     char *delegate;
     /*! The point at which the remote operation completed */
     time_t completed;
     /*! The stonith_call_options associated with this remote operation */
     long long call_options;
 
    /*! The current state of the remote operation. This indicates
     * what stage the op is in: query, exec, done, duplicate, or failed. */
     enum op_state state;
     /*! The node that owns the remote operation */
     char *originator;
     /*! The local client id that initiated the fencing request */
     char *client_id;
     /*! The client's call_id that initiated the fencing request */
     int client_callid;
     /*! The name of client that initiated the fencing request */
     char *client_name;
     /*! List of the received query results for all the nodes in the cpg group */
     GListPtr query_results;
     /*! The original request that initiated the remote stonith operation */
     xmlNode *request;
 
     /*! The current topology level being executed */
     guint level;
     /*! The current operation phase being executed */
     enum st_remap_phase phase;
 
     /*! Devices with automatic unfencing (always run if "on" requested, never if remapped) */
     GListPtr automatic_list;
     /*! List of all devices at the currently executing topology level */
     GListPtr devices_list;
     /*! Current entry in the topology device list */
     GListPtr devices;
 
     /*! List of duplicate operations attached to this operation. Once this operation
      * completes, the duplicate operations will be closed out as well. */
     GListPtr duplicates;
 
 } remote_fencing_op_t;
 
 /*!
  * \internal
  * \brief Broadcast the result of an operation to the peers.
  * \param op, Operation whose result should be broadcast
  * \param rc, Result of the operation
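+ * \param op_merged, Whether the operation was merged with another and not executed itself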
  */
-void stonith_bcast_result_to_peers(remote_fencing_op_t * op, int rc);
+void stonith_bcast_result_to_peers(remote_fencing_op_t * op, int rc, gboolean op_merged);
 
 enum st_callback_flags {
     st_callback_unknown               = 0x0000,
     st_callback_notify_fence          = 0x0001,
     st_callback_device_add            = 0x0004,
     st_callback_device_del            = 0x0010,
     st_callback_notify_history        = 0x0020,
     st_callback_notify_history_synced = 0x0040
 };
 
 /*
  * Complex fencing requirements are specified via fencing topologies.
  * A topology consists of levels; each level is a list of fencing devices.
  * Topologies are stored in a hash table by node name. When a node needs to be
  * fenced, if it has an entry in the topology table, the levels are tried
  * sequentially, and the devices in each level are tried sequentially.
  * Fencing is considered successful as soon as any level succeeds;
  * a level is considered successful if all its devices succeed.
  * Essentially, all devices at a given level are "and-ed" and the
  * levels are "or-ed".
  *
  * This structure is used for the topology table entries.
  * Topology levels start from 1, so levels[0] is unused and always NULL.
  */
 typedef struct stonith_topology_s {
     int kind;
 
     /*! Node name regex or attribute name=value for which topology applies */
     char *target;
     char *target_value;
     char *target_pattern;
     char *target_attribute;
 
     /*! Names of fencing devices at each topology level */
     GListPtr levels[ST_LEVEL_MAX];
 
 } stonith_topology_t;
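+/* Illustrative example (hypothetical device names): if a node's topology has
+ *   level 1: fence_ipmi_node1
+ *   level 2: fence_pdu_a, fence_pdu_b
+ * then level 1 is tried first on its own; only if it fails are both PDU
+ * devices tried, and level 2 succeeds only if both of its devices succeed.
+ */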
 
 void init_device_list(void);
 void free_device_list(void);
 void init_topology_list(void);
 void free_topology_list(void);
 void free_stonith_remote_op_list(void);
 void init_stonith_remote_op_hash_table(GHashTable **table);
 void free_metadata_cache(void);
 
 long long get_stonith_flag(const char *name);
 
 void stonith_command(crm_client_t * client, uint32_t id, uint32_t flags,
                             xmlNode * op_request, const char *remote_peer);
 
 int stonith_device_register(xmlNode * msg, const char **desc, gboolean from_cib);
 
 int stonith_device_remove(const char *id, gboolean from_cib);
 
 char *stonith_level_key(xmlNode * msg, int mode);
 int stonith_level_kind(xmlNode * msg);
 int stonith_level_register(xmlNode * msg, char **desc);
 
 int stonith_level_remove(xmlNode * msg, char **desc);
 
 stonith_topology_t *find_topology_for_host(const char *host);
 
 void do_local_reply(xmlNode * notify_src, const char *client_id, gboolean sync_reply,
                            gboolean from_peer);
 
 xmlNode *stonith_construct_reply(xmlNode * request, const char *output, xmlNode * data,
                                         int rc);
 
 void
  do_stonith_async_timeout_update(const char *client, const char *call_id, int timeout);
 
 void do_stonith_notify(int options, const char *type, int result, xmlNode * data);
 void do_stonith_notify_device(int options, const char *op, int rc, const char *desc);
 void do_stonith_notify_level(int options, const char *op, int rc, const char *desc);
 
 remote_fencing_op_t *initiate_remote_stonith_op(crm_client_t * client, xmlNode * request,
                                                        gboolean manual_ack);
 
 int process_remote_stonith_exec(xmlNode * msg);
 
 int process_remote_stonith_query(xmlNode * msg);
 
 void *create_remote_stonith_op(const char *client, xmlNode * request, gboolean peer);
 
 int stonith_fence_history(xmlNode *msg, xmlNode **output,
                           const char *remote_peer, int options);
 
 void stonith_fence_history_trim(void);
 
 bool fencing_peer_active(crm_node_t *peer);
 
 int stonith_manual_ack(xmlNode * msg, remote_fencing_op_t * op);
 
 gboolean string_in_list(GListPtr list, const char *item);
 
 gboolean node_has_attr(const char *node, const char *name, const char *value);
 
 extern char *stonith_our_uname;
 extern gboolean stand_alone;
 extern GHashTable *device_list;
 extern GHashTable *topology;
 extern long stonith_watchdog_timeout_ms;
 
 extern GHashTable *stonith_remote_op_list;
diff --git a/include/crm/fencing/internal.h b/include/crm/fencing/internal.h
index 15b4964ef6..44826673a1 100644
--- a/include/crm/fencing/internal.h
+++ b/include/crm/fencing/internal.h
@@ -1,156 +1,157 @@
 /*
  * Copyright 2011-2019 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef STONITH_NG_INTERNAL__H
 #  define STONITH_NG_INTERNAL__H
 
 #  include <glib.h>
 #  include <crm/common/ipc.h>
 #  include <crm/common/output.h>
 #  include <crm/common/xml.h>
 
 struct stonith_action_s;
 typedef struct stonith_action_s stonith_action_t;
 
 stonith_action_t *stonith_action_create(const char *agent,
                                         const char *_action,
                                         const char *victim,
                                         uint32_t victim_nodeid,
                                         int timeout,
                                         GHashTable * device_args, GHashTable * port_map);
 void stonith__destroy_action(stonith_action_t *action);
 void stonith__action_result(stonith_action_t *action, int *rc, char **output,
                             char **error_output);
 
 int
 stonith_action_execute_async(stonith_action_t * action,
                              void *userdata,
                              void (*done) (GPid pid, int rc, const char *output,
                                            gpointer user_data),
                              void (*fork_cb) (GPid pid, gpointer user_data));
 
 int stonith__execute(stonith_action_t *action);
 
 xmlNode *create_level_registration_xml(const char *node, const char *pattern,
                                        const char *attr, const char *value,
                                        int level,
                                        stonith_key_value_t *device_list);
 
 xmlNode *create_device_registration_xml(const char *id,
                                         enum stonith_namespace namespace,
                                         const char *agent,
                                         stonith_key_value_t *params,
                                         const char *rsc_provides);
 
 void stonith__register_messages(pcmk__output_t *out);
 
 GList *stonith__parse_targets(const char *hosts);
 
 gboolean stonith__later_succeeded(stonith_history_t *event, stonith_history_t *top_history);
 stonith_history_t *stonith__sort_history(stonith_history_t *history);
 
 #  define ST_LEVEL_MAX 10
 
 #  define F_STONITH_CLIENTID      "st_clientid"
 #  define F_STONITH_CALLOPTS      "st_callopt"
 #  define F_STONITH_CALLID        "st_callid"
 #  define F_STONITH_CALLDATA      "st_calldata"
 #  define F_STONITH_OPERATION     "st_op"
 #  define F_STONITH_TARGET        "st_target"
 #  define F_STONITH_REMOTE_OP_ID  "st_remote_op"
 #  define F_STONITH_RC            "st_rc"
 /*! Timeout period per device execution */
 #  define F_STONITH_TIMEOUT       "st_timeout"
 #  define F_STONITH_TOLERANCE     "st_tolerance"
 /*! Action specific timeout period returned in query of fencing devices. */
 #  define F_STONITH_ACTION_TIMEOUT       "st_action_timeout"
 /*! Host in query result is not allowed to run this action */
 #  define F_STONITH_ACTION_DISALLOWED     "st_action_disallowed"
 /*! Maximum random fencing delay for a device */
 #  define F_STONITH_DELAY_MAX            "st_delay_max"
 /*! Base delay used for a fencing delay */
 #  define F_STONITH_DELAY_BASE           "st_delay_base"
 /*! Has this device been verified using a monitor type
  *  operation (monitor, list, status) */
 #  define F_STONITH_DEVICE_VERIFIED   "st_monitor_verified"
 /*! device is required for this action */
 #  define F_STONITH_DEVICE_REQUIRED   "st_required"
 /*! number of available devices in query result */
 #  define F_STONITH_AVAILABLE_DEVICES "st-available-devices"
 #  define F_STONITH_CALLBACK_TOKEN    "st_async_id"
 #  define F_STONITH_CLIENTNAME        "st_clientname"
 #  define F_STONITH_CLIENTNODE        "st_clientnode"
 #  define F_STONITH_NOTIFY_ACTIVATE   "st_notify_activate"
 #  define F_STONITH_NOTIFY_DEACTIVATE "st_notify_deactivate"
 #  define F_STONITH_DELEGATE      "st_delegate"
 /*! The node initiating the stonith operation.  If an operation
  * is relayed, this is the last node the operation lands on. When
  * in standalone mode, origin is the client's id that originated the
  * operation. */
 #  define F_STONITH_ORIGIN        "st_origin"
 #  define F_STONITH_HISTORY_LIST  "st_history"
 #  define F_STONITH_DATE          "st_date"
 #  define F_STONITH_STATE         "st_state"
 #  define F_STONITH_ACTIVE        "st_active"
 #  define F_STONITH_DIFFERENTIAL  "st_differential"
 
 #  define F_STONITH_DEVICE        "st_device_id"
 #  define F_STONITH_ACTION        "st_device_action"
 #  define F_STONITH_MODE          "st_mode"
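+/*! Whether the operation was merged with another and completed without execution */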
+#  define F_STONITH_MERGED        "st_op_merged"
 
 #  define T_STONITH_NG        "stonith-ng"
 #  define T_STONITH_REPLY     "st-reply"
 /*! For async operations, the server returns an event to the client
  * containing the total amount of time it is allowing for the operation
  * to take place. */
 #  define T_STONITH_TIMEOUT_VALUE "st-async-timeout-value"
 #  define T_STONITH_NOTIFY    "st_notify"
 
 #  define STONITH_ATTR_HOSTARG   "pcmk_host_argument"
 #  define STONITH_ATTR_HOSTMAP   "pcmk_host_map"
 #  define STONITH_ATTR_HOSTLIST  "pcmk_host_list"
 #  define STONITH_ATTR_HOSTCHECK "pcmk_host_check"
 #  define STONITH_ATTR_DELAY_MAX "pcmk_delay_max"
 #  define STONITH_ATTR_DELAY_BASE   "pcmk_delay_base"
 #  define STONITH_ATTR_ACTION_LIMIT "pcmk_action_limit"
 
 #  define STONITH_ATTR_ACTION_OP   "action"
 
 #  define STONITH_OP_EXEC        "st_execute"
 #  define STONITH_OP_TIMEOUT_UPDATE        "st_timeout_update"
 #  define STONITH_OP_QUERY       "st_query"
 #  define STONITH_OP_FENCE       "st_fence"
 #  define STONITH_OP_RELAY       "st_relay"
 #  define STONITH_OP_DEVICE_ADD      "st_device_register"
 #  define STONITH_OP_DEVICE_DEL      "st_device_remove"
 #  define STONITH_OP_FENCE_HISTORY   "st_fence_history"
 #  define STONITH_OP_LEVEL_ADD       "st_level_add"
 #  define STONITH_OP_LEVEL_DEL       "st_level_remove"
 
 #  define STONITH_WATCHDOG_AGENT  "#watchdog"
 
 #  ifdef HAVE_STONITH_STONITH_H
 // utilities from st_lha.c
 int stonith__list_lha_agents(stonith_key_value_t **devices);
 int stonith__lha_metadata(const char *agent, int timeout, char **output);
 bool stonith__agent_is_lha(const char *agent);
 int stonith__lha_validate(stonith_t *st, int call_options, const char *target,
                           const char *agent, GHashTable *params,
                           int timeout, char **output, char **error_output);
 #  endif
 
 // utilities from st_rhcs.c
 int stonith__list_rhcs_agents(stonith_key_value_t **devices);
 int stonith__rhcs_metadata(const char *agent, int timeout, char **output);
 bool stonith__agent_is_rhcs(const char *agent);
 int stonith__rhcs_validate(stonith_t *st, int call_options, const char *target,
                            const char *agent, GHashTable *params,
                            int timeout, char **output, char **error_output);
 
 #endif