diff --git a/daemons/controld/controld_messages.c b/daemons/controld/controld_messages.c
index 43e20ba6e0..f01db70e1f 100644
--- a/daemons/controld/controld_messages.c
+++ b/daemons/controld/controld_messages.c
@@ -1,1303 +1,1292 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <string.h>
 #include <time.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 #include <crm/cluster/internal.h>
 #include <crm/cib.h>
 #include <crm/common/ipc_internal.h>
 
 #include <pacemaker-controld.h>
 
 extern void crm_shutdown(int nsig);
 
 static enum crmd_fsa_input handle_message(xmlNode *msg,
                                           enum crmd_fsa_cause cause);
 static void handle_response(xmlNode *stored_msg);
 static enum crmd_fsa_input handle_request(xmlNode *stored_msg,
                                           enum crmd_fsa_cause cause);
 static enum crmd_fsa_input handle_shutdown_request(xmlNode *stored_msg);
 static void send_msg_via_ipc(xmlNode * msg, const char *sys);
 
 /* for debugging only; the ID counter can safely wrap around */
 static int last_data_id = 0;
 
 void
 register_fsa_error_adv(enum crmd_fsa_cause cause, enum crmd_fsa_input input,
                        fsa_data_t * cur_data, void *new_data, const char *raised_from)
 {
     /* save the current actions if any */
     if (controld_globals.fsa_actions != A_NOTHING) {
         register_fsa_input_adv(cur_data ? cur_data->fsa_cause : C_FSA_INTERNAL,
                                I_NULL, cur_data ? cur_data->data : NULL,
                                controld_globals.fsa_actions, TRUE, __func__);
     }
 
     /* reset the action list */
     crm_info("Resetting the current action list");
     fsa_dump_actions(controld_globals.fsa_actions, "Drop");
     controld_globals.fsa_actions = A_NOTHING;
 
     /* register the error */
     register_fsa_input_adv(cause, input, new_data, A_NOTHING, TRUE, raised_from);
 }
 
 void
 register_fsa_input_adv(enum crmd_fsa_cause cause, enum crmd_fsa_input input,
                        void *data, uint64_t with_actions,
                        gboolean prepend, const char *raised_from)
 {
     unsigned old_len = g_list_length(controld_globals.fsa_message_queue);
     fsa_data_t *fsa_data = NULL;
 
     if (raised_from == NULL) {
         raised_from = "<unknown>";
     }
 
     if (input == I_NULL && with_actions == A_NOTHING /* && data == NULL */ ) {
         /* no point doing anything */
         crm_err("Cannot add entry to queue: no input and no action");
         return;
     }
 
     if (input == I_WAIT_FOR_EVENT) {
         controld_set_global_flags(controld_fsa_is_stalled);
         crm_debug("Stalling the FSA pending further input: source=%s cause=%s data=%p queue=%d",
                   raised_from, fsa_cause2string(cause), data, old_len);
 
         if (old_len > 0) {
             fsa_dump_queue(LOG_TRACE);
             prepend = FALSE;
         }
 
         if (data == NULL) {
             controld_set_fsa_action_flags(with_actions);
             fsa_dump_actions(with_actions, "Restored");
             return;
         }
 
         /* Store everything in the new event and reset
          * controld_globals.fsa_actions
          */
         with_actions |= controld_globals.fsa_actions;
         controld_globals.fsa_actions = A_NOTHING;
     }
 
     last_data_id++;
     crm_trace("%s %s FSA input %d (%s) due to %s, %s data",
               raised_from, (prepend? "prepended" : "appended"), last_data_id,
               fsa_input2string(input), fsa_cause2string(cause),
               (data? "with" : "without"));
 
     fsa_data = calloc(1, sizeof(fsa_data_t));
     fsa_data->id = last_data_id;
     fsa_data->fsa_input = input;
     fsa_data->fsa_cause = cause;
     fsa_data->origin = raised_from;
     fsa_data->data = NULL;
     fsa_data->data_type = fsa_dt_none;
     fsa_data->actions = with_actions;
 
     if (with_actions != A_NOTHING) {
         crm_trace("Adding actions %.16llx to input",
                   (unsigned long long) with_actions);
     }
 
     if (data != NULL) {
         switch (cause) {
             case C_FSA_INTERNAL:
             case C_CRMD_STATUS_CALLBACK:
             case C_IPC_MESSAGE:
             case C_HA_MESSAGE:
                 CRM_CHECK(((ha_msg_input_t *) data)->msg != NULL,
                           crm_err("Bogus data from %s", raised_from));
                 crm_trace("Copying %s data from %s as cluster message data",
                           fsa_cause2string(cause), raised_from);
                 fsa_data->data = copy_ha_msg_input(data);
                 fsa_data->data_type = fsa_dt_ha_msg;
                 break;
 
             case C_LRM_OP_CALLBACK:
                 crm_trace("Copying %s data from %s as lrmd_event_data_t",
                           fsa_cause2string(cause), raised_from);
                 fsa_data->data = lrmd_copy_event((lrmd_event_data_t *) data);
                 fsa_data->data_type = fsa_dt_lrm;
                 break;
 
             case C_TIMER_POPPED:
             case C_SHUTDOWN:
             case C_UNKNOWN:
             case C_STARTUP:
                 crm_crit("Copying %s data (from %s) is not yet implemented",
                          fsa_cause2string(cause), raised_from);
                 crmd_exit(CRM_EX_SOFTWARE);
                 break;
         }
     }
 
     /* The queued fsa_data must later be freed with delete_fsa_input() */
     if (prepend) {
         controld_globals.fsa_message_queue
             = g_list_prepend(controld_globals.fsa_message_queue, fsa_data);
     } else {
         controld_globals.fsa_message_queue
             = g_list_append(controld_globals.fsa_message_queue, fsa_data);
     }
 
     crm_trace("FSA message queue length is %d",
               g_list_length(controld_globals.fsa_message_queue));
 
     /* fsa_dump_queue(LOG_TRACE); */
 
     if (old_len == g_list_length(controld_globals.fsa_message_queue)) {
         crm_err("Couldn't add message to the queue");
     }
 
     if (input != I_WAIT_FOR_EVENT) {
         controld_trigger_fsa();
     }
 }
 
 void
 fsa_dump_queue(int log_level)
 {
     int offset = 0;
 
     for (GList *iter = controld_globals.fsa_message_queue; iter != NULL;
          iter = iter->next) {
         fsa_data_t *data = (fsa_data_t *) iter->data;
 
         do_crm_log_unlikely(log_level,
                             "queue[%d.%d]: input %s raised by %s(%p.%d)\t(cause=%s)",
                             offset++, data->id, fsa_input2string(data->fsa_input),
                             data->origin, data->data, data->data_type,
                             fsa_cause2string(data->fsa_cause));
     }
 }
 
 ha_msg_input_t *
 copy_ha_msg_input(ha_msg_input_t * orig)
 {
     ha_msg_input_t *copy = calloc(1, sizeof(ha_msg_input_t));
 
     CRM_ASSERT(copy != NULL);
     copy->msg = (orig && orig->msg)? copy_xml(orig->msg) : NULL;
     copy->xml = get_message_xml(copy->msg, F_CRM_DATA);
     return copy;
 }
 
 void
 delete_fsa_input(fsa_data_t * fsa_data)
 {
     lrmd_event_data_t *op = NULL;
     xmlNode *foo = NULL;
 
     if (fsa_data == NULL) {
         return;
     }
     crm_trace("About to free %s data", fsa_cause2string(fsa_data->fsa_cause));
 
     if (fsa_data->data != NULL) {
         switch (fsa_data->data_type) {
             case fsa_dt_ha_msg:
                 delete_ha_msg_input(fsa_data->data);
                 break;
 
             case fsa_dt_xml:
                 foo = fsa_data->data;
                 free_xml(foo);
                 break;
 
             case fsa_dt_lrm:
                 op = (lrmd_event_data_t *) fsa_data->data;
                 lrmd_free_event(op);
                 break;
 
             case fsa_dt_none:
                 if (fsa_data->data != NULL) {
                     crm_err("Don't know how to free %s data from %s",
                             fsa_cause2string(fsa_data->fsa_cause), fsa_data->origin);
                     crmd_exit(CRM_EX_SOFTWARE);
                 }
                 break;
         }
         crm_trace("%s data freed", fsa_cause2string(fsa_data->fsa_cause));
     }
 
     free(fsa_data);
 }
 
 /* returns the next message */
 fsa_data_t *
 get_message(void)
 {
     fsa_data_t *message
         = (fsa_data_t *) controld_globals.fsa_message_queue->data;
 
     controld_globals.fsa_message_queue
         = g_list_remove(controld_globals.fsa_message_queue, message);
     crm_trace("Processing input %d", message->id);
     return message;
 }
 
 void *
 fsa_typed_data_adv(fsa_data_t * fsa_data, enum fsa_data_type a_type, const char *caller)
 {
     void *ret_val = NULL;
 
     if (fsa_data == NULL) {
         crm_err("%s: No FSA data available", caller);
 
     } else if (fsa_data->data == NULL) {
         crm_err("%s: No message data available. Origin: %s", caller, fsa_data->origin);
 
     } else if (fsa_data->data_type != a_type) {
         crm_crit("%s: Message data was the wrong type! %d vs. requested=%d.  Origin: %s",
                  caller, fsa_data->data_type, a_type, fsa_data->origin);
         CRM_ASSERT(fsa_data->data_type == a_type);
     } else {
         ret_val = fsa_data->data;
     }
 
     return ret_val;
 }
 
 /*	A_MSG_ROUTE	*/
 void
 do_msg_route(long long action,
              enum crmd_fsa_cause cause,
              enum crmd_fsa_state cur_state,
              enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg);
 
     route_message(msg_data->fsa_cause, input->msg);
 }
 
 void
 route_message(enum crmd_fsa_cause cause, xmlNode * input)
 {
     ha_msg_input_t fsa_input;
     enum crmd_fsa_input result = I_NULL;
 
     fsa_input.msg = input;
     CRM_CHECK(cause == C_IPC_MESSAGE || cause == C_HA_MESSAGE, return);
 
     /* try passing the buck first */
     if (relay_message(input, cause == C_IPC_MESSAGE)) {
         return;
     }
 
     /* handle locally */
     result = handle_message(input, cause);
 
     /* done or process later? */
     switch (result) {
         case I_NULL:
         case I_CIB_OP:
         case I_ROUTER:
         case I_NODE_JOIN:
         case I_JOIN_REQUEST:
         case I_JOIN_RESULT:
             break;
         default:
             /* Deferring local processing of message */
             register_fsa_input_later(cause, result, &fsa_input);
             return;
     }
 
     if (result != I_NULL) {
         /* add to the front of the queue */
         register_fsa_input(cause, result, &fsa_input);
     }
 }
 
 gboolean
 relay_message(xmlNode * msg, gboolean originated_locally)
 {
     int dest = 1;
     bool is_for_dc = false;
     bool is_for_dcib = false;
     bool is_for_te = false;
     bool is_for_crm = false;
     bool is_for_cib = false;
     bool is_local = false;
     const char *host_to = NULL;
     const char *sys_to = NULL;
     const char *sys_from = NULL;
     const char *type = NULL;
     const char *task = NULL;
     const char *ref = NULL;
 
     if (msg == NULL) {
         crm_warn("Cannot route empty message");
         return TRUE;
     }
 
     host_to = crm_element_value(msg, F_CRM_HOST_TO);
     sys_to = crm_element_value(msg, F_CRM_SYS_TO);
     sys_from = crm_element_value(msg, F_CRM_SYS_FROM);
     type = crm_element_value(msg, F_TYPE);
     task = crm_element_value(msg, F_CRM_TASK);
     ref = crm_element_value(msg, XML_ATTR_REFERENCE);
 
     if (ref == NULL) {
         ref = "without reference ID";
     }
 
     if (pcmk__str_eq(task, CRM_OP_HELLO, pcmk__str_casei)) {
         crm_trace("No routing needed for hello message %s", ref);
         return TRUE;
 
     } else if (!pcmk__str_eq(type, T_CRM, pcmk__str_casei)) {
         crm_warn("Received invalid message %s: type '%s' not '" T_CRM "'",
                  ref, pcmk__s(type, ""));
         crm_log_xml_warn(msg, "[bad message type]");
         return TRUE;
 
     } else if (sys_to == NULL) {
         crm_warn("Received invalid message %s: no subsystem", ref);
         crm_log_xml_warn(msg, "[no subsystem]");
         return TRUE;
     }
 
     is_for_dc = (strcasecmp(CRM_SYSTEM_DC, sys_to) == 0);
     is_for_dcib = (strcasecmp(CRM_SYSTEM_DCIB, sys_to) == 0);
     is_for_te = (strcasecmp(CRM_SYSTEM_TENGINE, sys_to) == 0);
     is_for_cib = (strcasecmp(CRM_SYSTEM_CIB, sys_to) == 0);
     is_for_crm = (strcasecmp(CRM_SYSTEM_CRMD, sys_to) == 0);
 
     is_local = false;
     if (pcmk__str_empty(host_to)) {
         if (is_for_dc || is_for_te) {
             is_local = false;
 
         } else if (is_for_crm) {
             if (pcmk__strcase_any_of(task, CRM_OP_NODE_INFO,
                                      PCMK__CONTROLD_CMD_NODES, NULL)) {
                 /* Node info requests do not specify a host, which is normally
                  * treated as "all hosts", because the whole point is that the
                  * client may not know the local node name. Always handle these
                  * requests locally.
                  */
                 is_local = true;
             } else {
                 is_local = !originated_locally;
             }
 
         } else {
             is_local = true;
         }
 
     } else if (pcmk__str_eq(controld_globals.our_nodename, host_to,
                             pcmk__str_casei)) {
         is_local = true;
     } else if (is_for_crm && pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
         xmlNode *msg_data = get_message_xml(msg, F_CRM_DATA);
         const char *mode = crm_element_value(msg_data, PCMK__XA_MODE);
 
         if (pcmk__str_eq(mode, XML_TAG_CIB, pcmk__str_casei)) {
             // Local delete of an offline node's resource history
             is_local = true;
         }
     }
 
     if (is_for_dc || is_for_dcib || is_for_te) {
         if (AM_I_DC && is_for_te) {
             crm_trace("Route message %s locally as transition request", ref);
             send_msg_via_ipc(msg, sys_to);
 
         } else if (AM_I_DC) {
             crm_trace("Route message %s locally as DC request", ref);
             return FALSE; // More to be done by caller
 
         } else if (originated_locally && !pcmk__strcase_any_of(sys_from, CRM_SYSTEM_PENGINE,
                                                                CRM_SYSTEM_TENGINE, NULL)) {
 
             if (is_corosync_cluster()) {
                 dest = text2msg_type(sys_to);
             }
             crm_trace("Relay message %s to DC", ref);
             send_cluster_message(host_to ? crm_get_peer(0, host_to) : NULL, dest, msg, TRUE);
 
         } else {
             /* Neither the TE nor the scheduler should be sending messages
              * to DCs on other nodes. By definition, if we are no longer the DC,
              * then the scheduler's or TE's data should be discarded.
              */
             crm_trace("Discard message %s because we are not DC", ref);
         }
 
     } else if (is_local && (is_for_crm || is_for_cib)) {
         crm_trace("Route message %s locally as controller request", ref);
         return FALSE; // More to be done by caller
 
     } else if (is_local) {
         crm_trace("Relay message %s locally to %s",
                   ref, (sys_to? sys_to : "unknown client"));
         crm_log_xml_trace(msg, "[IPC relay]");
         send_msg_via_ipc(msg, sys_to);
 
     } else {
         crm_node_t *node_to = NULL;
 
         if (is_corosync_cluster()) {
             dest = text2msg_type(sys_to);
 
             if (dest == crm_msg_none || dest > crm_msg_stonith_ng) {
                 dest = crm_msg_crmd;
             }
         }
 
         if (host_to) {
             node_to = pcmk__search_cluster_node_cache(0, host_to);
             if (node_to == NULL) {
                 crm_warn("Cannot route message %s: Unknown node %s",
                          ref, host_to);
                 return TRUE;
             }
             crm_trace("Relay message %s to %s",
                       ref, (node_to->uname? node_to->uname : "peer"));
         } else {
             crm_trace("Broadcast message %s to all peers", ref);
         }
         send_cluster_message(host_to ? node_to : NULL, dest, msg, TRUE);
     }
 
     return TRUE; // No further processing of message is needed
 }
 
 // Return true if the given field contains a nonnegative integer
 static bool
 authorize_version(xmlNode *message_data, const char *field,
                   const char *client_name, const char *ref, const char *uuid)
 {
     const char *version = crm_element_value(message_data, field);
     long long version_num;
 
     if ((pcmk__scan_ll(version, &version_num, -1LL) != pcmk_rc_ok)
         || (version_num < 0LL)) {
 
         crm_warn("Rejected IPC hello from %s: '%s' is not a valid protocol %s "
                  CRM_XS " ref=%s uuid=%s",
                  client_name, ((version == NULL)? "" : version),
                  field, (ref? ref : "none"), uuid);
         return false;
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Check whether a client IPC message is acceptable
  *
  * If a given client IPC message is a hello, "authorize" it by ensuring it has
  * valid information such as a protocol version, and return false indicating
  * that nothing further needs to be done with the message. If the message is not
  * a hello, just return true to indicate it needs further processing.
  *
  * \param[in]     client_msg     XML of IPC message
  * \param[in,out] curr_client    If IPC is not proxied, client that sent message
  * \param[in]     proxy_session  If IPC is proxied, the session ID
  *
  * \return true if message needs further processing, false if it doesn't
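  *
  * \note A valid hello carries CRM_OP_HELLO as its task, and message data
  *       containing client_name, major_version, and minor_version values
  *       (the names looked up below); the version values must parse as
  *       nonnegative integers.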
  */
 bool
 controld_authorize_ipc_message(const xmlNode *client_msg, pcmk__client_t *curr_client,
                                const char *proxy_session)
 {
     xmlNode *message_data = NULL;
     const char *client_name = NULL;
     const char *op = crm_element_value(client_msg, F_CRM_TASK);
     const char *ref = crm_element_value(client_msg, XML_ATTR_REFERENCE);
     const char *uuid = (curr_client? curr_client->id : proxy_session);
 
     if (uuid == NULL) {
         crm_warn("IPC message from client rejected: No client identifier "
                  CRM_XS " ref=%s", (ref? ref : "none"));
         goto rejected;
     }
 
     if (!pcmk__str_eq(CRM_OP_HELLO, op, pcmk__str_casei)) {
         // Only hello messages need to be authorized
         return true;
     }
 
     message_data = get_message_xml(client_msg, F_CRM_DATA);
 
     client_name = crm_element_value(message_data, "client_name");
     if (pcmk__str_empty(client_name)) {
         crm_warn("IPC hello from client rejected: No client name",
                  CRM_XS " ref=%s uuid=%s", (ref? ref : "none"), uuid);
         goto rejected;
     }
     if (!authorize_version(message_data, "major_version", client_name, ref,
                            uuid)) {
         goto rejected;
     }
     if (!authorize_version(message_data, "minor_version", client_name, ref,
                            uuid)) {
         goto rejected;
     }
 
     crm_trace("Validated IPC hello from client %s", client_name);
     if (curr_client) {
         curr_client->userdata = strdup(client_name);
     }
     controld_trigger_fsa();
     return false;
 
 rejected:
     if (curr_client) {
         qb_ipcs_disconnect(curr_client->ipcs);
     }
     return false;
 }
 
 static enum crmd_fsa_input
 handle_message(xmlNode *msg, enum crmd_fsa_cause cause)
 {
     const char *type = NULL;
 
     CRM_CHECK(msg != NULL, return I_NULL);
 
     type = crm_element_value(msg, F_CRM_MSG_TYPE);
     if (pcmk__str_eq(type, XML_ATTR_REQUEST, pcmk__str_none)) {
         return handle_request(msg, cause);
 
     } else if (pcmk__str_eq(type, XML_ATTR_RESPONSE, pcmk__str_none)) {
         handle_response(msg);
         return I_NULL;
     }
 
     crm_err("Unknown message type: %s", type);
     return I_NULL;
 }
 
 static enum crmd_fsa_input
 handle_failcount_op(xmlNode * stored_msg)
 {
     const char *rsc = NULL;
     const char *uname = NULL;
     const char *op = NULL;
     char *interval_spec = NULL;
     guint interval_ms = 0;
     gboolean is_remote_node = FALSE;
     xmlNode *xml_op = get_message_xml(stored_msg, F_CRM_DATA);
 
     if (xml_op) {
         xmlNode *xml_rsc = first_named_child(xml_op, XML_CIB_TAG_RESOURCE);
         xmlNode *xml_attrs = first_named_child(xml_op, XML_TAG_ATTRS);
 
         if (xml_rsc) {
             rsc = ID(xml_rsc);
         }
         if (xml_attrs) {
             op = crm_element_value(xml_attrs,
                                    CRM_META "_" XML_RSC_ATTR_CLEAR_OP);
             crm_element_value_ms(xml_attrs,
                                  CRM_META "_" XML_RSC_ATTR_CLEAR_INTERVAL,
                                  &interval_ms);
         }
     }
     uname = crm_element_value(xml_op, XML_LRM_ATTR_TARGET);
 
     if ((rsc == NULL) || (uname == NULL)) {
         crm_log_xml_warn(stored_msg, "invalid failcount op");
         return I_NULL;
     }
 
     if (crm_element_value(xml_op, XML_LRM_ATTR_ROUTER_NODE)) {
         is_remote_node = TRUE;
     }
 
     crm_debug("Clearing failures for %s-interval %s on %s "
               "from attribute manager, CIB, and executor state",
               pcmk__readable_interval(interval_ms), rsc, uname);
 
     if (interval_ms) {
         interval_spec = crm_strdup_printf("%ums", interval_ms);
     }
     update_attrd_clear_failures(uname, rsc, op, interval_spec, is_remote_node);
     free(interval_spec);
 
     controld_cib_delete_last_failure(rsc, uname, op, interval_ms);
 
     lrm_clear_last_failure(rsc, uname, op, interval_ms);
 
     return I_NULL;
 }
 
 static enum crmd_fsa_input
 handle_lrm_delete(xmlNode *stored_msg)
 {
     const char *mode = NULL;
     xmlNode *msg_data = get_message_xml(stored_msg, F_CRM_DATA);
 
     CRM_CHECK(msg_data != NULL, return I_NULL);
 
     /* CRM_OP_LRM_DELETE has two distinct modes. The default behavior is to
      * relay the operation to the affected node, which will unregister the
      * resource from the local executor, clear the resource's history from the
      * CIB, and do some bookkeeping in the controller.
      *
      * However, if the affected node is offline, the client will specify
      * mode="cib" which means the controller receiving the operation should
      * clear the resource's history from the CIB and nothing else. This is used
      * to clear shutdown locks.
      */
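     /* Illustratively, a mode="cib" request's message data carries
      * PCMK__XA_MODE set to XML_TAG_CIB, the affected resource as an
      * XML_CIB_TAG_RESOURCE child, and the affected node in its
      * XML_LRM_ATTR_TARGET attribute (see the lookups below).
      */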
     mode = crm_element_value(msg_data, PCMK__XA_MODE);
     if ((mode == NULL) || strcmp(mode, XML_TAG_CIB)) {
         // Relay to affected node
         crm_xml_add(stored_msg, F_CRM_SYS_TO, CRM_SYSTEM_LRMD);
         return I_ROUTER;
 
     } else {
         // Delete CIB history locally (compare with do_lrm_delete())
         const char *from_sys = NULL;
         const char *user_name = NULL;
         const char *rsc_id = NULL;
         const char *node = NULL;
         xmlNode *rsc_xml = NULL;
         int rc = pcmk_rc_ok;
 
         rsc_xml = first_named_child(msg_data, XML_CIB_TAG_RESOURCE);
         CRM_CHECK(rsc_xml != NULL, return I_NULL);
 
         rsc_id = ID(rsc_xml);
         from_sys = crm_element_value(stored_msg, F_CRM_SYS_FROM);
         node = crm_element_value(msg_data, XML_LRM_ATTR_TARGET);
         user_name = pcmk__update_acl_user(stored_msg, F_CRM_USER, NULL);
         crm_debug("Handling " CRM_OP_LRM_DELETE " for %s on %s locally%s%s "
                   "(clearing CIB resource history only)", rsc_id, node,
                   (user_name? " for user " : ""), (user_name? user_name : ""));
         rc = controld_delete_resource_history(rsc_id, node, user_name,
                                               cib_dryrun|cib_sync_call);
         if (rc == pcmk_rc_ok) {
             rc = controld_delete_resource_history(rsc_id, node, user_name,
                                                   crmd_cib_smart_opt());
         }
 
         /* Notify the client, and the transition engine (the latter only
          * for mode="cib" CRM_OP_LRM_DELETE operations such as this one)
          */
         if (from_sys) {
             lrmd_event_data_t *op = NULL;
             const char *from_host = crm_element_value(stored_msg,
                                                       F_CRM_HOST_FROM);
             const char *transition;
 
             if (strcmp(from_sys, CRM_SYSTEM_TENGINE)) {
                 transition = crm_element_value(msg_data,
                                                XML_ATTR_TRANSITION_KEY);
             } else {
                 transition = crm_element_value(stored_msg,
                                                XML_ATTR_TRANSITION_KEY);
             }
 
             crm_info("Notifying %s on %s that %s was%s deleted",
                      from_sys, (from_host? from_host : "local node"), rsc_id,
                      ((rc == pcmk_rc_ok)? "" : " not"));
             op = lrmd_new_event(rsc_id, CRMD_ACTION_DELETE, 0);
             op->type = lrmd_event_exec_complete;
             op->user_data = strdup(transition? transition : FAKE_TE_ID);
             op->params = pcmk__strkey_table(free, free);
             g_hash_table_insert(op->params, strdup(XML_ATTR_CRM_VERSION),
                                 strdup(CRM_FEATURE_SET));
             controld_rc2event(op, rc);
             controld_ack_event_directly(from_host, from_sys, NULL, op, rsc_id);
             lrmd_free_event(op);
             controld_trigger_delete_refresh(from_sys, rsc_id);
         }
         return I_NULL;
     }
 }
 
 /*!
  * \brief Handle a CRM_OP_REMOTE_STATE message by updating remote peer cache
  *
  * \param[in] msg  Message XML
  *
  * \return Next FSA input
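  *
  * \note The node name and state are carried as attributes of the message
  *       element itself, via XML_ATTR_ID and XML_NODE_IN_CLUSTER (see the
  *       ID() and pcmk__xe_get_bool_attr() lookups below).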
  */
 static enum crmd_fsa_input
 handle_remote_state(const xmlNode *msg)
 {
     const char *remote_uname = ID(msg);
     crm_node_t *remote_peer;
     bool remote_is_up = false;
     int rc = pcmk_rc_ok;
 
     rc = pcmk__xe_get_bool_attr(msg, XML_NODE_IN_CLUSTER, &remote_is_up);
 
     CRM_CHECK(remote_uname && rc == pcmk_rc_ok, return I_NULL);
 
     remote_peer = crm_remote_peer_get(remote_uname);
     CRM_CHECK(remote_peer, return I_NULL);
 
     pcmk__update_peer_state(__func__, remote_peer,
                             remote_is_up ? CRM_NODE_MEMBER : CRM_NODE_LOST,
                             0);
+
     return I_NULL;
 }
 
 /*!
  * \brief Handle a CRM_OP_PING message
  *
  * \param[in] msg  Message XML
  *
  * \return Next FSA input
  */
 static enum crmd_fsa_input
 handle_ping(const xmlNode *msg)
 {
     const char *value = NULL;
     xmlNode *ping = NULL;
     xmlNode *reply = NULL;
 
     // Build reply
 
     ping = create_xml_node(NULL, XML_CRM_TAG_PING);
     value = crm_element_value(msg, F_CRM_SYS_TO);
     crm_xml_add(ping, XML_PING_ATTR_SYSFROM, value);
 
     // Add controller state
     value = fsa_state2string(controld_globals.fsa_state);
     crm_xml_add(ping, XML_PING_ATTR_CRMDSTATE, value);
     crm_notice("Current ping state: %s", value); // CTS needs this
 
     // Add controller health
     // @TODO maybe do some checks to determine meaningful status
     crm_xml_add(ping, XML_PING_ATTR_STATUS, "ok");
 
     // Send reply
     reply = create_reply(msg, ping);
     free_xml(ping);
     if (reply != NULL) {
         (void) relay_message(reply, TRUE);
         free_xml(reply);
     }
 
     // Nothing further to do
     return I_NULL;
 }
 
 /*!
  * \brief Handle a PCMK__CONTROLD_CMD_NODES message
  *
  * \param[in] request  Message XML
  *
  * \return Next FSA input
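  *
  * \note The reply data is an XML_CIB_TAG_NODES element with one
  *       XML_CIB_TAG_NODE child per cached cluster node, each carrying the
  *       node's id, uname, and membership state (see below).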
  */
 static enum crmd_fsa_input
 handle_node_list(const xmlNode *request)
 {
     GHashTableIter iter;
     crm_node_t *node = NULL;
     xmlNode *reply = NULL;
     xmlNode *reply_data = NULL;
 
     // Create message data for reply
     reply_data = create_xml_node(NULL, XML_CIB_TAG_NODES);
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
         xmlNode *xml = create_xml_node(reply_data, XML_CIB_TAG_NODE);
 
         crm_xml_add_ll(xml, XML_ATTR_ID, (long long) node->id); // uint32_t
         crm_xml_add(xml, XML_ATTR_UNAME, node->uname);
         crm_xml_add(xml, XML_NODE_IN_CLUSTER, node->state);
     }
 
     // Create and send reply
     reply = create_reply(request, reply_data);
     free_xml(reply_data);
     if (reply) {
         (void) relay_message(reply, TRUE);
         free_xml(reply);
     }
 
     // Nothing further to do
     return I_NULL;
 }
 
 /*!
  * \brief Handle a CRM_OP_NODE_INFO request
  *
  * \param[in] msg  Message XML
  *
  * \return Next FSA input
  */
 static enum crmd_fsa_input
 handle_node_info_request(const xmlNode *msg)
 {
     const char *value = NULL;
     crm_node_t *node = NULL;
     int node_id = 0;
     xmlNode *reply = NULL;
     xmlNode *reply_data = NULL;
 
     // Build reply
 
     reply_data = create_xml_node(NULL, XML_CIB_TAG_NODE);
     crm_xml_add(reply_data, XML_PING_ATTR_SYSFROM, CRM_SYSTEM_CRMD);
 
     // Add whether current partition has quorum
     pcmk__xe_set_bool_attr(reply_data, XML_ATTR_HAVE_QUORUM,
                            pcmk_is_set(controld_globals.flags,
                                        controld_has_quorum));
 
     // Check whether client requested node info by ID and/or name
     crm_element_value_int(msg, XML_ATTR_ID, &node_id);
     if (node_id < 0) {
         node_id = 0;
     }
     value = crm_element_value(msg, XML_ATTR_UNAME);
 
     // Default to local node if none given
     if ((node_id == 0) && (value == NULL)) {
         value = controld_globals.our_nodename;
     }
 
     node = pcmk__search_node_caches(node_id, value, CRM_GET_PEER_ANY);
     if (node) {
         crm_xml_add(reply_data, XML_ATTR_ID, node->uuid);
         crm_xml_add(reply_data, XML_ATTR_UNAME, node->uname);
         crm_xml_add(reply_data, XML_NODE_IS_PEER, node->state);
         pcmk__xe_set_bool_attr(reply_data, XML_NODE_IS_REMOTE,
                                pcmk_is_set(node->flags, crm_remote_node));
     }
 
     // Send reply
     reply = create_reply(msg, reply_data);
     free_xml(reply_data);
     if (reply != NULL) {
         (void) relay_message(reply, TRUE);
         free_xml(reply);
     }
 
     // Nothing further to do
     return I_NULL;
 }
 
 static void
 verify_feature_set(xmlNode *msg)
 {
     const char *dc_version = crm_element_value(msg, XML_ATTR_CRM_VERSION);
 
     if (dc_version == NULL) {
         /* All we really know is that the DC feature set is older than 3.1.0,
          * but that's also all that really matters.
          */
         dc_version = "3.0.14";
     }
 
     if (feature_set_compatible(dc_version, CRM_FEATURE_SET)) {
         crm_trace("Local feature set (%s) is compatible with DC's (%s)",
                   CRM_FEATURE_SET, dc_version);
     } else {
         crm_err("Local feature set (%s) is incompatible with DC's (%s)",
                 CRM_FEATURE_SET, dc_version);
 
         // Nothing is likely to improve without administrator involvement
         controld_set_fsa_input_flags(R_STAYDOWN);
         crmd_exit(CRM_EX_FATAL);
     }
 }
 
 // DC gets own shutdown all-clear
 static enum crmd_fsa_input
 handle_shutdown_self_ack(xmlNode *stored_msg)
 {
     const char *host_from = crm_element_value(stored_msg, F_CRM_HOST_FROM);
 
     if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) {
         // The expected case -- we initiated our own shutdown sequence
         crm_info("Shutting down controller");
         return I_STOP;
     }
 
     if (pcmk__str_eq(host_from, controld_globals.dc_name, pcmk__str_casei)) {
         // Must be logic error -- DC confirming its own unrequested shutdown
         crm_err("Shutting down controller immediately due to "
                 "unexpected shutdown confirmation");
         return I_TERMINATE;
     }
 
     if (controld_globals.fsa_state != S_STOPPING) {
         // Shouldn't happen -- non-DC confirming unrequested shutdown
         crm_err("Starting new DC election because %s is "
                 "confirming shutdown we did not request",
                 (host_from? host_from : "another node"));
         return I_ELECTION;
     }
 
     // Shouldn't happen, but we are already stopping anyway
     crm_debug("Ignoring unexpected shutdown confirmation from %s",
               (host_from? host_from : "another node"));
     return I_NULL;
 }
 
 // Non-DC gets shutdown all-clear from DC
 static enum crmd_fsa_input
 handle_shutdown_ack(xmlNode *stored_msg)
 {
     const char *host_from = crm_element_value(stored_msg, F_CRM_HOST_FROM);
 
     if (host_from == NULL) {
         crm_warn("Ignoring shutdown request without origin specified");
         return I_NULL;
     }
 
     if (pcmk__str_eq(host_from, controld_globals.dc_name,
                      pcmk__str_null_matches|pcmk__str_casei)) {
 
         if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) {
             crm_info("Shutting down controller after confirmation from %s",
                      host_from);
         } else {
             crm_err("Shutting down controller after unexpected "
                     "shutdown request from %s", host_from);
             controld_set_fsa_input_flags(R_STAYDOWN);
         }
         return I_STOP;
     }
 
     crm_warn("Ignoring shutdown request from %s because DC is %s",
              host_from, controld_globals.dc_name);
     return I_NULL;
 }
 
 static enum crmd_fsa_input
 handle_request(xmlNode *stored_msg, enum crmd_fsa_cause cause)
 {
     xmlNode *msg = NULL;
     const char *op = crm_element_value(stored_msg, F_CRM_TASK);
 
     /* Optimize this for the DC - it has the most to do */
 
     if (op == NULL) {
         crm_log_xml_warn(stored_msg, "[request without " F_CRM_TASK "]");
         return I_NULL;
     }
 
     if (strcmp(op, CRM_OP_SHUTDOWN_REQ) == 0) {
         const char *from = crm_element_value(stored_msg, F_CRM_HOST_FROM);
         crm_node_t *node = pcmk__search_cluster_node_cache(0, from);
 
         pcmk__update_peer_expected(__func__, node, CRMD_JOINSTATE_DOWN);
         if(AM_I_DC == FALSE) {
             return I_NULL; /* Done */
         }
     }
 
     /*========== DC-Only Actions ==========*/
     if (AM_I_DC) {
         if (strcmp(op, CRM_OP_JOIN_ANNOUNCE) == 0) {
             return I_NODE_JOIN;
 
         } else if (strcmp(op, CRM_OP_JOIN_REQUEST) == 0) {
             return I_JOIN_REQUEST;
 
         } else if (strcmp(op, CRM_OP_JOIN_CONFIRM) == 0) {
             return I_JOIN_RESULT;
 
         } else if (strcmp(op, CRM_OP_SHUTDOWN) == 0) {
             return handle_shutdown_self_ack(stored_msg);
 
         } else if (strcmp(op, CRM_OP_SHUTDOWN_REQ) == 0) {
             // Another controller wants to shut down its node
             return handle_shutdown_request(stored_msg);
-
-        } else if (strcmp(op, CRM_OP_REMOTE_STATE) == 0) {
-            /* a remote connection host is letting us know the node state */
-            return handle_remote_state(stored_msg);
         }
     }
 
     /*========== common actions ==========*/
     if (strcmp(op, CRM_OP_NOVOTE) == 0) {
         ha_msg_input_t fsa_input;
 
         fsa_input.msg = stored_msg;
         register_fsa_input_adv(C_HA_MESSAGE, I_NULL, &fsa_input,
                                A_ELECTION_COUNT | A_ELECTION_CHECK, FALSE,
                                __func__);
 
+    } else if (strcmp(op, CRM_OP_REMOTE_STATE) == 0) {
+        /* a remote connection host is letting us know the node state */
+        return handle_remote_state(stored_msg);
+
     } else if (strcmp(op, CRM_OP_THROTTLE) == 0) {
         throttle_update(stored_msg);
         if (AM_I_DC && (controld_globals.transition_graph != NULL)
             && !controld_globals.transition_graph->complete) {
 
             crm_debug("The throttle changed. Trigger a graph.");
             trigger_graph();
         }
         return I_NULL;
 
     } else if (strcmp(op, CRM_OP_CLEAR_FAILCOUNT) == 0) {
         return handle_failcount_op(stored_msg);
 
     } else if (strcmp(op, CRM_OP_VOTE) == 0) {
         /* count the vote and decide what to do after that */
         ha_msg_input_t fsa_input;
 
         fsa_input.msg = stored_msg;
         register_fsa_input_adv(C_HA_MESSAGE, I_NULL, &fsa_input,
                                A_ELECTION_COUNT | A_ELECTION_CHECK, FALSE,
                                __func__);
 
         /* Sometimes we _must_ go into S_ELECTION */
         if (controld_globals.fsa_state == S_HALT) {
             crm_debug("Forcing an election from S_HALT");
             return I_ELECTION;
 #if 0
         } else if (AM_I_DC) {
             /* This is the old way of doing things but what is gained? */
             return I_ELECTION;
 #endif
         }
 
     } else if (strcmp(op, CRM_OP_JOIN_OFFER) == 0) {
         verify_feature_set(stored_msg);
         crm_debug("Raising I_JOIN_OFFER: join-%s", crm_element_value(stored_msg, F_CRM_JOIN_ID));
         return I_JOIN_OFFER;
 
     } else if (strcmp(op, CRM_OP_JOIN_ACKNAK) == 0) {
         crm_debug("Raising I_JOIN_RESULT: join-%s", crm_element_value(stored_msg, F_CRM_JOIN_ID));
         return I_JOIN_RESULT;
 
     } else if (strcmp(op, CRM_OP_LRM_DELETE) == 0) {
         return handle_lrm_delete(stored_msg);
 
     } else if ((strcmp(op, CRM_OP_LRM_FAIL) == 0)
                || (strcmp(op, CRM_OP_LRM_REFRESH) == 0) // @COMPAT
                || (strcmp(op, CRM_OP_REPROBE) == 0)) {
 
         crm_xml_add(stored_msg, F_CRM_SYS_TO, CRM_SYSTEM_LRMD);
         return I_ROUTER;
 
     } else if (strcmp(op, CRM_OP_NOOP) == 0) {
         return I_NULL;
 
     } else if (strcmp(op, CRM_OP_LOCAL_SHUTDOWN) == 0) {
 
         crm_shutdown(SIGTERM);
         /*return I_SHUTDOWN; */
         return I_NULL;
 
     } else if (strcmp(op, CRM_OP_PING) == 0) {
         return handle_ping(stored_msg);
 
     } else if (strcmp(op, CRM_OP_NODE_INFO) == 0) {
         return handle_node_info_request(stored_msg);
 
     } else if (strcmp(op, CRM_OP_RM_NODE_CACHE) == 0) {
         int id = 0;
         const char *name = NULL;
 
         crm_element_value_int(stored_msg, XML_ATTR_ID, &id);
         name = crm_element_value(stored_msg, XML_ATTR_UNAME);
 
         if(cause == C_IPC_MESSAGE) {
             msg = create_request(CRM_OP_RM_NODE_CACHE, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL);
             if (send_cluster_message(NULL, crm_msg_crmd, msg, TRUE) == FALSE) {
                 crm_err("Could not instruct peers to remove references to node %s/%u", name, id);
             } else {
                 crm_notice("Instructing peers to remove references to node %s/%u", name, id);
             }
             free_xml(msg);
 
         } else {
             reap_crm_member(id, name);
 
             /* If we're forgetting this node, also forget any failures to fence
              * it, so we don't carry that over to any node added later with the
              * same name.
              */
             st_fail_count_reset(name);
         }
 
     } else if (strcmp(op, CRM_OP_MAINTENANCE_NODES) == 0) {
         xmlNode *xml = get_message_xml(stored_msg, F_CRM_DATA);
 
         remote_ra_process_maintenance_nodes(xml);
 
     } else if (strcmp(op, PCMK__CONTROLD_CMD_NODES) == 0) {
         return handle_node_list(stored_msg);
 
         /*========== (NOT_DC)-Only Actions ==========*/
     } else if (!AM_I_DC) {
 
         if (strcmp(op, CRM_OP_SHUTDOWN) == 0) {
             return handle_shutdown_ack(stored_msg);
         }
 
     } else {
         crm_err("Unexpected request (%s) sent to %s", op, AM_I_DC ? "the DC" : "non-DC node");
         crm_log_xml_err(stored_msg, "Unexpected");
     }
 
     return I_NULL;
 }
 
 static void
 handle_response(xmlNode *stored_msg)
 {
     const char *op = crm_element_value(stored_msg, F_CRM_TASK);
 
     if (op == NULL) {
         crm_log_xml_err(stored_msg, "Bad message");
 
     } else if (AM_I_DC && strcmp(op, CRM_OP_PECALC) == 0) {
         // Check whether the scheduler's answer has been superseded by a subsequent request
         const char *msg_ref = crm_element_value(stored_msg, XML_ATTR_REFERENCE);
 
         if (msg_ref == NULL) {
             crm_err("%s - Ignoring calculation with no reference", op);
 
         } else if (pcmk__str_eq(msg_ref, controld_globals.fsa_pe_ref,
                                 pcmk__str_none)) {
             ha_msg_input_t fsa_input;
 
             controld_stop_sched_timer();
             fsa_input.msg = stored_msg;
             register_fsa_input_later(C_IPC_MESSAGE, I_PE_SUCCESS, &fsa_input);
 
         } else {
             crm_info("%s calculation %s is obsolete", op, msg_ref);
         }
 
     } else if (strcmp(op, CRM_OP_VOTE) == 0
                || strcmp(op, CRM_OP_SHUTDOWN_REQ) == 0 || strcmp(op, CRM_OP_SHUTDOWN) == 0) {
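         // Nothing further to do for these replies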
 
     } else {
         const char *host_from = crm_element_value(stored_msg, F_CRM_HOST_FROM);
 
         crm_err("Unexpected response (op=%s, src=%s) sent to the %s",
                 op, host_from, AM_I_DC ? "DC" : "controller");
     }
 }
 
 static enum crmd_fsa_input
 handle_shutdown_request(xmlNode * stored_msg)
 {
     /* Handle this here to avoid potential version issues, where the
      * shutdown message/procedure may have changed in later versions.
      *
      * This way, the DC is always in control of the shutdown.
      */
 
     char *now_s = NULL;
     const char *host_from = crm_element_value(stored_msg, F_CRM_HOST_FROM);
 
     if (host_from == NULL) {
         /* We are the DC, and we are shutting ourselves down */
         host_from = controld_globals.our_nodename;
     }
 
     crm_info("Creating shutdown request for %s (state=%s)", host_from,
              fsa_state2string(controld_globals.fsa_state));
     crm_log_xml_trace(stored_msg, "message");
 
     now_s = pcmk__ttoa(time(NULL));
     update_attrd(host_from, XML_CIB_ATTR_SHUTDOWN, now_s, NULL, FALSE);
     free(now_s);
 
     /* will be picked up by the TE as long as it's running */
     return I_NULL;
 }
 
 static void
 send_msg_via_ipc(xmlNode * msg, const char *sys)
 {
     pcmk__client_t *client_channel = NULL;
 
     CRM_CHECK(sys != NULL, return);
 
     client_channel = pcmk__find_client_by_id(sys);
 
     if (crm_element_value(msg, F_CRM_HOST_FROM) == NULL) {
         crm_xml_add(msg, F_CRM_HOST_FROM, controld_globals.our_nodename);
     }
 
     if (client_channel != NULL) {
         /* Transient clients such as crmadmin */
         pcmk__ipc_send_xml(client_channel, 0, msg, crm_ipc_server_event);
 
     } else if (pcmk__str_eq(sys, CRM_SYSTEM_TENGINE, pcmk__str_none)) {
         xmlNode *data = get_message_xml(msg, F_CRM_DATA);
 
         process_te_message(msg, data);
 
     } else if (pcmk__str_eq(sys, CRM_SYSTEM_LRMD, pcmk__str_none)) {
         fsa_data_t fsa_data;
         ha_msg_input_t fsa_input;
 
         fsa_input.msg = msg;
         fsa_input.xml = get_message_xml(msg, F_CRM_DATA);
 
         fsa_data.id = 0;
         fsa_data.actions = 0;
         fsa_data.data = &fsa_input;
         fsa_data.fsa_input = I_MESSAGE;
         fsa_data.fsa_cause = C_IPC_MESSAGE;
         fsa_data.origin = __func__;
         fsa_data.data_type = fsa_dt_ha_msg;
 
         do_lrm_invoke(A_LRM_INVOKE, C_IPC_MESSAGE, controld_globals.fsa_state,
                       I_MESSAGE, &fsa_data);
 
     } else if (crmd_is_proxy_session(sys)) {
         crmd_proxy_send(sys, msg);
 
     } else {
         crm_info("Received invalid request: unknown subsystem '%s'", sys);
     }
 }
 
 void
 delete_ha_msg_input(ha_msg_input_t * orig)
 {
     if (orig == NULL) {
         return;
     }
     free_xml(orig->msg);
     free(orig);
 }
 
 /*!
  * \internal
- * \brief Notify the DC of a remote node state change
+ * \brief Notify the cluster of a remote node state change
  *
  * \param[in] node_name  Node's name
- * \param[in] node_up    TRUE if node is up, FALSE if down
+ * \param[in] node_up    true if node is up, false if down
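+ *
+ * \note Controllers that receive the resulting CRM_OP_REMOTE_STATE message
+ *       update their remote peer cache via handle_remote_state().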
  */
 void
-send_remote_state_message(const char *node_name, gboolean node_up)
+broadcast_remote_state_message(const char *node_name, bool node_up)
 {
-    /* If we don't have a DC, or the message fails, we have a failsafe:
-     * the DC will eventually pick up the change via the CIB node state.
-     * The message allows it to happen sooner if possible.
-     */
-    if (controld_globals.dc_name != NULL) {
-        xmlNode *msg = create_request(CRM_OP_REMOTE_STATE, NULL,
-                                      controld_globals.dc_name, CRM_SYSTEM_DC,
-                                      CRM_SYSTEM_CRMD, NULL);
-
-        crm_info("Notifying DC %s of Pacemaker Remote node %s %s",
-                 controld_globals.dc_name, node_name,
-                 node_up? "coming up" : "going down");
-        crm_xml_add(msg, XML_ATTR_ID, node_name);
-        pcmk__xe_set_bool_attr(msg, XML_NODE_IN_CLUSTER, node_up);
-        send_cluster_message(crm_get_peer(0, controld_globals.dc_name),
-                             crm_msg_crmd, msg, TRUE);
-        free_xml(msg);
-    } else {
-        crm_debug("No DC to notify of Pacemaker Remote node %s %s",
-                  node_name, (node_up? "coming up" : "going down"));
-    }
+    xmlNode *msg = create_request(CRM_OP_REMOTE_STATE, NULL, NULL,
+                                  CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL);
+
+    crm_info("Notifying cluster of Pacemaker Remote node %s %s",
+             node_name, node_up? "coming up" : "going down");
+    crm_xml_add(msg, XML_ATTR_ID, node_name);
+    pcmk__xe_set_bool_attr(msg, XML_NODE_IN_CLUSTER, node_up);
+    send_cluster_message(NULL, crm_msg_crmd, msg, TRUE);
+    free_xml(msg);
 }
 
diff --git a/daemons/controld/controld_messages.h b/daemons/controld/controld_messages.h
index 58b4ee1159..4108961182 100644
--- a/daemons/controld/controld_messages.h
+++ b/daemons/controld/controld_messages.h
@@ -1,86 +1,86 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef XML_CRM_MESSAGES__H
 #  define XML_CRM_MESSAGES__H
 
 #  include <crm/crm.h>
 #  include <crm/common/ipc_internal.h>
 #  include <crm/common/xml.h>
 #  include <crm/cluster/internal.h>
 #  include <controld_fsa.h>
 
 typedef struct ha_msg_input_s {
     xmlNode *msg;
     xmlNode *xml;
 
 } ha_msg_input_t;
 
 extern void delete_ha_msg_input(ha_msg_input_t * orig);
 
 extern void *fsa_typed_data_adv(fsa_data_t * fsa_data, enum fsa_data_type a_type,
                                 const char *caller);
 
 #  define fsa_typed_data(x) fsa_typed_data_adv(msg_data, x, __func__)
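 
 /* Example usage, as in do_msg_route() in controld_messages.c:
  *
  *     ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg);
  *
  * fsa_typed_data_adv() asserts if the stored message data is not of the
  * requested type.
  */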
 
 extern void register_fsa_error_adv(enum crmd_fsa_cause cause, enum crmd_fsa_input input,
                                    fsa_data_t * cur_data, void *new_data, const char *raised_from);
 
 #define register_fsa_error(cause, input, new_data)  \
     register_fsa_error_adv(cause, input, msg_data, new_data, __func__)
 
 void register_fsa_input_adv(enum crmd_fsa_cause cause,
                             enum crmd_fsa_input input, void *data,
                             uint64_t with_actions, gboolean prepend,
                             const char *raised_from);
 
 extern void fsa_dump_queue(int log_level);
 extern void route_message(enum crmd_fsa_cause cause, xmlNode * input);
 
 #  define crmd_fsa_stall(suppress) do {                                 \
     if(suppress == FALSE && msg_data != NULL) {                         \
         register_fsa_input_adv(                                         \
             ((fsa_data_t*)msg_data)->fsa_cause, I_WAIT_FOR_EVENT,       \
             ((fsa_data_t*)msg_data)->data, action, TRUE, __func__);     \
     } else {                                                            \
         register_fsa_input_adv(                                         \
             C_FSA_INTERNAL, I_WAIT_FOR_EVENT,                           \
             NULL, action, TRUE, __func__);                              \
     }                                                                   \
     } while(0)
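 
 /* Illustrative: an FSA action handler that cannot proceed yet can call
  *
  *     crmd_fsa_stall(FALSE);
  *
  * to requeue its input (and pending actions) with I_WAIT_FOR_EVENT,
  * stalling the FSA until further input arrives
  * (see register_fsa_input_adv()).
  */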
 
 #define register_fsa_input(cause, input, data)          \
     register_fsa_input_adv(cause, input, data, A_NOTHING, FALSE, __func__)
 
 #define register_fsa_input_before(cause, input, data)   \
     register_fsa_input_adv(cause, input, data, A_NOTHING, TRUE, __func__)
 
 #define register_fsa_input_later(cause, input, data)    \
     register_fsa_input_adv(cause, input, data, A_NOTHING, FALSE, __func__)
 
 void delete_fsa_input(fsa_data_t * fsa_data);
 
 fsa_data_t *get_message(void);
 
 extern gboolean relay_message(xmlNode * msg, gboolean originated_locally);
 
 gboolean crmd_is_proxy_session(const char *session);
 void crmd_proxy_send(const char *session, xmlNode *msg);
 
 bool controld_authorize_ipc_message(const xmlNode *client_msg,
                                     pcmk__client_t *curr_client,
                                     const char *proxy_session);
 
 extern gboolean send_request(xmlNode * msg, char **msg_reference);
 
 extern ha_msg_input_t *copy_ha_msg_input(ha_msg_input_t * orig);
 
-void send_remote_state_message(const char *node_name, gboolean node_up);
+void broadcast_remote_state_message(const char *node_name, bool node_up);
 
 #endif
diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c
index faf3a32de7..7648403e86 100644
--- a/daemons/controld/controld_remote_ra.c
+++ b/daemons/controld/controld_remote_ra.c
@@ -1,1375 +1,1375 @@
 /*
  * Copyright 2013-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml_internal.h>
 #include <crm/lrmd.h>
 #include <crm/lrmd_internal.h>
 #include <crm/services.h>
 
 #include <pacemaker-controld.h>
 
 #define REMOTE_LRMD_RA "remote"
 
 /* The max start timeout before cmd retry */
 #define MAX_START_TIMEOUT_MS 10000
 
 #define cmd_set_flags(cmd, flags_to_set) do { \
     (cmd)->status = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
                                        "Remote command", (cmd)->rsc_id, (cmd)->status, \
                                        (flags_to_set), #flags_to_set); \
         } while (0)
 
 #define cmd_clear_flags(cmd, flags_to_clear) do { \
     (cmd)->status = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
                                          "Remote command", (cmd)->rsc_id, (cmd)->status, \
                                          (flags_to_clear), #flags_to_clear); \
         } while (0)
 
 enum remote_cmd_status {
     cmd_reported_success    = (1 << 0),
     cmd_cancel              = (1 << 1),
 };
 
 typedef struct remote_ra_cmd_s {
     /*! the local node the cmd is issued from */
     char *owner;
     /*! the remote node the cmd is executed on */
     char *rsc_id;
     /*! the action to execute */
     char *action;
     /*! some string the client wants us to give it back */
     char *userdata;
     /*! start delay in ms */
     int start_delay;
     /*! timer id used for start delay. */
     int delay_id;
     /*! timeout in ms for cmd */
     int timeout;
     int remaining_timeout;
     /*! recurring interval in ms */
     guint interval_ms;
     /*! interval timer id */
     int interval_id;
     int monitor_timeout_id;
     int takeover_timeout_id;
     /*! action parameters */
     lrmd_key_value_t *params;
     pcmk__action_result_t result;
     int call_id;
     time_t start_time;
     uint32_t status;
 } remote_ra_cmd_t;
 
 #define lrm_remote_set_flags(lrm_state, flags_to_set) do { \
     lrm_state_t *lrm = (lrm_state); \
     remote_ra_data_t *ra = lrm->remote_ra_data; \
     ra->status = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Remote", \
                                     lrm->node_name, ra->status, \
                                     (flags_to_set), #flags_to_set); \
         } while (0)
 
 #define lrm_remote_clear_flags(lrm_state, flags_to_clear) do { \
     lrm_state_t *lrm = (lrm_state); \
     remote_ra_data_t *ra = lrm->remote_ra_data; \
     ra->status = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, "Remote", \
                                       lrm->node_name, ra->status, \
                                       (flags_to_clear), #flags_to_clear); \
         } while (0)
 
 enum remote_status {
     expect_takeover     = (1 << 0),
     takeover_complete   = (1 << 1),
     remote_active       = (1 << 2),
     /* Maintenance mode is difficult to determine from the controller's
      * context, so we have the scheduler signal it back to us with the
      * transition.
      */
     remote_in_maint     = (1 << 3),
     /* The same applies to whether we are controlling a guest node or a
      * remote node. Fortunately the transition already carries a
      * meta-attribute for this, and since the answer doesn't change over
      * time, we note it down at resource start for later use when the
      * attributes aren't at hand.
      */
     controlling_guest   = (1 << 4),
 };
 
 typedef struct remote_ra_data_s {
     crm_trigger_t *work;
     remote_ra_cmd_t *cur_cmd;
     GList *cmds;
     GList *recurring_cmds;
     uint32_t status;
 } remote_ra_data_t;
 
 static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms);
 static void handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd);
 static GList *fail_all_monitor_cmds(GList * list);
 
 static void
 free_cmd(gpointer user_data)
 {
     remote_ra_cmd_t *cmd = user_data;
 
     if (!cmd) {
         return;
     }
     if (cmd->delay_id) {
         g_source_remove(cmd->delay_id);
     }
     if (cmd->interval_id) {
         g_source_remove(cmd->interval_id);
     }
     if (cmd->monitor_timeout_id) {
         g_source_remove(cmd->monitor_timeout_id);
     }
     if (cmd->takeover_timeout_id) {
         g_source_remove(cmd->takeover_timeout_id);
     }
     free(cmd->owner);
     free(cmd->rsc_id);
     free(cmd->action);
     free(cmd->userdata);
     pcmk__reset_result(&(cmd->result));
     lrmd_key_value_freeall(cmd->params);
     free(cmd);
 }
 
 static int
 generate_callid(void)
 {
     static int remote_ra_callid = 0;
 
     remote_ra_callid++;
     if (remote_ra_callid <= 0) {
         remote_ra_callid = 1;
     }
 
     return remote_ra_callid;
 }
 
 static gboolean
 recurring_helper(gpointer data)
 {
     remote_ra_cmd_t *cmd = data;
     lrm_state_t *connection_rsc = NULL;
 
     cmd->interval_id = 0;
     connection_rsc = lrm_state_find(cmd->rsc_id);
     if (connection_rsc && connection_rsc->remote_ra_data) {
         remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
 
         ra_data->recurring_cmds = g_list_remove(ra_data->recurring_cmds, cmd);
 
         ra_data->cmds = g_list_append(ra_data->cmds, cmd);
         mainloop_set_trigger(ra_data->work);
     }
     return FALSE;
 }
 
 static gboolean
 start_delay_helper(gpointer data)
 {
     remote_ra_cmd_t *cmd = data;
     lrm_state_t *connection_rsc = NULL;
 
     cmd->delay_id = 0;
     connection_rsc = lrm_state_find(cmd->rsc_id);
     if (connection_rsc && connection_rsc->remote_ra_data) {
         remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
 
         mainloop_set_trigger(ra_data->work);
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Handle cluster communication related to pacemaker_remote node joining
  *
  * \param[in] node_name  Name of newly integrated pacemaker_remote node
  */
 static void
 remote_node_up(const char *node_name)
 {
     int call_opt;
     xmlNode *update, *state;
     crm_node_t *node;
     enum controld_section_e section = controld_section_all;
 
     CRM_CHECK(node_name != NULL, return);
     crm_info("Announcing Pacemaker Remote node %s", node_name);
 
     /* Clear node's entire state (resource history and transient attributes)
      * other than shutdown locks. The transient attributes should and normally
      * will be cleared when the node leaves, but since remote node state has a
      * number of corner cases, clear them here as well, to be sure.
      */
     call_opt = crmd_cib_smart_opt();
     if (pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) {
         section = controld_section_all_unlocked;
     }
     /* Purge node from attrd's memory */
     update_attrd_remote_node_removed(node_name, NULL);
 
     controld_delete_node_state(node_name, section, call_opt);
 
     /* Delete node's probe_complete attribute. This serves two purposes:
      *
      * - @COMPAT DCs < 1.1.14 in a rolling upgrade might use it
      * - deleting it (or any attribute for that matter) here ensures the
      *   attribute manager learns the node is remote
      */
     update_attrd(node_name, CRM_OP_PROBED, NULL, NULL, TRUE);
 
     /* Ensure node is in the remote peer cache with member status */
     node = crm_remote_peer_get(node_name);
     CRM_CHECK(node != NULL, return);
     pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0);
 
     /* pacemaker_remote nodes don't participate in the membership layer,
      * so cluster nodes don't automatically get notified when they come and go.
      * We send a cluster message to the DC, and update the CIB node state entry,
      * so the DC will get it sooner (via message) or later (via CIB refresh),
      * and any other interested parties can query the CIB.
      */
-    send_remote_state_message(node_name, TRUE);
+    broadcast_remote_state_message(node_name, true);
 
     update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
     state = create_node_state_update(node, node_update_cluster, update,
                                      __func__);
 
     /* Clear the XML_NODE_IS_FENCED flag in the node state. If the node ever
      * needs to be fenced, this flag will allow various actions to determine
      * whether the fencing has happened yet.
      */
     crm_xml_add(state, XML_NODE_IS_FENCED, "0");
 
     /* TODO: If the remote connection drops, and this (async) CIB update either
      * failed or has not yet completed, later actions could mistakenly think the
      * node has already been fenced (if the XML_NODE_IS_FENCED attribute was
      * previously set, because it won't have been cleared). This could prevent
      * actual fencing or allow recurring monitor failures to be cleared too
      * soon. Ideally, we wouldn't rely on the CIB for the fenced status.
      */
     controld_update_cib(XML_CIB_TAG_STATUS, update, call_opt, NULL);
     free_xml(update);
 }
 
 enum down_opts {
     DOWN_KEEP_LRM,  // Keep the node's resource history in the CIB
     DOWN_ERASE_LRM  // Erase the node's resource history from the CIB
 };
 
 /*!
  * \internal
  * \brief Handle cluster communication related to pacemaker_remote node leaving
  *
  * \param[in] node_name  Name of lost node
  * \param[in] opts       Whether to keep or erase LRM history
  */
 static void
 remote_node_down(const char *node_name, const enum down_opts opts)
 {
     xmlNode *update;
     int call_opt = crmd_cib_smart_opt();
     crm_node_t *node;
 
     /* Purge node from attrd's memory */
     update_attrd_remote_node_removed(node_name, NULL);
 
     /* Normally, only node attributes should be erased, and the resource history
      * should be kept until the node comes back up. However, after a successful
      * fence, we want to clear the history as well, so we don't think resources
      * are still running on the node.
      */
     if (opts == DOWN_ERASE_LRM) {
         controld_delete_node_state(node_name, controld_section_all, call_opt);
     } else {
         controld_delete_node_state(node_name, controld_section_attrs, call_opt);
     }
 
     /* Ensure node is in the remote peer cache with lost state */
     node = crm_remote_peer_get(node_name);
     CRM_CHECK(node != NULL, return);
     pcmk__update_peer_state(__func__, node, CRM_NODE_LOST, 0);
 
     /* Notify DC */
-    send_remote_state_message(node_name, FALSE);
+    broadcast_remote_state_message(node_name, false);
 
     /* Update CIB node state */
     update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
     create_node_state_update(node, node_update_cluster, update, __func__);
     controld_update_cib(XML_CIB_TAG_STATUS, update, call_opt, NULL);
     free_xml(update);
 }
 
 /*!
  * \internal
  * \brief Handle effects of a remote RA command on node state
  *
  * \param[in] cmd  Completed remote RA command
  */
 static void
 check_remote_node_state(const remote_ra_cmd_t *cmd)
 {
     /* Only successful actions can change node state */
     if (!pcmk__result_ok(&(cmd->result))) {
         return;
     }
 
     if (pcmk__str_eq(cmd->action, "start", pcmk__str_casei)) {
         remote_node_up(cmd->rsc_id);
 
     } else if (pcmk__str_eq(cmd->action, "migrate_from", pcmk__str_casei)) {
         /* After a successful migration, we don't need to do remote_node_up()
          * because the DC already knows the node is up, and we don't want to
          * clear LRM history etc. We do need to add the remote node to this
          * host's remote peer cache, because (unless it happens to be DC)
          * it hasn't been tracking the remote node, and other code relies on
          * the cache to distinguish remote nodes from unseen cluster nodes.
          */
         crm_node_t *node = crm_remote_peer_get(cmd->rsc_id);
 
         CRM_CHECK(node != NULL, return);
         pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0);
 
     } else if (pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) {
         lrm_state_t *lrm_state = lrm_state_find(cmd->rsc_id);
         remote_ra_data_t *ra_data = lrm_state? lrm_state->remote_ra_data : NULL;
 
         if (ra_data) {
             if (!pcmk_is_set(ra_data->status, takeover_complete)) {
                 /* Stop means down if we didn't successfully migrate elsewhere */
                 remote_node_down(cmd->rsc_id, DOWN_KEEP_LRM);
             } else if (AM_I_DC == FALSE) {
                 /* Only the connection host and DC track node state,
                  * so if the connection migrated elsewhere and we aren't DC,
                  * un-cache the node, so we don't have stale info
                  */
                 crm_remote_peer_cache_remove(cmd->rsc_id);
             }
         }
     }
 
     /* We don't do anything for successful monitors, which is correct for
      * routine recurring monitors, and for monitors on nodes where the
      * connection isn't supposed to be (the cluster will stop the connection in
      * that case). However, if the initial probe finds the connection already
      * active on the node where we want it, we probably should do
      * remote_node_up(). Unfortunately, we can't distinguish that case here.
      * Given that connections have to be initiated by the cluster, the chance of
      * that should be close to zero.
      */
 }
 
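 /*!
  * \internal
  * \brief Report a remote RA command's result as an executor event
  *
  * \param[in,out] cmd  Completed command to report
  */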
 static void
 report_remote_ra_result(remote_ra_cmd_t * cmd)
 {
     lrmd_event_data_t op = { 0, };
 
     check_remote_node_state(cmd);
 
     op.type = lrmd_event_exec_complete;
     op.rsc_id = cmd->rsc_id;
     op.op_type = cmd->action;
     op.user_data = cmd->userdata;
     op.timeout = cmd->timeout;
     op.interval_ms = cmd->interval_ms;
     op.t_run = (unsigned int) cmd->start_time;
     op.t_rcchange = (unsigned int) cmd->start_time;
 
     lrmd__set_result(&op, cmd->result.exit_status, cmd->result.execution_status,
                      cmd->result.exit_reason);
 
     if (pcmk_is_set(cmd->status, cmd_reported_success) && !pcmk__result_ok(&(cmd->result))) {
         op.t_rcchange = (unsigned int) time(NULL);
         /* This edge case should be all but impossible in practice, but if it
          * did occur, the failure would not be processed correctly. It is only
          * remotely possible because we can detect the failure of a connection
          * resource's TCP connection at any moment after start completes (the
          * actual recurring operation is just a connectivity ping).
          *
          * Basically, the first successful monitor result and a subsequent
          * failed monitor result are not guaranteed to have different
          * timestamps, but we have to make it look as if the operations
          * occurred at separate times. */
         if (op.t_rcchange == op.t_run) {
             op.t_rcchange++;
         }
     }
 
     if (cmd->params) {
         lrmd_key_value_t *tmp;
 
         op.params = pcmk__strkey_table(free, free);
         for (tmp = cmd->params; tmp; tmp = tmp->next) {
             g_hash_table_insert(op.params, strdup(tmp->key), strdup(tmp->value));
         }
 
     }
     op.call_id = cmd->call_id;
     op.remote_nodename = cmd->owner;
 
     lrm_op_callback(&op);
 
     if (op.params) {
         g_hash_table_destroy(op.params);
     }
     lrmd__reset_result(&op);
 }
 
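 /*!
  * \internal
  * \brief Recalculate how much of a command's timeout remains
  *
  * \param[in,out] cmd  Command whose remaining_timeout member will be updated
  */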
 static void
 update_remaining_timeout(remote_ra_cmd_t * cmd)
 {
     cmd->remaining_timeout = ((cmd->timeout / 1000) - (time(NULL) - cmd->start_time)) * 1000;
 }
 
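 /*!
  * \internal
  * \brief Retry a remote connection attempt that failed (timer callback)
  *
  * \param[in,out] data  Executor state (lrm_state_t *) for the connection
  *
  * \return FALSE (to tell glib not to re-run the timer)
  */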
 static gboolean
 retry_start_cmd_cb(gpointer data)
 {
     lrm_state_t *lrm_state = data;
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     remote_ra_cmd_t *cmd = NULL;
     int rc = ETIME;
 
     if (!ra_data || !ra_data->cur_cmd) {
         return FALSE;
     }
     cmd = ra_data->cur_cmd;
     if (!pcmk__strcase_any_of(cmd->action, "start", "migrate_from", NULL)) {
         return FALSE;
     }
     update_remaining_timeout(cmd);
 
     if (cmd->remaining_timeout > 0) {
         rc = handle_remote_ra_start(lrm_state, cmd, cmd->remaining_timeout);
     } else {
         pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                          PCMK_EXEC_TIMEOUT,
                          "Not enough time remains to retry remote connection");
     }
 
     if (rc != pcmk_rc_ok) {
         report_remote_ra_result(cmd);
 
         if (ra_data->cmds) {
             mainloop_set_trigger(ra_data->work);
         }
         ra_data->cur_cmd = NULL;
         free_cmd(cmd);
     } else {
         /* wait for connection event */
     }
 
     return FALSE;
 }
 
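 /*!
  * \internal
  * \brief Stop a connection after waiting too long for takeover
  *        (timer callback)
  *
  * \param[in,out] data  Stop command (remote_ra_cmd_t *) that was waiting
  *
  * \return FALSE (to tell glib not to re-run the timer)
  */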
 static gboolean
 connection_takeover_timeout_cb(gpointer data)
 {
     lrm_state_t *lrm_state = NULL;
     remote_ra_cmd_t *cmd = data;
 
     crm_info("Takeover event timed out for node %s", cmd->rsc_id);
     cmd->takeover_timeout_id = 0;
 
     lrm_state = lrm_state_find(cmd->rsc_id);
 
     handle_remote_ra_stop(lrm_state, cmd);
     free_cmd(cmd);
 
     return FALSE;
 }
 
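 /*!
  * \internal
  * \brief Fail a monitor whose poke got no response in time (timer callback)
  *
  * \param[in,out] data  Monitor command (remote_ra_cmd_t *) that timed out
  *
  * \return FALSE (to tell glib not to re-run the timer)
  */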
 static gboolean
 monitor_timeout_cb(gpointer data)
 {
     lrm_state_t *lrm_state = NULL;
     remote_ra_cmd_t *cmd = data;
 
     lrm_state = lrm_state_find(cmd->rsc_id);
 
     crm_info("Timed out waiting for remote poke response from %s%s",
              cmd->rsc_id, (lrm_state? "" : " (no LRM state)"));
     cmd->monitor_timeout_id = 0;
     pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_TIMEOUT,
                      "Remote executor did not respond");
 
     if (lrm_state && lrm_state->remote_ra_data) {
         remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
         if (ra_data->cur_cmd == cmd) {
             ra_data->cur_cmd = NULL;
         }
         if (ra_data->cmds) {
             mainloop_set_trigger(ra_data->work);
         }
     }
 
     report_remote_ra_result(cmd);
     free_cmd(cmd);
 
     if(lrm_state) {
         lrm_state_disconnect(lrm_state);
     }
     return FALSE;
 }
 
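 /*!
  * \internal
  * \brief Process a synthesized successful result for a resource action
  *
  * \param[in,out] lrm_state  Executor state for the event (NULL for local node)
  * \param[in]     rsc_id     ID of resource the event is for
  * \param[in]     op_type    Name of action the event is for
  */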
 static void
 synthesize_lrmd_success(lrm_state_t *lrm_state, const char *rsc_id, const char *op_type)
 {
     lrmd_event_data_t op = { 0, };
 
     if (lrm_state == NULL) {
         /* If no executor state was given, assume the local node */
         lrm_state = lrm_state_find(controld_globals.our_nodename);
     }
     CRM_ASSERT(lrm_state != NULL);
 
     op.type = lrmd_event_exec_complete;
     op.rsc_id = rsc_id;
     op.op_type = op_type;
     op.t_run = (unsigned int) time(NULL);
     op.t_rcchange = op.t_run;
     op.call_id = generate_callid();
     lrmd__set_result(&op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
     process_lrm_event(lrm_state, &op, NULL, NULL);
 }
 
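 /*!
  * \internal
  * \brief Process an event from a remote executor connection
  *
  * \param[in,out] op  Event to process
  */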
 void
 remote_lrm_op_callback(lrmd_event_data_t * op)
 {
     gboolean cmd_handled = FALSE;
     lrm_state_t *lrm_state = NULL;
     remote_ra_data_t *ra_data = NULL;
     remote_ra_cmd_t *cmd = NULL;
 
     crm_debug("Processing '%s%s%s' event on remote connection to %s: %s "
               "(%d) status=%s (%d)",
               (op->op_type? op->op_type : ""), (op->op_type? " " : ""),
               lrmd_event_type2str(op->type), op->remote_nodename,
               services_ocf_exitcode_str(op->rc), op->rc,
               pcmk_exec_status_str(op->op_status), op->op_status);
 
     lrm_state = lrm_state_find(op->remote_nodename);
     if (!lrm_state || !lrm_state->remote_ra_data) {
         crm_debug("No state information found for remote connection event");
         return;
     }
     ra_data = lrm_state->remote_ra_data;
 
     if (op->type == lrmd_event_new_client) {
         // Another client has connected to the remote daemon
 
         if (pcmk_is_set(ra_data->status, expect_takeover)) {
             // Great, we knew this was coming
             lrm_remote_clear_flags(lrm_state, expect_takeover);
             lrm_remote_set_flags(lrm_state, takeover_complete);
 
         } else {
             crm_err("Disconnecting from Pacemaker Remote node %s due to "
                     "unexpected client takeover", op->remote_nodename);
             /* In this case, lrmd_tls_connection_destroy() will be called
              * under the control of the main loop. Do not free lrm_state->conn
              * yet; it will be freed in the subsequent stop action.
              */
             lrm_state_disconnect_only(lrm_state);
         }
         return;
     }
 
     /* filter all EXEC events up */
     if (op->type == lrmd_event_exec_complete) {
         if (pcmk_is_set(ra_data->status, takeover_complete)) {
             crm_debug("Ignoring event: connection was taken over by another node");
         } else {
             lrm_op_callback(op);
         }
         return;
     }
 
     if ((op->type == lrmd_event_disconnect) && (ra_data->cur_cmd == NULL)) {
 
         if (!pcmk_is_set(ra_data->status, remote_active)) {
             crm_debug("Disconnection from Pacemaker Remote node %s complete",
                       lrm_state->node_name);
 
         } else if (!remote_ra_is_in_maintenance(lrm_state)) {
             crm_err("Lost connection to Pacemaker Remote node %s",
                     lrm_state->node_name);
             ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
             ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
 
         } else {
             crm_notice("Unmanaged Pacemaker Remote node %s disconnected",
                        lrm_state->node_name);
             /* Do roughly what a 'stop' on the remote-resource would do */
             handle_remote_ra_stop(lrm_state, NULL);
             remote_node_down(lrm_state->node_name, DOWN_KEEP_LRM);
             /* now fake the reply of a successful 'stop' */
             synthesize_lrmd_success(NULL, lrm_state->node_name, "stop");
         }
         return;
     }
 
     if (!ra_data->cur_cmd) {
         crm_debug("No in-flight command to match event against");
         return;
     }
 
     cmd = ra_data->cur_cmd;
 
     /* Start and migrate_from actions complete after the connection
      * comes back to us */
     if (op->type == lrmd_event_connect && pcmk__strcase_any_of(cmd->action, "start",
                                                                "migrate_from", NULL)) {
         if (op->connection_rc < 0) {
             update_remaining_timeout(cmd);
 
             if ((op->connection_rc == -ENOKEY)
                 || (op->connection_rc == -EKEYREJECTED)) {
                 // Hard error, don't retry
                 pcmk__set_result(&(cmd->result), PCMK_OCF_INVALID_PARAM,
                                  PCMK_EXEC_ERROR,
                                  pcmk_strerror(op->connection_rc));
 
             } else if (cmd->remaining_timeout > 3000) {
                 crm_trace("rescheduling start, remaining timeout %d", cmd->remaining_timeout);
                 g_timeout_add(1000, retry_start_cmd_cb, lrm_state);
                 return;
 
             } else {
                 crm_trace("can't reschedule start, remaining timeout too small %d",
                           cmd->remaining_timeout);
                 pcmk__format_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                                     PCMK_EXEC_TIMEOUT,
                                     "%s without enough time to retry",
                                     pcmk_strerror(op->connection_rc));
             }
 
         } else {
             lrm_state_reset_tables(lrm_state, TRUE);
             pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
             lrm_remote_set_flags(lrm_state, remote_active);
         }
 
         crm_debug("Remote connection event matched %s action", cmd->action);
         report_remote_ra_result(cmd);
         cmd_handled = TRUE;
 
     } else if (op->type == lrmd_event_poke && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
 
         if (cmd->monitor_timeout_id) {
             g_source_remove(cmd->monitor_timeout_id);
             cmd->monitor_timeout_id = 0;
         }
 
         /* Only report success the first time; after that, only worry about
          * failures. For this function, if we get the poke back, it is always
          * a success. Pokes fail only if the send fails or the response times
          * out. */
         if (!pcmk_is_set(cmd->status, cmd_reported_success)) {
             pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
             report_remote_ra_result(cmd);
             cmd_set_flags(cmd, cmd_reported_success);
         }
 
         crm_debug("Remote poke event matched %s action", cmd->action);
 
         /* Success; keep rescheduling if an interval is present */
         if (cmd->interval_ms && !pcmk_is_set(cmd->status, cmd_cancel)) {
             ra_data->recurring_cmds = g_list_append(ra_data->recurring_cmds, cmd);
             cmd->interval_id = g_timeout_add(cmd->interval_ms,
                                              recurring_helper, cmd);
             cmd = NULL;         /* prevent free */
         }
         cmd_handled = TRUE;
 
     } else if (op->type == lrmd_event_disconnect && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
         if (pcmk_is_set(ra_data->status, remote_active) &&
             !pcmk_is_set(cmd->status, cmd_cancel)) {
             pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                              PCMK_EXEC_ERROR,
                              "Remote connection unexpectedly dropped "
                              "during monitor");
             report_remote_ra_result(cmd);
             crm_err("Remote connection to %s unexpectedly dropped during monitor",
                     lrm_state->node_name);
         }
         cmd_handled = TRUE;
 
     } else if (op->type == lrmd_event_new_client && pcmk__str_eq(cmd->action, "stop", pcmk__str_casei)) {
 
         handle_remote_ra_stop(lrm_state, cmd);
         cmd_handled = TRUE;
 
     } else {
         crm_debug("Event did not match %s action", ra_data->cur_cmd->action);
     }
 
     if (cmd_handled) {
         ra_data->cur_cmd = NULL;
         if (ra_data->cmds) {
             mainloop_set_trigger(ra_data->work);
         }
         free_cmd(cmd);
     }
 }
 
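 /*!
  * \internal
  * \brief Handle a stop of a remote connection
  *
  * \param[in,out] lrm_state  Executor state for the connection
  * \param[in,out] cmd        Stop command to report the result of
  *                           (NULL for an implied stop)
  */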
 static void
 handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd)
 {
     remote_ra_data_t *ra_data = NULL;
 
     CRM_ASSERT(lrm_state);
     ra_data = lrm_state->remote_ra_data;
 
     if (!pcmk_is_set(ra_data->status, takeover_complete)) {
         /* Delete pending ops whenever the remote connection is intentionally
          * stopped */
         g_hash_table_remove_all(lrm_state->active_ops);
     } else {
         /* We no longer hold the history if this connection has been migrated;
          * however, we keep the metadata cache for future use */
         lrm_state_reset_tables(lrm_state, FALSE);
     }
 
     lrm_remote_clear_flags(lrm_state, remote_active);
     lrm_state_disconnect(lrm_state);
 
     if (ra_data->cmds) {
         g_list_free_full(ra_data->cmds, free_cmd);
     }
     if (ra_data->recurring_cmds) {
         g_list_free_full(ra_data->recurring_cmds, free_cmd);
     }
     ra_data->cmds = NULL;
     ra_data->recurring_cmds = NULL;
     ra_data->cur_cmd = NULL;
 
     if (cmd) {
         pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
         report_remote_ra_result(cmd);
     }
 }
 
 /*!
  * \internal
  * \brief Initiate a remote connection for a start or migrate_from command
  *
  * \param[in,out] lrm_state   Executor state for the connection
  * \param[in,out] cmd         Command requiring the connection
  * \param[in]     timeout_ms  Connection timeout (capped at
  *                            MAX_START_TIMEOUT_MS)
  *
  * \return Standard Pacemaker return code
  */
 static int
 handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms)
 {
     const char *server = NULL;
     lrmd_key_value_t *tmp = NULL;
     int port = 0;
     int timeout_used = timeout_ms > MAX_START_TIMEOUT_MS ? MAX_START_TIMEOUT_MS : timeout_ms;
     int rc = pcmk_rc_ok;
 
     for (tmp = cmd->params; tmp; tmp = tmp->next) {
         if (pcmk__strcase_any_of(tmp->key, XML_RSC_ATTR_REMOTE_RA_ADDR,
                                  XML_RSC_ATTR_REMOTE_RA_SERVER, NULL)) {
             server = tmp->value;
         } else if (pcmk__str_eq(tmp->key, XML_RSC_ATTR_REMOTE_RA_PORT, pcmk__str_casei)) {
             port = atoi(tmp->value);
         } else if (pcmk__str_eq(tmp->key, CRM_META "_" XML_RSC_ATTR_CONTAINER, pcmk__str_casei)) {
             lrm_remote_set_flags(lrm_state, controlling_guest);
         }
     }
 
     rc = controld_connect_remote_executor(lrm_state, server, port,
                                           timeout_used);
     if (rc != pcmk_rc_ok) {
         pcmk__format_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                             PCMK_EXEC_ERROR,
                             "Could not connect to Pacemaker Remote node %s: %s",
                             lrm_state->node_name, pcmk_rc_str(rc));
     }
     return rc;
 }
 
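 /*!
  * \internal
  * \brief Execute queued remote RA commands (mainloop trigger callback)
  *
  * \param[in,out] user_data  Executor state (lrm_state_t *) for the connection
  *
  * \return TRUE (to keep the trigger active)
  */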
 static gboolean
 handle_remote_ra_exec(gpointer user_data)
 {
     int rc = 0;
     lrm_state_t *lrm_state = user_data;
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     remote_ra_cmd_t *cmd;
     GList *first = NULL;
 
     if (ra_data->cur_cmd) {
         /* still waiting on previous cmd */
         return TRUE;
     }
 
     while (ra_data->cmds) {
         first = ra_data->cmds;
         cmd = first->data;
         if (cmd->delay_id) {
             /* still waiting for start delay timer to trip */
             return TRUE;
         }
 
         ra_data->cmds = g_list_remove_link(ra_data->cmds, first);
         g_list_free_1(first);
 
         if (!strcmp(cmd->action, "start") || !strcmp(cmd->action, "migrate_from")) {
             lrm_remote_clear_flags(lrm_state, expect_takeover | takeover_complete);
             if (handle_remote_ra_start(lrm_state, cmd,
                                        cmd->timeout) == pcmk_rc_ok) {
                 /* take care of this later when we get async connection result */
                 crm_debug("Initiated async remote connection, %s action will complete after connect event",
                           cmd->action);
                 ra_data->cur_cmd = cmd;
                 return TRUE;
             }
             report_remote_ra_result(cmd);
 
         } else if (!strcmp(cmd->action, "monitor")) {
 
             if (lrm_state_is_connected(lrm_state) == TRUE) {
                 rc = lrm_state_poke_connection(lrm_state);
                 if (rc < 0) {
                     pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                                      PCMK_EXEC_ERROR, pcmk_strerror(rc));
                 }
             } else {
                 rc = -1;
                 pcmk__set_result(&(cmd->result), PCMK_OCF_NOT_RUNNING,
                                  PCMK_EXEC_DONE, "Remote connection inactive");
             }
 
             if (rc == 0) {
                 crm_debug("Poked Pacemaker Remote at node %s, waiting for async response",
                           cmd->rsc_id);
                 ra_data->cur_cmd = cmd;
                 cmd->monitor_timeout_id = g_timeout_add(cmd->timeout, monitor_timeout_cb, cmd);
                 return TRUE;
             }
             report_remote_ra_result(cmd);
 
         } else if (!strcmp(cmd->action, "stop")) {
 
             if (pcmk_is_set(ra_data->status, expect_takeover)) {
                 /* Briefly wait on stop for the takeover event to occur. If
                  * the takeover event does not occur during the wait period,
                  * that's fine; it just means the remote node's lrm_status
                  * section will be cleared, requiring all resources running on
                  * the remote node to be re-detected explicitly via probe
                  * actions. If the takeover does occur successfully, we can
                  * leave the status section intact. */
                 cmd->takeover_timeout_id = g_timeout_add((cmd->timeout/2), connection_takeover_timeout_cb, cmd);
                 ra_data->cur_cmd = cmd;
                 return TRUE;
             }
 
             handle_remote_ra_stop(lrm_state, cmd);
 
         } else if (!strcmp(cmd->action, "migrate_to")) {
             lrm_remote_clear_flags(lrm_state, takeover_complete);
             lrm_remote_set_flags(lrm_state, expect_takeover);
             pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
             report_remote_ra_result(cmd);
         } else if (pcmk__str_any_of(cmd->action, CRMD_ACTION_RELOAD,
                                     CRMD_ACTION_RELOAD_AGENT, NULL))  {
             /* Currently the only reloadable parameter is reconnect_interval,
              * which is only used by the scheduler via the CIB, so reloads are a
              * no-op.
              *
              * @COMPAT DC <2.1.0: We only need to check for "reload" in case
              * we're in a rolling upgrade with a DC scheduling "reload" instead
              * of "reload-agent". An OCF 1.1 "reload" would be a no-op anyway,
              * so this would work for that purpose as well.
              */
             pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
             report_remote_ra_result(cmd);
         }
 
         free_cmd(cmd);
     }
 
     return TRUE;
 }
 
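 /*!
  * \internal
  * \brief Create a connection's remote RA data if not already created
  *
  * \param[in,out] lrm_state  Executor state for the connection
  */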
 static void
 remote_ra_data_init(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = NULL;
 
     if (lrm_state->remote_ra_data) {
         return;
     }
 
     ra_data = calloc(1, sizeof(remote_ra_data_t));
     CRM_ASSERT(ra_data != NULL);
     ra_data->work = mainloop_add_trigger(G_PRIORITY_HIGH, handle_remote_ra_exec, lrm_state);
     lrm_state->remote_ra_data = ra_data;
 }
 
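 /*!
  * \internal
  * \brief Free a connection's remote RA data, discarding any queued commands
  *
  * \param[in,out] lrm_state  Executor state for the connection
  */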
 void
 remote_ra_cleanup(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
     if (!ra_data) {
         return;
     }
 
     if (ra_data->cmds) {
         g_list_free_full(ra_data->cmds, free_cmd);
     }
 
     if (ra_data->recurring_cmds) {
         g_list_free_full(ra_data->recurring_cmds, free_cmd);
     }
     mainloop_destroy_trigger(ra_data->work);
     free(ra_data);
     lrm_state->remote_ra_data = NULL;
 }
 
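 /*!
  * \internal
  * \brief Check whether an agent or resource ID is a remote connection
  *
  * \param[in] agent     Agent name to check (along with \p provider)
  * \param[in] provider  Agent provider to check (along with \p agent)
  * \param[in] id        Resource ID to check against known connections
  *
  * \return TRUE if \p agent and \p provider specify ocf:pacemaker:remote, or
  *         \p id matches a known connection other than the local node
  */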
 gboolean
 is_remote_lrmd_ra(const char *agent, const char *provider, const char *id)
 {
     if (agent && provider && !strcmp(agent, REMOTE_LRMD_RA) && !strcmp(provider, "pacemaker")) {
         return TRUE;
     }
     if ((id != NULL) && (lrm_state_find(id) != NULL)
         && !pcmk__str_eq(id, controld_globals.our_nodename, pcmk__str_casei)) {
         return TRUE;
     }
 
     return FALSE;
 }
 
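 /*!
  * \internal
  * \brief Get resource information for a remote connection resource
  *
  * \param[in,out] lrm_state  Executor state (unused here)
  * \param[in]     rsc_id     ID of resource to get information for
  *
  * \return Newly allocated info if \p rsc_id is a known connection, else NULL
  */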
 lrmd_rsc_info_t *
 remote_ra_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id)
 {
     lrmd_rsc_info_t *info = NULL;
 
     if ((lrm_state_find(rsc_id))) {
         info = calloc(1, sizeof(lrmd_rsc_info_t));
 
         info->id = strdup(rsc_id);
         info->type = strdup(REMOTE_LRMD_RA);
         info->standard = strdup(PCMK_RESOURCE_CLASS_OCF);
         info->provider = strdup("pacemaker");
     }
 
     return info;
 }
 
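 /*!
  * \internal
  * \brief Check whether an action is implemented for remote connections
  *
  * \param[in] action  Action name to check
  *
  * \return TRUE if \p action is supported, otherwise FALSE
  */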
 static gboolean
 is_remote_ra_supported_action(const char *action)
 {
     return pcmk__str_any_of(action,
                             CRMD_ACTION_START,
                             CRMD_ACTION_STOP,
                             CRMD_ACTION_STATUS,
                             CRMD_ACTION_MIGRATE,
                             CRMD_ACTION_MIGRATED,
                             CRMD_ACTION_RELOAD_AGENT,
                             CRMD_ACTION_RELOAD,
                             NULL);
 }
 
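 /*!
  * \internal
  * \brief Fail, report, and remove all recurring monitors in a command list
  *
  * \param[in,out] list  List of remote RA commands to search
  *
  * \return Updated \p list with failed monitors removed and freed
  */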
 static GList *
 fail_all_monitor_cmds(GList * list)
 {
     GList *rm_list = NULL;
     remote_ra_cmd_t *cmd = NULL;
     GList *gIter = NULL;
 
     for (gIter = list; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms > 0) && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
             rm_list = g_list_append(rm_list, cmd);
         }
     }
 
     for (gIter = rm_list; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
 
         pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                          PCMK_EXEC_ERROR, "Lost connection to remote executor");
         crm_trace("Pre-emptively failing %s %s (interval=%u, %s)",
                   cmd->action, cmd->rsc_id, cmd->interval_ms, cmd->userdata);
         report_remote_ra_result(cmd);
 
         list = g_list_remove(list, cmd);
         free_cmd(cmd);
     }
 
     /* frees only the list data, not the cmds */
     g_list_free(rm_list);
     return list;
 }
 
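 /*!
  * \internal
  * \brief Remove the first command matching an action and interval from a list
  *
  * \param[in,out] list         List of remote RA commands to search
  * \param[in]     action       Action name to match
  * \param[in]     interval_ms  Interval (in milliseconds) to match
  *
  * \return Updated \p list with the matching command (if any) removed and freed
  */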
 static GList *
 remove_cmd(GList * list, const char *action, guint interval_ms)
 {
     remote_ra_cmd_t *cmd = NULL;
     GList *gIter = NULL;
 
     for (gIter = list; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms == interval_ms)
             && pcmk__str_eq(cmd->action, action, pcmk__str_casei)) {
             break;
         }
         cmd = NULL;
     }
     if (cmd) {
         list = g_list_remove(list, cmd);
         free_cmd(cmd);
     }
     return list;
 }
 
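 /*!
  * \internal
  * \brief Cancel a queued or in-flight remote RA command
  *
  * \param[in,out] lrm_state    Executor state (unused here)
  * \param[in]     rsc_id       Connection resource ID
  * \param[in]     action       Action name to cancel
  * \param[in]     interval_ms  Interval (in milliseconds) of action to cancel
  *
  * \return 0 on success, -EINVAL if \p rsc_id is not a known connection
  */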
 int
 remote_ra_cancel(lrm_state_t *lrm_state, const char *rsc_id,
                  const char *action, guint interval_ms)
 {
     lrm_state_t *connection_rsc = NULL;
     remote_ra_data_t *ra_data = NULL;
 
     connection_rsc = lrm_state_find(rsc_id);
     if (!connection_rsc || !connection_rsc->remote_ra_data) {
         return -EINVAL;
     }
 
     ra_data = connection_rsc->remote_ra_data;
     ra_data->cmds = remove_cmd(ra_data->cmds, action, interval_ms);
     ra_data->recurring_cmds = remove_cmd(ra_data->recurring_cmds, action,
                                          interval_ms);
     if (ra_data->cur_cmd &&
         (ra_data->cur_cmd->interval_ms == interval_ms) &&
         (pcmk__str_eq(ra_data->cur_cmd->action, action, pcmk__str_casei))) {
 
         cmd_set_flags(ra_data->cur_cmd, cmd_cancel);
     }
 
     return 0;
 }
 
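 /*!
  * \internal
  * \brief Merge a new recurring monitor into any existing duplicate
  *
  * \param[in,out] ra_data      Connection's remote RA data to search
  * \param[in]     interval_ms  Interval (in milliseconds) of new monitor
  * \param[in]     userdata     User data of new monitor
  *
  * \return Existing monitor with the same interval if found, otherwise NULL
  */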
 static remote_ra_cmd_t *
 handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms,
                    const char *userdata)
 {
     GList *gIter = NULL;
     remote_ra_cmd_t *cmd = NULL;
 
     /* A potential duplicate monitor operation could exist in three places:
      * 1. the recurring_cmds list, where the op is waiting for its next interval
      * 2. the cmds list, where the op is queued to be executed immediately
      * 3. cur_cmd, which means the monitor op is in flight right now
      */
     if (interval_ms == 0) {
         return NULL;
     }
 
     if (ra_data->cur_cmd &&
         !pcmk_is_set(ra_data->cur_cmd->status, cmd_cancel) &&
         (ra_data->cur_cmd->interval_ms == interval_ms) &&
         pcmk__str_eq(ra_data->cur_cmd->action, "monitor", pcmk__str_casei)) {
 
         cmd = ra_data->cur_cmd;
         goto handle_dup;
     }
 
     for (gIter = ra_data->recurring_cmds; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms == interval_ms)
             && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
             goto handle_dup;
         }
     }
 
     for (gIter = ra_data->cmds; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms == interval_ms)
             && pcmk__str_eq(cmd->action, "monitor", pcmk__str_casei)) {
             goto handle_dup;
         }
     }
 
     return NULL;
 
 handle_dup:
 
     crm_trace("merging duplicate monitor cmd " PCMK__OP_FMT,
               cmd->rsc_id, "monitor", interval_ms);
 
     /* update the userdata */
     if (userdata) {
        free(cmd->userdata);
        cmd->userdata = strdup(userdata);
     }
 
     /* if we've already reported success, generate a new call id */
     if (pcmk_is_set(cmd->status, cmd_reported_success)) {
         cmd->start_time = time(NULL);
         cmd->call_id = generate_callid();
         cmd_clear_flags(cmd, cmd_reported_success);
     }
 
     /* If we have an interval_id set, we are in the process of waiting for
      * this command's next interval. Instead of waiting, cancel the timer and
      * execute the action immediately. */
     if (cmd->interval_id) {
         g_source_remove(cmd->interval_id);
         cmd->interval_id = 0;
         recurring_helper(cmd);
     }
 
     return cmd;
 }
 
 /*!
  * \internal
  * \brief Execute an action using the (internal) ocf:pacemaker:remote agent
  *
  * \param[in]     lrm_state      Executor state object for remote connection
  * \param[in]     rsc_id         Connection resource ID
  * \param[in]     action         Action to execute
  * \param[in]     userdata       String to copy and pass to execution callback
  * \param[in]     interval_ms    Action interval (in milliseconds)
  * \param[in]     timeout_ms     Action timeout (in milliseconds)
  * \param[in]     start_delay_ms Delay (in milliseconds) before executing action
  * \param[in,out] params         Connection resource parameters
  * \param[out]    call_id        Where to store call ID on success
  *
  * \return Standard Pacemaker return code
  * \note This takes ownership of \p params, which should not be used or freed
  *       after calling this function.
  */
 int
 controld_execute_remote_agent(const lrm_state_t *lrm_state, const char *rsc_id,
                               const char *action, const char *userdata,
                               guint interval_ms, int timeout_ms,
                               int start_delay_ms, lrmd_key_value_t *params,
                               int *call_id)
 {
     lrm_state_t *connection_rsc = NULL;
     remote_ra_cmd_t *cmd = NULL;
     remote_ra_data_t *ra_data = NULL;
 
     CRM_CHECK((lrm_state != NULL) && (rsc_id != NULL) && (action != NULL)
               && (userdata != NULL) && (call_id != NULL),
               lrmd_key_value_freeall(params); return EINVAL);
 
     *call_id = 0; // Dereference only after the NULL check above
 
     if (!is_remote_ra_supported_action(action)) {
         lrmd_key_value_freeall(params);
         return EOPNOTSUPP;
     }
 
     connection_rsc = lrm_state_find(rsc_id);
     if (connection_rsc == NULL) {
         lrmd_key_value_freeall(params);
         return ENOTCONN;
     }
 
     remote_ra_data_init(connection_rsc);
     ra_data = connection_rsc->remote_ra_data;
 
     cmd = handle_dup_monitor(ra_data, interval_ms, userdata);
     if (cmd) {
         *call_id = cmd->call_id;
         lrmd_key_value_freeall(params);
         return pcmk_rc_ok;
     }
 
     cmd = calloc(1, sizeof(remote_ra_cmd_t));
     if (cmd == NULL) {
         lrmd_key_value_freeall(params);
         return ENOMEM;
     }
 
     cmd->owner = strdup(lrm_state->node_name);
     cmd->rsc_id = strdup(rsc_id);
     cmd->action = strdup(action);
     cmd->userdata = strdup(userdata);
     if ((cmd->owner == NULL) || (cmd->rsc_id == NULL) || (cmd->action == NULL)
         || (cmd->userdata == NULL)) {
         free_cmd(cmd);
         lrmd_key_value_freeall(params);
         return ENOMEM;
     }
 
     cmd->interval_ms = interval_ms;
     cmd->timeout = timeout_ms;
     cmd->start_delay = start_delay_ms;
     cmd->params = params;
     cmd->start_time = time(NULL);
 
     cmd->call_id = generate_callid();
 
     if (cmd->start_delay) {
         cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd);
     }
 
     ra_data->cmds = g_list_append(ra_data->cmds, cmd);
     mainloop_set_trigger(ra_data->work);
 
     *call_id = cmd->call_id;
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Immediately fail all monitors of a remote node, if proxied here
  *
  * \param[in] node_name  Name of pacemaker_remote node
  */
 void
 remote_ra_fail(const char *node_name)
 {
     lrm_state_t *lrm_state = lrm_state_find(node_name);
 
     if (lrm_state && lrm_state_is_connected(lrm_state)) {
         remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
         crm_info("Failing monitors on Pacemaker Remote node %s", node_name);
         ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
         ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
     }
 }
 
 /* A guest node fencing implied by host fencing looks like:
  *
  *  <pseudo_event id="103" operation="stonith" operation_key="stonith-lxc1-off"
  *                on_node="lxc1" on_node_uuid="lxc1">
  *     <attributes CRM_meta_on_node="lxc1" CRM_meta_on_node_uuid="lxc1"
  *                 CRM_meta_stonith_action="off" crm_feature_set="3.0.12"/>
  *     <downed>
  *       <node id="lxc1"/>
  *     </downed>
  *  </pseudo_event>
  */
 #define XPATH_PSEUDO_FENCE "/" XML_GRAPH_TAG_PSEUDO_EVENT \
     "[@" XML_LRM_ATTR_TASK "='stonith']/" XML_GRAPH_TAG_DOWNED \
     "/" XML_CIB_TAG_NODE
 
 /*!
  * \internal
  * \brief Check a pseudo-action for Pacemaker Remote node side effects
  *
  * \param[in,out] xml  XML of pseudo-action to check
  */
 void
 remote_ra_process_pseudo(xmlNode *xml)
 {
     xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_FENCE);
 
     if (numXpathResults(search) == 1) {
         xmlNode *result = getXpathResult(search, 0);
 
         /* Normally, we handle the necessary side effects of a guest node stop
          * action when reporting the remote agent's result. However, if the stop
          * is implied due to fencing, it will be a fencing pseudo-event, and
          * there won't be a result to report. Handle that case here.
          *
          * This will result in a duplicate call to remote_node_down() if the
          * guest stop was real instead of implied, but that shouldn't hurt.
          *
          * There is still one corner case that isn't handled: if a guest node
          * isn't running any resources when its host is fenced, it will appear
          * to be cleanly stopped, so there will be no pseudo-fence, and our
          * peer cache state will be incorrect unless and until the guest is
          * recovered.
          */
         if (result) {
             const char *remote = ID(result);
 
             if (remote) {
                 remote_node_down(remote, DOWN_ERASE_LRM);
             }
         }
     }
     freeXpathObject(search);
 }
 
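 /*!
  * \internal
  * \brief Update a remote node's CIB state with its maintenance-mode setting
  *
  * \param[in,out] lrm_state    Executor state for the remote connection
  * \param[in]     maintenance  Whether the node is entering maintenance mode
  */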
 static void
 remote_ra_maintenance(lrm_state_t * lrm_state, gboolean maintenance)
 {
     xmlNode *update, *state;
     int call_opt;
     crm_node_t *node;
 
     call_opt = crmd_cib_smart_opt();
     node = crm_remote_peer_get(lrm_state->node_name);
     CRM_CHECK(node != NULL, return);
     update = create_xml_node(NULL, XML_CIB_TAG_STATUS);
     state = create_node_state_update(node, node_update_none, update,
                                      __func__);
     crm_xml_add(state, XML_NODE_IS_MAINTENANCE, maintenance?"1":"0");
     if (controld_update_cib(XML_CIB_TAG_STATUS, update, call_opt,
                             NULL) == pcmk_rc_ok) {
         /* TODO: still not 100% sure that async update will succeed ... */
         if (maintenance) {
             lrm_remote_set_flags(lrm_state, remote_in_maint);
         } else {
             lrm_remote_clear_flags(lrm_state, remote_in_maint);
         }
     }
     free_xml(update);
 }
 
 #define XPATH_PSEUDO_MAINTENANCE "//" XML_GRAPH_TAG_PSEUDO_EVENT \
     "[@" XML_LRM_ATTR_TASK "='" CRM_OP_MAINTENANCE_NODES "']/" \
     XML_GRAPH_TAG_MAINTENANCE
 
 /*!
  * \internal
  * \brief Check a pseudo-action holding updates for maintenance state
  *
  * \param[in,out] xml  XML of pseudo-action to check
  */
 void
 remote_ra_process_maintenance_nodes(xmlNode *xml)
 {
     xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_MAINTENANCE);
 
     if (numXpathResults(search) == 1) {
         xmlNode *node;
         int cnt = 0, cnt_remote = 0;
 
         for (node =
                 first_named_child(getXpathResult(search, 0), XML_CIB_TAG_NODE);
             node != NULL; node = pcmk__xml_next(node)) {
             lrm_state_t *lrm_state = lrm_state_find(ID(node));
 
             cnt++;
             if (lrm_state && lrm_state->remote_ra_data &&
                 pcmk_is_set(((remote_ra_data_t *) lrm_state->remote_ra_data)->status, remote_active)) {
                 int is_maint;
 
                 cnt_remote++;
                 pcmk__scan_min_int(crm_element_value(node, XML_NODE_IS_MAINTENANCE),
                                    &is_maint, 0);
                 remote_ra_maintenance(lrm_state, is_maint);
             }
         }
         crm_trace("Action holds %d nodes (%d remotes found), "
                   "adjusting maintenance mode", cnt, cnt_remote);
     }
     freeXpathObject(search);
 }
 
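 /*!
  * \internal
  * \brief Check whether a remote node is in maintenance mode
  *
  * \param[in] lrm_state  Executor state for the remote connection
  *
  * \return TRUE if the node is in maintenance mode, otherwise FALSE
  */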
 gboolean
 remote_ra_is_in_maintenance(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     return pcmk_is_set(ra_data->status, remote_in_maint);
 }
 
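 /*!
  * \internal
  * \brief Check whether a remote connection is for a guest node
  *
  * \param[in] lrm_state  Executor state for the remote connection
  *
  * \return TRUE if the connection is for a guest node, otherwise FALSE
  */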
 gboolean
 remote_ra_controlling_guest(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     return pcmk_is_set(ra_data->status, controlling_guest);
 }