diff --git a/daemons/controld/controld_cib.c b/daemons/controld/controld_cib.c
index 1c533a7768..00bc98a88a 100644
--- a/daemons/controld/controld_cib.c
+++ b/daemons/controld/controld_cib.c
@@ -1,1055 +1,1055 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <unistd.h>  /* sleep */
 
 #include <crm/common/alerts_internal.h>
 #include <crm/common/xml.h>
 #include <crm/crm.h>
 #include <crm/lrmd_internal.h>
 
 #include <pacemaker-controld.h>
 
 // Call ID of the most recent in-progress CIB resource update (or 0 if none)
 static int pending_rsc_update = 0;
 
 /*!
  * \internal
  * \brief Respond to a dropped CIB connection
  *
  * \param[in] user_data  CIB connection that dropped
  */
 static void
 handle_cib_disconnect(gpointer user_data)
 {
     CRM_LOG_ASSERT(user_data == controld_globals.cib_conn);
 
     controld_trigger_fsa();
     controld_globals.cib_conn->state = cib_disconnected;
 
     if (pcmk_is_set(controld_globals.fsa_input_register, R_CIB_CONNECTED)) {
         // @TODO This should trigger a reconnect, not a shutdown
         crm_crit("Lost connection to the CIB manager, shutting down");
         register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL);
         controld_clear_fsa_input_flags(R_CIB_CONNECTED);
 
     } else { // Expected
         crm_info("Disconnected from the CIB manager");
     }
 }
 
 static void
 do_cib_updated(const char *event, xmlNode * msg)
 {
     const xmlNode *patchset = NULL;
     const char *client_name = NULL;
 
     crm_debug("Received CIB diff notification: DC=%s", pcmk__btoa(AM_I_DC));
 
     if (cib__get_notify_patchset(msg, &patchset) != pcmk_rc_ok) {
         return;
     }
 
     if (cib__element_in_patchset(patchset, PCMK_XE_ALERTS)
         || cib__element_in_patchset(patchset, PCMK_XE_CRM_CONFIG)) {
 
         controld_trigger_config();
     }
 
     if (!AM_I_DC) {
         // We're not in control of the join sequence
         return;
     }
 
     client_name = crm_element_value(msg, PCMK__XA_CIB_CLIENTNAME);
     if (!cib__client_triggers_refresh(client_name)) {
         // The CIB is still accurate
         return;
     }
 
     if (cib__element_in_patchset(patchset, PCMK_XE_NODES)
         || cib__element_in_patchset(patchset, PCMK_XE_STATUS)) {
 
         /* An unsafe client modified the PCMK_XE_NODES or PCMK_XE_STATUS
          * section. Ensure the node list is up-to-date, and start the join
          * process again so we get everyone's current resource history.
          */
         if (client_name == NULL) {
             client_name = crm_element_value(msg, PCMK__XA_CIB_CLIENTID);
         }
         crm_notice("Populating nodes and starting an election after %s event "
                    "triggered by %s",
                    event, pcmk__s(client_name, "(unidentified client)"));
 
         populate_cib_nodes(node_update_quick|node_update_all, __func__);
         register_fsa_input(C_FSA_INTERNAL, I_ELECTION, NULL);
     }
 }
 
 void
 controld_disconnect_cib_manager(void)
 {
     cib_t *cib_conn = controld_globals.cib_conn;
 
     CRM_ASSERT(cib_conn != NULL);
 
     crm_debug("Disconnecting from the CIB manager");
 
     controld_clear_fsa_input_flags(R_CIB_CONNECTED);
 
     cib_conn->cmds->del_notify_callback(cib_conn, PCMK__VALUE_CIB_DIFF_NOTIFY,
                                         do_cib_updated);
     cib_free_callbacks(cib_conn);
 
     if (cib_conn->state != cib_disconnected) {
         cib_conn->cmds->set_secondary(cib_conn, cib_discard_reply);
         cib_conn->cmds->signoff(cib_conn);
     }
 }
 
 /* A_CIB_STOP, A_CIB_START, O_CIB_RESTART */
 void
 do_cib_control(long long action,
                enum crmd_fsa_cause cause,
                enum crmd_fsa_state cur_state,
                enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     static int cib_retries = 0;
 
     cib_t *cib_conn = controld_globals.cib_conn;
 
     void (*dnotify_fn) (gpointer user_data) = handle_cib_disconnect;
     void (*update_cb) (const char *event, xmlNodePtr msg) = do_cib_updated;
 
     int rc = pcmk_ok;
 
     CRM_ASSERT(cib_conn != NULL);
 
     if (pcmk_is_set(action, A_CIB_STOP)) {
         if ((cib_conn->state != cib_disconnected)
             && (pending_rsc_update != 0)) {
 
             crm_info("Waiting for resource update %d to complete",
                      pending_rsc_update);
             crmd_fsa_stall(FALSE);
             return;
         }
         controld_disconnect_cib_manager();
     }
 
     if (!pcmk_is_set(action, A_CIB_START)) {
         return;
     }
 
     if (cur_state == S_STOPPING) {
         crm_err("Ignoring request to connect to the CIB manager after "
                 "shutdown");
         return;
     }
 
     rc = cib_conn->cmds->signon(cib_conn, CRM_SYSTEM_CRMD,
                                 cib_command_nonblocking);
 
     if (rc != pcmk_ok) {
         // A short wait that usually avoids stalling the FSA
         sleep(1);
         rc = cib_conn->cmds->signon(cib_conn, CRM_SYSTEM_CRMD,
                                     cib_command_nonblocking);
     }
 
     if (rc != pcmk_ok) {
         crm_info("Could not connect to the CIB manager: %s", pcmk_strerror(rc));
 
     } else if (cib_conn->cmds->set_connection_dnotify(cib_conn,
                                                       dnotify_fn) != pcmk_ok) {
         crm_err("Could not set dnotify callback");
 
     } else if (cib_conn->cmds->add_notify_callback(cib_conn,
                                                    PCMK__VALUE_CIB_DIFF_NOTIFY,
                                                    update_cb) != pcmk_ok) {
         crm_err("Could not set CIB notification callback (update)");
 
     } else {
         controld_set_fsa_input_flags(R_CIB_CONNECTED);
         cib_retries = 0;
     }
 
     if (!pcmk_is_set(controld_globals.fsa_input_register, R_CIB_CONNECTED)) {
         cib_retries++;
 
         if (cib_retries < 30) {
             crm_warn("Couldn't complete CIB registration %d times... "
                      "pause and retry", cib_retries);
             controld_start_wait_timer();
             crmd_fsa_stall(FALSE);
 
         } else {
             crm_err("Could not complete CIB registration %d times... "
                     "hard error", cib_retries);
             register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
         }
     }
 }
 
 #define MIN_CIB_OP_TIMEOUT (30)
 
 /*!
  * \internal
  * \brief Get the timeout (in seconds) that should be used with CIB operations
  *
  * \return The maximum of 30 seconds and 10 seconds times one more than the
  *         number of active cluster nodes and remote nodes
  */
 unsigned int
 cib_op_timeout(void)
 {
     unsigned int calculated_timeout = 10U * (pcmk__cluster_num_active_nodes()
                                              + pcmk__cluster_num_remote_nodes()
                                              + 1U);
 
     calculated_timeout = QB_MAX(calculated_timeout, MIN_CIB_OP_TIMEOUT);
     crm_trace("Calculated timeout: %s",
               pcmk__readable_interval(calculated_timeout * 1000));
 
     if (controld_globals.cib_conn) {
         controld_globals.cib_conn->call_timeout = calculated_timeout;
     }
     return calculated_timeout;
 }
 
 /*!
  * \internal
  * \brief Get CIB call options to use local scope if primary is unavailable
  *
  * \return CIB call options
  */
 int
 crmd_cib_smart_opt(void)
 {
     int call_opt = cib_none;
 
     if ((controld_globals.fsa_state == S_ELECTION)
         || (controld_globals.fsa_state == S_PENDING)) {
         crm_info("Sending update to local CIB in state: %s",
                  fsa_state2string(controld_globals.fsa_state));
         cib__set_call_options(call_opt, "update", cib_none);
     }
     return call_opt;
 }
 
 static void
 cib_delete_callback(xmlNode *msg, int call_id, int rc, xmlNode *output,
                     void *user_data)
 {
     char *desc = user_data;
 
     if (rc == 0) {
         crm_debug("Deletion of %s (via CIB call %d) succeeded", desc, call_id);
     } else {
         crm_warn("Deletion of %s (via CIB call %d) failed: %s " QB_XS " rc=%d",
                  desc, call_id, pcmk_strerror(rc), rc);
     }
 }
 
 // Searches for various portions of PCMK__XE_NODE_STATE to delete
 
 // Match a particular node's PCMK__XE_NODE_STATE (takes node name 1x)
 #define XPATH_NODE_STATE "//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']"
 
 // Node's lrm section (name 1x)
 #define XPATH_NODE_LRM XPATH_NODE_STATE "/" PCMK__XE_LRM
 
 /* Node's PCMK__XE_LRM_RSC_OP entries and PCMK__XE_LRM_RESOURCE entries without
  * unexpired lock
  * (name 2x, (seconds_since_epoch - PCMK_OPT_SHUTDOWN_LOCK_LIMIT) 1x)
  */
 #define XPATH_NODE_LRM_UNLOCKED XPATH_NODE_STATE "//" PCMK__XE_LRM_RSC_OP   \
                                 "|" XPATH_NODE_STATE                        \
                                 "//" PCMK__XE_LRM_RESOURCE                  \
                                 "[not(@" PCMK_OPT_SHUTDOWN_LOCK ") "        \
                                     "or " PCMK_OPT_SHUTDOWN_LOCK "<%lld]"
 
 // Node's PCMK__XE_TRANSIENT_ATTRIBUTES section (name 1x)
 #define XPATH_NODE_ATTRS XPATH_NODE_STATE "/" PCMK__XE_TRANSIENT_ATTRIBUTES
 
 // Everything under PCMK__XE_NODE_STATE (name 1x)
 #define XPATH_NODE_ALL          XPATH_NODE_STATE "/*"
 
 /* Unlocked history + transient attributes
  * (name 2x, (seconds_since_epoch - PCMK_OPT_SHUTDOWN_LOCK_LIMIT) 1x, name 1x)
  */
 #define XPATH_NODE_ALL_UNLOCKED XPATH_NODE_LRM_UNLOCKED "|" XPATH_NODE_ATTRS
 
 /*!
  * \internal
  * \brief Get the XPath and description of a node state section to be deleted
  *
  * \param[in]  uname    Desired node
  * \param[in]  section  Subsection of \c PCMK__XE_NODE_STATE to be deleted
  * \param[out] xpath    Where to store XPath of \p section
  * \param[out] desc     If not \c NULL, where to store description of \p section
  */
 void
 controld_node_state_deletion_strings(const char *uname,
                                      enum controld_section_e section,
                                      char **xpath, char **desc)
 {
     const char *desc_pre = NULL;
 
     // Shutdown locks that started before this time are expired
     long long expire = (long long) time(NULL)
                        - controld_globals.shutdown_lock_limit;
 
     switch (section) {
         case controld_section_lrm:
             *xpath = crm_strdup_printf(XPATH_NODE_LRM, uname);
             desc_pre = "resource history";
             break;
         case controld_section_lrm_unlocked:
             *xpath = crm_strdup_printf(XPATH_NODE_LRM_UNLOCKED,
                                        uname, uname, expire);
             desc_pre = "resource history (other than shutdown locks)";
             break;
         case controld_section_attrs:
             *xpath = crm_strdup_printf(XPATH_NODE_ATTRS, uname);
             desc_pre = "transient attributes";
             break;
         case controld_section_all:
             *xpath = crm_strdup_printf(XPATH_NODE_ALL, uname);
             desc_pre = "all state";
             break;
         case controld_section_all_unlocked:
             *xpath = crm_strdup_printf(XPATH_NODE_ALL_UNLOCKED,
                                        uname, uname, expire, uname);
             desc_pre = "all state (other than shutdown locks)";
             break;
         default:
             // We called this function incorrectly
             CRM_ASSERT(false);
             break;
     }
 
     if (desc != NULL) {
         *desc = crm_strdup_printf("%s for node %s", desc_pre, uname);
     }
 }
 
 /*!
  * \internal
  * \brief Delete subsection of a node's CIB \c PCMK__XE_NODE_STATE
  *
  * \param[in] uname    Desired node
  * \param[in] section  Subsection of \c PCMK__XE_NODE_STATE to delete
  * \param[in] options  CIB call options to use
  */
 void
 controld_delete_node_state(const char *uname, enum controld_section_e section,
                            int options)
 {
     cib_t *cib = controld_globals.cib_conn;
     char *xpath = NULL;
     char *desc = NULL;
     int cib_rc = pcmk_ok;
 
     CRM_ASSERT((uname != NULL) && (cib != NULL));
 
     controld_node_state_deletion_strings(uname, section, &xpath, &desc);
 
     cib__set_call_options(options, "node state deletion",
                           cib_xpath|cib_multiple);
     cib_rc = cib->cmds->remove(cib, xpath, NULL, options);
     fsa_register_cib_callback(cib_rc, desc, cib_delete_callback);
     crm_info("Deleting %s (via CIB call %d) " QB_XS " xpath=%s",
              desc, cib_rc, xpath);
 
     // CIB library handles freeing desc
     free(xpath);
 }
 
 // Takes node name and resource ID
 #define XPATH_RESOURCE_HISTORY "//" PCMK__XE_NODE_STATE                 \
                                "[@" PCMK_XA_UNAME "='%s']/"             \
                                PCMK__XE_LRM "/" PCMK__XE_LRM_RESOURCES  \
                                "/" PCMK__XE_LRM_RESOURCE                \
                                "[@" PCMK_XA_ID "='%s']"
 // @TODO could add "and @PCMK_OPT_SHUTDOWN_LOCK" to limit to locks
 
 /*!
  * \internal
  * \brief Clear resource history from CIB for a given resource and node
  *
  * \param[in]  rsc_id        ID of resource to be cleared
  * \param[in]  node          Node whose resource history should be cleared
  * \param[in]  user_name     ACL user name to use
  * \param[in]  call_options  CIB call options
  *
  * \return Standard Pacemaker return code
  */
 int
 controld_delete_resource_history(const char *rsc_id, const char *node,
                                  const char *user_name, int call_options)
 {
     char *desc = NULL;
     char *xpath = NULL;
     int rc = pcmk_rc_ok;
     cib_t *cib = controld_globals.cib_conn;
 
     CRM_CHECK((rsc_id != NULL) && (node != NULL), return EINVAL);
 
     desc = crm_strdup_printf("resource history for %s on %s", rsc_id, node);
     if (cib == NULL) {
         crm_err("Unable to clear %s: no CIB connection", desc);
         free(desc);
         return ENOTCONN;
     }
 
     // Ask CIB to delete the entry
     xpath = crm_strdup_printf(XPATH_RESOURCE_HISTORY, node, rsc_id);
 
     cib->cmds->set_user(cib, user_name);
     rc = cib->cmds->remove(cib, xpath, NULL, call_options|cib_xpath);
     cib->cmds->set_user(cib, NULL);
 
     if (rc < 0) {
         rc = pcmk_legacy2rc(rc);
         crm_err("Could not delete resource status of %s on %s%s%s: %s "
                 QB_XS " rc=%d", rsc_id, node,
                 (user_name? " for user " : ""), (user_name? user_name : ""),
                 pcmk_rc_str(rc), rc);
         free(desc);
         free(xpath);
         return rc;
     }
 
     if (pcmk_is_set(call_options, cib_sync_call)) {
         if (pcmk_is_set(call_options, cib_dryrun)) {
             crm_debug("Deletion of %s would succeed", desc);
         } else {
             crm_debug("Deletion of %s succeeded", desc);
         }
         free(desc);
 
     } else {
         crm_info("Clearing %s (via CIB call %d) " QB_XS " xpath=%s",
                  desc, rc, xpath);
         fsa_register_cib_callback(rc, desc, cib_delete_callback);
         // CIB library handles freeing desc
     }
 
     free(xpath);
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Build XML and string of parameters meeting some criteria, for digest
  *
  * \param[in]  op          Executor event with parameter table to use
  * \param[in]  metadata    Parsed meta-data for executed resource agent
  * \param[in]  param_type  Flag used for selection criteria
  * \param[out] result      Will be set to newly created XML with selected
  *                         parameters as attributes
  *
  * \return Newly allocated space-separated string of parameter names
  * \note Selection criteria vary by param_type: for the restart digest, we
  *       want parameters that are *not* marked reloadable (OCF 1.1) or that
  *       *are* marked unique (pre-1.1), for both string and XML results; for the
  *       secure digest, we want parameters that *are* marked private for the
  *       string, but parameters that are *not* marked private for the XML.
  * \note It is the caller's responsibility to free the string return value with
  *       \p g_string_free() and the XML result with \p pcmk__xml_free().
  */
 static GString *
 build_parameter_list(const lrmd_event_data_t *op,
                      const struct ra_metadata_s *metadata,
                      enum ra_param_flags_e param_type, xmlNode **result)
 {
     GString *list = NULL;
 
     *result = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS);
 
     /* For the secure digest, consider all parameters except private ones, to
      * be consistent with what the scheduler does in calculate_secure_digest()
      * (hence the DC version check below).
      */
     if (param_type == ra_param_private
         && compare_version(controld_globals.dc_version, "3.16.0") >= 0) {
         g_hash_table_foreach(op->params, hash2field, *result);
         pcmk__filter_op_for_digest(*result);
     }
 
     for (GList *iter = metadata->ra_params; iter != NULL; iter = iter->next) {
         struct ra_param_s *param = (struct ra_param_s *) iter->data;
 
         bool accept_for_list = false;
         bool accept_for_xml = false;
 
         switch (param_type) {
             case ra_param_reloadable:
                 accept_for_list = !pcmk_is_set(param->rap_flags, param_type);
                 accept_for_xml = accept_for_list;
                 break;
 
             case ra_param_unique:
                 accept_for_list = pcmk_is_set(param->rap_flags, param_type);
                 accept_for_xml = accept_for_list;
                 break;
 
             case ra_param_private:
                 accept_for_list = pcmk_is_set(param->rap_flags, param_type);
                 accept_for_xml = !accept_for_list;
                 break;
         }
 
         if (accept_for_list) {
             crm_trace("Attr %s is %s", param->rap_name, ra_param_flag2text(param_type));
 
             if (list == NULL) {
                 // We will later search for " WORD ", so start list with a space
                 pcmk__add_word(&list, 256, " ");
             }
             pcmk__add_word(&list, 0, param->rap_name);
 
         } else {
             crm_trace("Rejecting %s for %s", param->rap_name, ra_param_flag2text(param_type));
         }
 
         if (accept_for_xml) {
             const char *v = g_hash_table_lookup(op->params, param->rap_name);
 
             if (v != NULL) {
                 crm_trace("Adding attr %s=%s to the xml result", param->rap_name, v);
                 crm_xml_add(*result, param->rap_name, v);
             }
 
         } else {
             crm_trace("Removing attr %s from the xml result", param->rap_name);
             pcmk__xe_remove_attr(*result, param->rap_name);
         }
     }
 
     if (list != NULL) {
         // We will later search for " WORD ", so end list with a space
         pcmk__add_word(&list, 0, " ");
     }
     return list;
 }
 
 static void
 append_restart_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata,
                     xmlNode *update, const char *version)
 {
     GString *list = NULL;
     char *digest = NULL;
     xmlNode *restart = NULL;
 
     CRM_LOG_ASSERT(op->params != NULL);
 
     if (op->interval_ms > 0) {
         /* monitors are not reloadable */
         return;
     }
 
     if (pcmk_is_set(metadata->ra_flags, ra_supports_reload_agent)) {
         /* Add parameters not marked reloadable to the PCMK__XA_OP_FORCE_RESTART
          * list
          */
         list = build_parameter_list(op, metadata, ra_param_reloadable,
                                     &restart);
 
     } else if (pcmk_is_set(metadata->ra_flags, ra_supports_legacy_reload)) {
         /* @COMPAT pre-OCF-1.1 resource agents
          *
          * Before OCF 1.1, Pacemaker abused "unique=0" to indicate
          * reloadability. Add any parameters with unique="1" to the
          * PCMK__XA_OP_FORCE_RESTART list.
          */
         list = build_parameter_list(op, metadata, ra_param_unique, &restart);
 
     } else {
         // Resource does not support agent reloads
         return;
     }
 
     digest = pcmk__digest_operation(restart);
     /* Add PCMK__XA_OP_FORCE_RESTART and PCMK__XA_OP_RESTART_DIGEST to indicate
      * that the resource supports reload, regardless of whether it actually has
      * any reloadable parameters
      */
     crm_xml_add(update, PCMK__XA_OP_FORCE_RESTART,
                 (list == NULL)? "" : (const char *) list->str);
     crm_xml_add(update, PCMK__XA_OP_RESTART_DIGEST, digest);
 
     if ((list != NULL) && (list->len > 0)) {
         crm_trace("%s: %s, %s", op->rsc_id, digest, (const char *) list->str);
     } else {
         crm_trace("%s: %s", op->rsc_id, digest);
     }
 
     if (list != NULL) {
         g_string_free(list, TRUE);
     }
     pcmk__xml_free(restart);
     free(digest);
 }
 
 static void
 append_secure_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata,
                    xmlNode *update, const char *version)
 {
     GString *list = NULL;
     char *digest = NULL;
     xmlNode *secure = NULL;
 
     CRM_LOG_ASSERT(op->params != NULL);
 
     /* To keep PCMK__XA_OP_SECURE_PARAMS short, we want it to contain only the
      * secure (private) parameters, while PCMK__XA_OP_SECURE_DIGEST is based on
      * the non-private ones
      */
     list = build_parameter_list(op, metadata, ra_param_private, &secure);
 
     if (list != NULL) {
         digest = pcmk__digest_operation(secure);
         crm_xml_add(update, PCMK__XA_OP_SECURE_PARAMS,
                     (const char *) list->str);
         crm_xml_add(update, PCMK__XA_OP_SECURE_DIGEST, digest);
 
         crm_trace("%s: %s, %s", op->rsc_id, digest, (const char *) list->str);
         g_string_free(list, TRUE);
     } else {
         crm_trace("%s: no secure parameters", op->rsc_id);
     }
 
     pcmk__xml_free(secure);
     free(digest);
 }
 
 /*!
  * \internal
  * \brief Create XML for a resource history entry
  *
  * \param[in]     func       Function name of caller
  * \param[in,out] parent     XML to add entry to
  * \param[in]     rsc        Affected resource
  * \param[in,out] op         Action to add an entry for (or NULL to do nothing)
  * \param[in]     node_name  Node where action occurred
  */
 void
 controld_add_resource_history_xml_as(const char *func, xmlNode *parent,
                                      const lrmd_rsc_info_t *rsc,
                                      lrmd_event_data_t *op,
                                      const char *node_name)
 {
     int target_rc = 0;
     xmlNode *xml_op = NULL;
     struct ra_metadata_s *metadata = NULL;
     const char *caller_version = NULL;
     lrm_state_t *lrm_state = NULL;
 
     if (op == NULL) {
         return;
     }
 
     target_rc = rsc_op_expected_rc(op);
 
     caller_version = g_hash_table_lookup(op->params, PCMK_XA_CRM_FEATURE_SET);
     CRM_CHECK(caller_version != NULL, caller_version = CRM_FEATURE_SET);
 
     xml_op = pcmk__create_history_xml(parent, op, caller_version, target_rc,
                                       controld_globals.our_nodename, func);
     if (xml_op == NULL) {
         return;
     }
 
     if ((rsc == NULL) || (op->params == NULL)
         || !crm_op_needs_metadata(rsc->standard, op->op_type)) {
 
         crm_trace("No digests needed for %s action on %s (params=%p rsc=%p)",
                   op->op_type, op->rsc_id, op->params, rsc);
         return;
     }
 
     lrm_state = lrm_state_find(node_name);
     if (lrm_state == NULL) {
         crm_warn("Cannot calculate digests for operation " PCMK__OP_FMT
                  " because we have no connection to executor for %s",
                  op->rsc_id, op->op_type, op->interval_ms, node_name);
         return;
     }
 
     /* Ideally the metadata is cached, and the agent is just a fallback.
      *
      * @TODO Go through all callers and ensure they get metadata asynchronously
      * first.
      */
     metadata = controld_get_rsc_metadata(lrm_state, rsc,
                                          controld_metadata_from_agent
                                          |controld_metadata_from_cache);
     if (metadata == NULL) {
         return;
     }
 
     crm_trace("Including additional digests for %s:%s:%s",
               rsc->standard, rsc->provider, rsc->type);
     append_restart_list(op, metadata, xml_op, caller_version);
     append_secure_list(op, metadata, xml_op, caller_version);
 
     return;
 }
 
 /*!
  * \internal
  * \brief Record an action as pending in the CIB, if appropriate
  *
  * \param[in]     node_name  Node where the action is pending
  * \param[in]     rsc        Resource that action is for
  * \param[in,out] op         Pending action
  *
  * \return true if action was recorded in CIB, otherwise false
  */
 bool
 controld_record_pending_op(const char *node_name, const lrmd_rsc_info_t *rsc,
                            lrmd_event_data_t *op)
 {
     const char *record_pending = NULL;
 
     CRM_CHECK((node_name != NULL) && (rsc != NULL) && (op != NULL),
               return false);
 
     // Never record certain operation types as pending
     if ((op->op_type == NULL) || (op->params == NULL)
         || !controld_action_is_recordable(op->op_type)) {
         return false;
     }
 
     // Check action's PCMK_META_RECORD_PENDING meta-attribute (defaults to true)
     record_pending = crm_meta_value(op->params, PCMK_META_RECORD_PENDING);
     if ((record_pending != NULL) && !crm_is_true(record_pending)) {
         return false;
     }
 
     op->call_id = -1;
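     // Use the current time as both the run time and the last result change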
     op->t_run = time(NULL);
-    op->t_rcchange = (unsigned int) op->t_run;
+    op->t_rcchange = op->t_run;
 
     lrmd__set_result(op, PCMK_OCF_UNKNOWN, PCMK_EXEC_PENDING, NULL);
 
     crm_debug("Recording pending %s-interval %s for %s on %s in the CIB",
               pcmk__readable_interval(op->interval_ms), op->op_type, op->rsc_id,
               node_name);
     controld_update_resource_history(node_name, rsc, op, 0);
     return true;
 }
 
 static void
 cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     switch (rc) {
         case pcmk_ok:
         case -pcmk_err_diff_failed:
         case -pcmk_err_diff_resync:
             crm_trace("Resource history update completed (call=%d rc=%d)",
                       call_id, rc);
             break;
         default:
             if (call_id > 0) {
                 crm_warn("Resource history update %d failed: %s "
                          QB_XS " rc=%d", call_id, pcmk_strerror(rc), rc);
             } else {
                 crm_warn("Resource history update failed: %s " QB_XS " rc=%d",
                          pcmk_strerror(rc), rc);
             }
     }
 
     if (call_id == pending_rsc_update) {
         pending_rsc_update = 0;
         controld_trigger_fsa();
     }
 }
 
 /* Only successful stops, and probes that found the resource inactive, get locks
  * recorded in the history. This ensures the resource stays locked to the node
  * until it is active there again after the node comes back up.
  */
 static bool
 should_preserve_lock(lrmd_event_data_t *op)
 {
     if (!pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) {
         return false;
     }
     if (!strcmp(op->op_type, PCMK_ACTION_STOP) && (op->rc == PCMK_OCF_OK)) {
         return true;
     }
     if (!strcmp(op->op_type, PCMK_ACTION_MONITOR)
         && (op->rc == PCMK_OCF_NOT_RUNNING)) {
         return true;
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Request a CIB update
  *
  * \param[in]     section    Section of CIB to update
  * \param[in]     data       New XML of CIB section to update
  * \param[in]     options    CIB call options
  * \param[in]     callback   If not \c NULL, set this as the operation callback
  *
  * \return Standard Pacemaker return code
  *
  * \note If \p callback is \p cib_rsc_callback(), the CIB update's call ID is
  *       stored in \p pending_rsc_update on success.
  */
 int
 controld_update_cib(const char *section, xmlNode *data, int options,
                     void (*callback)(xmlNode *, int, int, xmlNode *, void *))
 {
     cib_t *cib = controld_globals.cib_conn;
     int cib_rc = -ENOTCONN;
 
     CRM_ASSERT(data != NULL);
 
     if (cib != NULL) {
         cib_rc = cib->cmds->modify(cib, section, data, options);
         if (cib_rc >= 0) {
             crm_debug("Submitted CIB update %d for %s section",
                       cib_rc, section);
         }
     }
 
     if (callback == NULL) {
         if (cib_rc < 0) {
             crm_err("Failed to update CIB %s section: %s",
                     section, pcmk_rc_str(pcmk_legacy2rc(cib_rc)));
         }
 
     } else {
         if ((cib_rc >= 0) && (callback == cib_rsc_callback)) {
             /* Checking for a particular callback is a little hacky, but it
              * didn't seem worth adding an output argument for cib_rc for just
              * one use case.
              */
             pending_rsc_update = cib_rc;
         }
         fsa_register_cib_callback(cib_rc, NULL, callback);
     }
 
     return (cib_rc >= 0)? pcmk_rc_ok : pcmk_legacy2rc(cib_rc);
 }
 
 /*!
  * \internal
  * \brief Update resource history entry in CIB
  *
  * \param[in]     node_name  Node where action occurred
  * \param[in]     rsc        Resource that action is for
  * \param[in,out] op         Action to record
  * \param[in]     lock_time  If nonzero, when resource was locked to node
  *
  * \note On success, the CIB update's call ID will be stored in
  *       pending_rsc_update.
  */
 void
 controld_update_resource_history(const char *node_name,
                                  const lrmd_rsc_info_t *rsc,
                                  lrmd_event_data_t *op, time_t lock_time)
 {
     xmlNode *update = NULL;
     xmlNode *xml = NULL;
     int call_opt = crmd_cib_smart_opt();
     const char *node_id = NULL;
     const char *container = NULL;
 
     CRM_CHECK((node_name != NULL) && (op != NULL), return);
 
     if (rsc == NULL) {
         crm_warn("Resource %s no longer exists in the executor", op->rsc_id);
         controld_ack_event_directly(NULL, NULL, rsc, op, op->rsc_id);
         return;
     }
 
     // <status>
     update = pcmk__xe_create(NULL, PCMK_XE_STATUS);
 
     //   <node_state ...>
     xml = pcmk__xe_create(update, PCMK__XE_NODE_STATE);
     if (pcmk__str_eq(node_name, controld_globals.our_nodename,
                      pcmk__str_casei)) {
         node_id = controld_globals.our_uuid;
     } else {
         node_id = node_name;
         pcmk__xe_set_bool_attr(xml, PCMK_XA_REMOTE_NODE, true);
     }
     crm_xml_add(xml, PCMK_XA_ID, node_id);
     crm_xml_add(xml, PCMK_XA_UNAME, node_name);
     crm_xml_add(xml, PCMK_XA_CRM_DEBUG_ORIGIN, __func__);
 
     //     <lrm ...>
     xml = pcmk__xe_create(xml, PCMK__XE_LRM);
     crm_xml_add(xml, PCMK_XA_ID, node_id);
 
     //       <lrm_resources>
     xml = pcmk__xe_create(xml, PCMK__XE_LRM_RESOURCES);
 
     //         <lrm_resource ...>
     xml = pcmk__xe_create(xml, PCMK__XE_LRM_RESOURCE);
     crm_xml_add(xml, PCMK_XA_ID, op->rsc_id);
     crm_xml_add(xml, PCMK_XA_CLASS, rsc->standard);
     crm_xml_add(xml, PCMK_XA_PROVIDER, rsc->provider);
     crm_xml_add(xml, PCMK_XA_TYPE, rsc->type);
     if (lock_time != 0) {
         /* Actions on a locked resource should either preserve the lock by
          * recording it with the action result, or clear it.
          */
         if (!should_preserve_lock(op)) {
             lock_time = 0;
         }
         crm_xml_add_ll(xml, PCMK_OPT_SHUTDOWN_LOCK, (long long) lock_time);
     }
     if (op->params != NULL) {
         container = g_hash_table_lookup(op->params,
                                         CRM_META "_" PCMK__META_CONTAINER);
         if (container != NULL) {
             crm_trace("Resource %s is a part of container resource %s",
                       op->rsc_id, container);
             crm_xml_add(xml, PCMK__META_CONTAINER, container);
         }
     }
 
     //           <lrm_resource_op ...> (possibly more than one)
     controld_add_resource_history_xml(xml, rsc, op, node_name);
 
     /* Update the CIB asynchronously. Even if the update fails, the resource
      * state should be discovered during the next election. Worst case, the
      * node is wrongly fenced for running a resource it isn't actually running.
      */
     crm_log_xml_trace(update, __func__);
     controld_update_cib(PCMK_XE_STATUS, update, call_opt, cib_rsc_callback);
     pcmk__xml_free(update);
 }
 
 /*!
  * \internal
  * \brief Erase an LRM history entry from the CIB, given the operation data
  *
  * \param[in] op         Operation whose history should be deleted
  */
 void
 controld_delete_action_history(const lrmd_event_data_t *op)
 {
     xmlNode *xml_top = NULL;
 
     CRM_CHECK(op != NULL, return);
 
     xml_top = pcmk__xe_create(NULL, PCMK__XE_LRM_RSC_OP);
     crm_xml_add_int(xml_top, PCMK__XA_CALL_ID, op->call_id);
     crm_xml_add(xml_top, PCMK__XA_TRANSITION_KEY, op->user_data);
 
     if (op->interval_ms > 0) {
         char *op_id = pcmk__op_key(op->rsc_id, op->op_type, op->interval_ms);
 
         /* Avoid deleting last_failure too (if it was a result of this recurring op failing) */
         crm_xml_add(xml_top, PCMK_XA_ID, op_id);
         free(op_id);
     }
 
     crm_debug("Erasing resource operation history for " PCMK__OP_FMT " (call=%d)",
               op->rsc_id, op->op_type, op->interval_ms, op->call_id);
 
     controld_globals.cib_conn->cmds->remove(controld_globals.cib_conn,
                                             PCMK_XE_STATUS, xml_top, cib_none);
     crm_log_xml_trace(xml_top, "op:cancel");
     pcmk__xml_free(xml_top);
 }
 
 /* Define xpath to find LRM resource history entry by node and resource */
 #define XPATH_HISTORY                                   \
     "/" PCMK_XE_CIB "/" PCMK_XE_STATUS                  \
     "/" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" \
     "/" PCMK__XE_LRM "/" PCMK__XE_LRM_RESOURCES         \
     "/" PCMK__XE_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']"  \
     "/" PCMK__XE_LRM_RSC_OP
 
 /* ... and also by operation key */
 #define XPATH_HISTORY_ID XPATH_HISTORY "[@" PCMK_XA_ID "='%s']"
 
 /* ... and also by operation key and operation call ID */
 #define XPATH_HISTORY_CALL XPATH_HISTORY \
     "[@" PCMK_XA_ID "='%s' and @" PCMK__XA_CALL_ID "='%d']"
 
 /* ... and also by operation key and original operation key */
 #define XPATH_HISTORY_ORIG XPATH_HISTORY \
     "[@" PCMK_XA_ID "='%s' and @" PCMK__XA_OPERATION_KEY "='%s']"
 
 /*!
  * \internal
  * \brief Delete a last_failure resource history entry from the CIB
  *
  * \param[in] rsc_id       Name of resource to clear history for
  * \param[in] node         Name of node to clear history for
  * \param[in] action       If specified, delete only if this was the failed action
  * \param[in] interval_ms  If \p action is specified, it has this interval
  */
 void
 controld_cib_delete_last_failure(const char *rsc_id, const char *node,
                                  const char *action, guint interval_ms)
 {
     char *xpath = NULL;
     char *last_failure_key = NULL;
     CRM_CHECK((rsc_id != NULL) && (node != NULL), return);
 
     // Generate XPath to match desired entry
     last_failure_key = pcmk__op_key(rsc_id, "last_failure", 0);
     if (action == NULL) {
         xpath = crm_strdup_printf(XPATH_HISTORY_ID, node, rsc_id,
                                   last_failure_key);
     } else {
         char *action_key = pcmk__op_key(rsc_id, action, interval_ms);
 
         xpath = crm_strdup_printf(XPATH_HISTORY_ORIG, node, rsc_id,
                                   last_failure_key, action_key);
         free(action_key);
     }
     free(last_failure_key);
 
     controld_globals.cib_conn->cmds->remove(controld_globals.cib_conn, xpath,
                                             NULL, cib_xpath);
     free(xpath);
 }
 
 /*!
  * \internal
  * \brief Delete resource history entry from the CIB, given operation key
  *
  * \param[in] rsc_id     Name of resource to clear history for
  * \param[in] node       Name of node to clear history for
  * \param[in] key        Operation key of operation to clear history for
  * \param[in] call_id    If specified, delete entry only if it has this call ID
  */
 void
 controld_delete_action_history_by_key(const char *rsc_id, const char *node,
                                       const char *key, int call_id)
 {
     char *xpath = NULL;
 
     CRM_CHECK((rsc_id != NULL) && (node != NULL) && (key != NULL), return);
 
     if (call_id > 0) {
         xpath = crm_strdup_printf(XPATH_HISTORY_CALL, node, rsc_id, key,
                                   call_id);
     } else {
         xpath = crm_strdup_printf(XPATH_HISTORY_ID, node, rsc_id, key);
     }
     controld_globals.cib_conn->cmds->remove(controld_globals.cib_conn, xpath,
                                             NULL, cib_xpath);
     free(xpath);
 }
diff --git a/daemons/controld/controld_execd.c b/daemons/controld/controld_execd.c
index 479045b8bd..bcd8d98254 100644
--- a/daemons/controld/controld_execd.c
+++ b/daemons/controld/controld_execd.c
@@ -1,2420 +1,2420 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <regex.h>
 #include <sys/param.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 
 #include <crm/crm.h>
 #include <crm/lrmd.h>           // lrmd_event_data_t, lrmd_rsc_info_t, etc.
 #include <crm/services.h>
 #include <crm/common/xml.h>
 #include <crm/pengine/rules.h>
 #include <crm/lrmd_internal.h>
 
 #include <pacemaker-internal.h>
 #include <pacemaker-controld.h>
 
 #define START_DELAY_THRESHOLD 5 * 60 * 1000
 #define MAX_LRM_REG_FAILS 30
 
 struct delete_event_s {
     int rc;
     const char *rsc;
     lrm_state_t *lrm_state;
 };
 
 static gboolean is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id);
 static gboolean build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list);
 static gboolean stop_recurring_actions(gpointer key, gpointer value, gpointer user_data);
 
 static lrmd_event_data_t *construct_op(const lrm_state_t *lrm_state,
                                        const xmlNode *rsc_op,
                                        const char *rsc_id,
                                        const char *operation);
 static void do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc,
                           xmlNode *msg, struct ra_metadata_s *md);
 
 static gboolean lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state,
                                          int log_level);
 
 static void
 lrm_connection_destroy(void)
 {
     if (pcmk_is_set(controld_globals.fsa_input_register, R_LRM_CONNECTED)) {
         crm_crit("Lost connection to local executor");
         register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL);
         controld_clear_fsa_input_flags(R_LRM_CONNECTED);
     }
 }
 
 static char *
 make_stop_id(const char *rsc, int call_id)
 {
     return crm_strdup_printf("%s:%d", rsc, call_id);
 }
 
 static void
 copy_instance_keys(gpointer key, gpointer value, gpointer user_data)
 {
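     // Copy only instance parameters (skip meta-attributes, whose keys contain CRM_META)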
     if (strstr(key, CRM_META "_") == NULL) {
         pcmk__insert_dup(user_data, (const char *) key, (const char *) value);
     }
 }
 
 static void
 copy_meta_keys(gpointer key, gpointer value, gpointer user_data)
 {
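     // Copy only meta-attributes (keys containing CRM_META)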
     if (strstr(key, CRM_META "_") != NULL) {
         pcmk__insert_dup(user_data, (const char *) key, (const char *) value);
     }
 }
 
 /*!
  * \internal
  * \brief Remove a recurring operation from a resource's history
  *
  * \param[in,out] history  Resource history to modify
  * \param[in]     op       Operation to remove
  *
  * \return TRUE if the operation was found and removed, FALSE otherwise
  */
 static gboolean
 history_remove_recurring_op(rsc_history_t *history, const lrmd_event_data_t *op)
 {
     GList *iter;
 
     for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) {
         lrmd_event_data_t *existing = iter->data;
 
         if ((op->interval_ms == existing->interval_ms)
             && pcmk__str_eq(op->rsc_id, existing->rsc_id, pcmk__str_none)
             && pcmk__str_eq(op->op_type, existing->op_type, pcmk__str_casei)) {
 
             history->recurring_op_list = g_list_delete_link(history->recurring_op_list, iter);
             lrmd_free_event(existing);
             return TRUE;
         }
     }
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Free all recurring operations in resource history
  *
  * \param[in,out] history  Resource history to modify
  */
 static void
 history_free_recurring_ops(rsc_history_t *history)
 {
     GList *iter;
 
     for (iter = history->recurring_op_list; iter != NULL; iter = iter->next) {
         lrmd_free_event(iter->data);
     }
     g_list_free(history->recurring_op_list);
     history->recurring_op_list = NULL;
 }
 
 /*!
  * \internal
  * \brief Free resource history
  *
  * \param[in,out] data  Resource history to free
  */
 void
 history_free(gpointer data)
 {
     rsc_history_t *history = (rsc_history_t*)data;
 
     if (history->stop_params) {
         g_hash_table_destroy(history->stop_params);
     }
 
     /* Don't need to free history->rsc.id because it's set to history->id */
     free(history->rsc.type);
     free(history->rsc.standard);
     free(history->rsc.provider);
 
     lrmd_free_event(history->failed);
     lrmd_free_event(history->last);
     free(history->id);
     history_free_recurring_ops(history);
     free(history);
 }
 
 static void
 update_history_cache(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, lrmd_event_data_t * op)
 {
     int target_rc = 0;
     rsc_history_t *entry = NULL;
 
     if (op->rsc_deleted) {
         crm_debug("Purged history for '%s' after %s", op->rsc_id, op->op_type);
         controld_delete_resource_history(op->rsc_id, lrm_state->node_name,
                                          NULL, crmd_cib_smart_opt());
         return;
     }
 
     if (pcmk__str_eq(op->op_type, PCMK_ACTION_NOTIFY, pcmk__str_casei)) {
         return;
     }
 
     crm_debug("Updating history for '%s' with %s op", op->rsc_id, op->op_type);
 
     entry = g_hash_table_lookup(lrm_state->resource_history, op->rsc_id);
     if (entry == NULL && rsc) {
         entry = pcmk__assert_alloc(1, sizeof(rsc_history_t));
         entry->id = pcmk__str_copy(op->rsc_id);
         g_hash_table_insert(lrm_state->resource_history, entry->id, entry);
 
         entry->rsc.id = entry->id;
         entry->rsc.type = pcmk__str_copy(rsc->type);
         entry->rsc.standard = pcmk__str_copy(rsc->standard);
         entry->rsc.provider = pcmk__str_copy(rsc->provider);
 
     } else if (entry == NULL) {
         crm_info("Resource %s no longer exists, not updating cache", op->rsc_id);
         return;
     }
 
     entry->last_callid = op->call_id;
     target_rc = rsc_op_expected_rc(op);
     if (op->op_status == PCMK_EXEC_CANCELLED) {
         if (op->interval_ms > 0) {
             crm_trace("Removing cancelled recurring op: " PCMK__OP_FMT,
                       op->rsc_id, op->op_type, op->interval_ms);
             history_remove_recurring_op(entry, op);
             return;
         } else {
             crm_trace("Skipping " PCMK__OP_FMT " rc=%d, status=%d",
                       op->rsc_id, op->op_type, op->interval_ms, op->rc,
                       op->op_status);
         }
 
     } else if (did_rsc_op_fail(op, target_rc)) {
         /* Store failed monitors here, otherwise the block below will cause them
          * to be forgotten when a stop happens.
          */
         if (entry->failed) {
             lrmd_free_event(entry->failed);
         }
         entry->failed = lrmd_copy_event(op);
 
     } else if (op->interval_ms == 0) {
         if (entry->last) {
             lrmd_free_event(entry->last);
         }
         entry->last = lrmd_copy_event(op);
 
         if (op->params && pcmk__strcase_any_of(op->op_type, PCMK_ACTION_START,
                                                PCMK_ACTION_RELOAD,
                                                PCMK_ACTION_RELOAD_AGENT,
                                                PCMK_ACTION_MONITOR, NULL)) {
             if (entry->stop_params) {
                 g_hash_table_destroy(entry->stop_params);
             }
             entry->stop_params = pcmk__strkey_table(free, free);
 
             g_hash_table_foreach(op->params, copy_instance_keys, entry->stop_params);
         }
     }
 
     if (op->interval_ms > 0) {
         /* Ensure there are no duplicates */
         history_remove_recurring_op(entry, op);
 
         crm_trace("Adding recurring op: " PCMK__OP_FMT,
                   op->rsc_id, op->op_type, op->interval_ms);
         entry->recurring_op_list = g_list_prepend(entry->recurring_op_list, lrmd_copy_event(op));
 
     } else if ((entry->recurring_op_list != NULL)
                 && !pcmk__str_eq(op->op_type, PCMK_ACTION_MONITOR,
                                  pcmk__str_casei)) {
         crm_trace("Dropping %d recurring ops because of: " PCMK__OP_FMT,
                   g_list_length(entry->recurring_op_list), op->rsc_id,
                   op->op_type, op->interval_ms);
         history_free_recurring_ops(entry);
     }
 }
 
 /*!
  * \internal
  * \brief Send a direct OK ack for a resource task
  *
  * \param[in] lrm_state  LRM connection
  * \param[in] input      Input message being ack'ed
  * \param[in] rsc_id     ID of affected resource
  * \param[in] rsc        Affected resource (if available)
  * \param[in] task       Operation task being ack'ed
  * \param[in] ack_host   Name of host to send ack to
  * \param[in] ack_sys    IPC system name to ack
  */
 static void
 send_task_ok_ack(const lrm_state_t *lrm_state, const ha_msg_input_t *input,
                  const char *rsc_id, const lrmd_rsc_info_t *rsc,
                  const char *task, const char *ack_host, const char *ack_sys)
 {
     lrmd_event_data_t *op = construct_op(lrm_state, input->xml, rsc_id, task);
 
     lrmd__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
     controld_ack_event_directly(ack_host, ack_sys, rsc, op, rsc_id);
     lrmd_free_event(op);
 }
 
 static inline const char *
 op_node_name(lrmd_event_data_t *op)
 {
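     // Prefer the remote node name if set; otherwise the event is for the local node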
     return pcmk__s(op->remote_nodename, controld_globals.our_nodename);
 }
 
 void
 lrm_op_callback(lrmd_event_data_t * op)
 {
     CRM_CHECK(op != NULL, return);
     switch (op->type) {
         case lrmd_event_disconnect:
             if (op->remote_nodename == NULL) {
                 /* If this is the local executor IPC connection, set the right
                  * bits in the controller when the connection goes down.
                  */
                 lrm_connection_destroy();
             }
             break;
 
         case lrmd_event_exec_complete:
             {
                 lrm_state_t *lrm_state = lrm_state_find(op_node_name(op));
 
                 CRM_ASSERT(lrm_state != NULL);
                 process_lrm_event(lrm_state, op, NULL, NULL);
             }
             break;
 
         default:
             break;
     }
 }
 
 static void
 try_local_executor_connect(long long action, fsa_data_t *msg_data,
                            lrm_state_t *lrm_state)
 {
     int rc = pcmk_rc_ok;
 
     crm_debug("Connecting to the local executor");
 
     // If we can connect, great
     rc = controld_connect_local_executor(lrm_state);
     if (rc == pcmk_rc_ok) {
         controld_set_fsa_input_flags(R_LRM_CONNECTED);
         crm_info("Connection to the local executor established");
         return;
     }
 
     // Otherwise, if we can try again, set a timer to do so
     if (lrm_state->num_lrm_register_fails < MAX_LRM_REG_FAILS) {
         crm_warn("Failed to connect to the local executor %d time%s "
                  "(%d max): %s", lrm_state->num_lrm_register_fails,
                  pcmk__plural_s(lrm_state->num_lrm_register_fails),
                  MAX_LRM_REG_FAILS, pcmk_rc_str(rc));
         controld_start_wait_timer();
         crmd_fsa_stall(FALSE);
         return;
     }
 
     // Otherwise give up
     crm_err("Failed to connect to the executor the max allowed "
             "%d time%s: %s", lrm_state->num_lrm_register_fails,
             pcmk__plural_s(lrm_state->num_lrm_register_fails),
             pcmk_rc_str(rc));
     register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
 }
 
 /*	 A_LRM_CONNECT	*/
 void
 do_lrm_control(long long action,
                enum crmd_fsa_cause cause,
                enum crmd_fsa_state cur_state,
                enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     /* This only pertains to local executor connections. Remote connections are
      * handled as resources within the scheduler. Connecting and disconnecting
      * from remote executor instances is handled differently.
      */
 
     lrm_state_t *lrm_state = NULL;
 
     if (controld_globals.our_nodename == NULL) {
         return; /* Nothing to do */
     }
     lrm_state = lrm_state_find_or_create(controld_globals.our_nodename);
     if (lrm_state == NULL) {
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
         return;
     }
 
     if (action & A_LRM_DISCONNECT) {
         if (lrm_state_verify_stopped(lrm_state, cur_state, LOG_INFO) == FALSE) {
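             // Stall and retry later if disconnecting is all we were asked to do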
             if (action == A_LRM_DISCONNECT) {
                 crmd_fsa_stall(FALSE);
                 return;
             }
         }
 
         controld_clear_fsa_input_flags(R_LRM_CONNECTED);
         lrm_state_disconnect(lrm_state);
         lrm_state_reset_tables(lrm_state, FALSE);
     }
 
     if (action & A_LRM_CONNECT) {
         try_local_executor_connect(action, msg_data, lrm_state);
     }
 
     if (action & ~(A_LRM_CONNECT | A_LRM_DISCONNECT)) {
         crm_err("Unexpected action %s in %s", fsa_action2string(action),
                 __func__);
     }
 }
 
 static gboolean
 lrm_state_verify_stopped(lrm_state_t * lrm_state, enum crmd_fsa_state cur_state, int log_level)
 {
     int counter = 0;
     gboolean rc = TRUE;
     const char *when = "lrm disconnect";
 
     GHashTableIter gIter;
     const char *key = NULL;
     rsc_history_t *entry = NULL;
     active_op_t *pending = NULL;
 
     crm_debug("Checking for active resources before exit");
 
     if (cur_state == S_TERMINATE) {
         log_level = LOG_ERR;
         when = "shutdown";
 
     } else if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) {
         when = "shutdown... waiting";
     }
 
     if ((lrm_state->active_ops != NULL) && lrm_state_is_connected(lrm_state)) {
         guint removed = g_hash_table_foreach_remove(lrm_state->active_ops,
                                                     stop_recurring_actions,
                                                     lrm_state);
         guint nremaining = g_hash_table_size(lrm_state->active_ops);
 
         if (removed || nremaining) {
             crm_notice("Stopped %u recurring operation%s at %s (%u remaining)",
                        removed, pcmk__plural_s(removed), when, nremaining);
         }
     }
 
     if (lrm_state->active_ops != NULL) {
         g_hash_table_iter_init(&gIter, lrm_state->active_ops);
         while (g_hash_table_iter_next(&gIter, NULL, (void **)&pending)) {
             /* Ignore recurring actions in the shutdown calculations */
             if (pending->interval_ms == 0) {
                 counter++;
             }
         }
     }
 
     if (counter > 0) {
         do_crm_log(log_level, "%d pending executor operation%s at %s",
                    counter, pcmk__plural_s(counter), when);
 
         if ((cur_state == S_TERMINATE)
             || !pcmk_is_set(controld_globals.fsa_input_register,
                             R_SENT_RSC_STOP)) {
             g_hash_table_iter_init(&gIter, lrm_state->active_ops);
             while (g_hash_table_iter_next(&gIter, (gpointer*)&key, (gpointer*)&pending)) {
                 do_crm_log(log_level, "Pending action: %s (%s)", key, pending->op_key);
             }
 
         } else {
             rc = FALSE;
         }
         return rc;
     }
 
     if (lrm_state->resource_history == NULL) {
         return rc;
     }
 
     if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) {
         /* At this point we're not waiting, we're just shutting down */
         when = "shutdown";
     }
 
     counter = 0;
     g_hash_table_iter_init(&gIter, lrm_state->resource_history);
     while (g_hash_table_iter_next(&gIter, NULL, (gpointer*)&entry)) {
         if (is_rsc_active(lrm_state, entry->id) == FALSE) {
             continue;
         }
 
         counter++;
         if (log_level == LOG_ERR) {
             crm_info("Found %s active at %s", entry->id, when);
         } else {
             crm_trace("Found %s active at %s", entry->id, when);
         }
         if (lrm_state->active_ops != NULL) {
             GHashTableIter hIter;
 
             g_hash_table_iter_init(&hIter, lrm_state->active_ops);
             while (g_hash_table_iter_next(&hIter, (gpointer*)&key, (gpointer*)&pending)) {
                 if (pcmk__str_eq(entry->id, pending->rsc_id, pcmk__str_none)) {
                     crm_notice("%sction %s (%s) incomplete at %s",
                                pending->interval_ms == 0 ? "A" : "Recurring a",
                                key, pending->op_key, when);
                 }
             }
         }
     }
 
     if (counter) {
         crm_err("%d resource%s active at %s",
                 counter, (counter == 1)? " was" : "s were", when);
     }
 
     return rc;
 }
 
 static gboolean
 is_rsc_active(lrm_state_t * lrm_state, const char *rsc_id)
 {
     rsc_history_t *entry = NULL;
 
     entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id);
     if (entry == NULL || entry->last == NULL) {
         return FALSE;
     }
 
     crm_trace("Processing %s: %s.%d=%d", rsc_id, entry->last->op_type,
               entry->last->interval_ms, entry->last->rc);
     if ((entry->last->rc == PCMK_OCF_OK)
         && pcmk__str_eq(entry->last->op_type, PCMK_ACTION_STOP,
                         pcmk__str_casei)) {
         return FALSE;
 
     } else if (entry->last->rc == PCMK_OCF_OK
                && pcmk__str_eq(entry->last->op_type, PCMK_ACTION_MIGRATE_TO,
                                pcmk__str_casei)) {
         // A stricter check is too complex ... leave that to the scheduler
         return FALSE;
 
     } else if (entry->last->rc == PCMK_OCF_NOT_RUNNING) {
         return FALSE;
 
     } else if ((entry->last->interval_ms == 0)
                && (entry->last->rc == PCMK_OCF_NOT_CONFIGURED)) {
         /* Badly configured resources can't be reliably stopped */
         return FALSE;
     }
 
     return TRUE;
 }
 
 static gboolean
 build_active_RAs(lrm_state_t * lrm_state, xmlNode * rsc_list)
 {
     GHashTableIter iter;
     rsc_history_t *entry = NULL;
 
     g_hash_table_iter_init(&iter, lrm_state->resource_history);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&entry)) {
 
         GList *gIter = NULL;
         xmlNode *xml_rsc = pcmk__xe_create(rsc_list, PCMK__XE_LRM_RESOURCE);
 
         crm_xml_add(xml_rsc, PCMK_XA_ID, entry->id);
         crm_xml_add(xml_rsc, PCMK_XA_TYPE, entry->rsc.type);
         crm_xml_add(xml_rsc, PCMK_XA_CLASS, entry->rsc.standard);
         crm_xml_add(xml_rsc, PCMK_XA_PROVIDER, entry->rsc.provider);
 
         if (entry->last && entry->last->params) {
             static const char *name = CRM_META "_" PCMK__META_CONTAINER;
             const char *container = g_hash_table_lookup(entry->last->params,
                                                         name);
 
             if (container) {
                 crm_trace("Resource %s is a part of container resource %s", entry->id, container);
                 crm_xml_add(xml_rsc, PCMK__META_CONTAINER, container);
             }
         }
         controld_add_resource_history_xml(xml_rsc, &(entry->rsc), entry->failed,
                                           lrm_state->node_name);
         controld_add_resource_history_xml(xml_rsc, &(entry->rsc), entry->last,
                                           lrm_state->node_name);
         for (gIter = entry->recurring_op_list; gIter != NULL; gIter = gIter->next) {
             controld_add_resource_history_xml(xml_rsc, &(entry->rsc), gIter->data,
                                               lrm_state->node_name);
         }
     }
 
     return FALSE;
 }
 
 xmlNode *
 controld_query_executor_state(void)
 {
     xmlNode *xml_state = NULL;
     xmlNode *xml_data = NULL;
     xmlNode *rsc_list = NULL;
     crm_node_t *peer = NULL;
     lrm_state_t *lrm_state = lrm_state_find(controld_globals.our_nodename);
 
     if (!lrm_state) {
         crm_err("Could not find executor state for node %s",
                 controld_globals.our_nodename);
         return NULL;
     }
 
     peer = pcmk__get_node(0, lrm_state->node_name, NULL, pcmk__node_search_any);
     CRM_CHECK(peer != NULL, return NULL);
 
     xml_state = create_node_state_update(peer,
                                          node_update_cluster|node_update_peer,
                                          NULL, __func__);
     if (xml_state == NULL) {
         return NULL;
     }
 
     xml_data = pcmk__xe_create(xml_state, PCMK__XE_LRM);
     crm_xml_add(xml_data, PCMK_XA_ID, peer->uuid);
     rsc_list = pcmk__xe_create(xml_data, PCMK__XE_LRM_RESOURCES);
 
     /* Build a list of active resources (not necessarily still running) */
     build_active_RAs(lrm_state, rsc_list);
 
     crm_log_xml_trace(xml_state, "Current executor state");
 
     return xml_state;
 }
 
 /*!
  * \internal
  * \brief Map standard Pacemaker return code to operation status and OCF code
  *
  * \param[out] event  Executor event whose status and return code should be set
  * \param[in]  rc     Standard Pacemaker return code
  */
 void
 controld_rc2event(lrmd_event_data_t *event, int rc)
 {
     /* This is called for cleanup requests from controller peers/clients, not
      * for resource actions, so no exit reason is needed.
      */
     switch (rc) {
         case pcmk_rc_ok:
             lrmd__set_result(event, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
             break;
         case EACCES:
             lrmd__set_result(event, PCMK_OCF_INSUFFICIENT_PRIV,
                              PCMK_EXEC_ERROR, NULL);
             break;
         default:
             lrmd__set_result(event, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR,
                              NULL);
             break;
     }
 }
 
 /*!
  * \internal
  * \brief Trigger a new transition after CIB status was deleted
  *
  * If a CIB status delete was not expected (as part of the transition graph),
  * trigger a new transition by updating the (arbitrary) "last-lrm-refresh"
  * cluster property.
  *
  * \param[in] from_sys  IPC name that requested the delete
  * \param[in] rsc_id    Resource whose status was deleted (for logging only)
  */
 void
 controld_trigger_delete_refresh(const char *from_sys, const char *rsc_id)
 {
     if (!pcmk__str_eq(from_sys, CRM_SYSTEM_TENGINE, pcmk__str_casei)) {
         char *now_s = crm_strdup_printf("%lld", (long long) time(NULL));
 
         crm_debug("Triggering a refresh after %s cleaned %s", from_sys, rsc_id);
         cib__update_node_attr(controld_globals.logger_out,
                               controld_globals.cib_conn, cib_none,
                               PCMK_XE_CRM_CONFIG, NULL, NULL, NULL, NULL,
                               "last-lrm-refresh", now_s, NULL, NULL);
         free(now_s);
     }
 }
 
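 /*!
  * \internal
  * \brief Notify the requester of a resource deletion result
  *
  * Send a direct acknowledgement of a resource deletion to the requesting
  * subsystem and host, then trigger a new transition if needed.
  *
  * \param[in,out] lrm_state  Executor state the deletion was for
  * \param[in,out] input      Request that asked for the deletion
  * \param[in]     rsc_id     ID of resource that was deleted
  * \param[in]     rc         Legacy return code of the deletion
  */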
 static void
 notify_deleted(lrm_state_t * lrm_state, ha_msg_input_t * input, const char *rsc_id, int rc)
 {
     lrmd_event_data_t *op = NULL;
     const char *from_sys = crm_element_value(input->msg, PCMK__XA_CRM_SYS_FROM);
     const char *from_host = crm_element_value(input->msg, PCMK__XA_SRC);
 
     crm_info("Notifying %s on %s that %s was%s deleted",
              from_sys, (from_host? from_host : "localhost"), rsc_id,
              ((rc == pcmk_ok)? "" : " not"));
     op = construct_op(lrm_state, input->xml, rsc_id, PCMK_ACTION_DELETE);
     controld_rc2event(op, pcmk_legacy2rc(rc));
     controld_ack_event_directly(from_host, from_sys, NULL, op, rsc_id);
     lrmd_free_event(op);
     controld_trigger_delete_refresh(from_sys, rsc_id);
 }
 
 static gboolean
 lrm_remove_deleted_rsc(gpointer key, gpointer value, gpointer user_data)
 {
     struct delete_event_s *event = user_data;
     struct pending_deletion_op_s *op = value;
 
     if (pcmk__str_eq(event->rsc, op->rsc, pcmk__str_none)) {
         notify_deleted(event->lrm_state, op->input, event->rsc, event->rc);
         return TRUE;
     }
     return FALSE;
 }
 
 static gboolean
 lrm_remove_deleted_op(gpointer key, gpointer value, gpointer user_data)
 {
     const char *rsc = user_data;
     active_op_t *pending = value;
 
     if (pcmk__str_eq(rsc, pending->rsc_id, pcmk__str_none)) {
         crm_info("Removing op %s:%d for deleted resource %s",
                  pending->op_key, pending->call_id, rsc);
         return TRUE;
     }
     return FALSE;
 }
 
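 /*!
  * \internal
  * \brief Remove a resource from the controller's resource history
  *
  * If the executor deletion succeeded, remove the resource from the local
  * resource history (and, if requested, from the CIB), and drop any of its
  * pending operations. In any case, notify the requester (if any) and process
  * any matching pending deletions.
  *
  * \param[in,out] lrm_state  Executor state to remove the resource from
  * \param[in,out] input      Request that triggered the removal (if any)
  * \param[in]     rsc_id     ID of resource to remove
  * \param[in,out] rsc_iter   If not NULL, iterator positioned at history entry
  * \param[in]     rc         Legacy return code of the executor deletion
  * \param[in]     user_name  ACL user that requested the removal (if any)
  * \param[in]     from_cib   Whether to remove the resource's history from CIB
  */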
 static void
 delete_rsc_entry(lrm_state_t *lrm_state, ha_msg_input_t *input,
                  const char *rsc_id, GHashTableIter *rsc_iter, int rc,
                  const char *user_name, bool from_cib)
 {
     struct delete_event_s event;
 
     CRM_CHECK(rsc_id != NULL, return);
 
     if (rc == pcmk_ok) {
         char *rsc_id_copy = pcmk__str_copy(rsc_id);
 
         if (rsc_iter) {
             g_hash_table_iter_remove(rsc_iter);
         } else {
             g_hash_table_remove(lrm_state->resource_history, rsc_id_copy);
         }
 
         if (from_cib) {
             controld_delete_resource_history(rsc_id_copy, lrm_state->node_name,
                                              user_name, crmd_cib_smart_opt());
         }
         g_hash_table_foreach_remove(lrm_state->active_ops,
                                     lrm_remove_deleted_op, rsc_id_copy);
         free(rsc_id_copy);
     }
 
     if (input) {
         notify_deleted(lrm_state, input, rsc_id, rc);
     }
 
     event.rc = rc;
     event.rsc = rsc_id;
     event.lrm_state = lrm_state;
     g_hash_table_foreach_remove(lrm_state->deletion_ops, lrm_remove_deleted_rsc, &event);
 }
 
 static inline gboolean
 last_failed_matches_op(rsc_history_t *entry, const char *op, guint interval_ms)
 {
     if (entry == NULL) {
         return FALSE;
     }
     if (op == NULL) {
         return TRUE;
     }
     return (pcmk__str_eq(op, entry->failed->op_type, pcmk__str_casei)
             && (interval_ms == entry->failed->interval_ms));
 }
 
 /*!
  * \internal
  * \brief Clear a resource's last failure
  *
  * Erase a resource's last failure on a particular node from both the
  * LRM resource history in the CIB, and the resource history remembered
  * for the LRM state.
  *
  * \param[in] rsc_id      Resource name
  * \param[in] node_name   Node name
  * \param[in] operation   If specified, only clear if matching this operation
  * \param[in] interval_ms If operation is specified, it has this interval
  */
 void
 lrm_clear_last_failure(const char *rsc_id, const char *node_name,
                        const char *operation, guint interval_ms)
 {
     lrm_state_t *lrm_state = lrm_state_find(node_name);
 
     if (lrm_state == NULL) {
         return;
     }
     if (lrm_state->resource_history != NULL) {
         rsc_history_t *entry = g_hash_table_lookup(lrm_state->resource_history,
                                                    rsc_id);
 
         if (last_failed_matches_op(entry, operation, interval_ms)) {
             lrmd_free_event(entry->failed);
             entry->failed = NULL;
         }
     }
 }
 
 /* Returns: gboolean - cancellation is in progress */
 static gboolean
 cancel_op(lrm_state_t * lrm_state, const char *rsc_id, const char *key, int op, gboolean remove)
 {
     int rc = pcmk_ok;
     char *local_key = NULL;
     active_op_t *pending = NULL;
 
     CRM_CHECK(op != 0, return FALSE);
     CRM_CHECK(rsc_id != NULL, return FALSE);
     if (key == NULL) {
         local_key = make_stop_id(rsc_id, op);
         key = local_key;
     }
     pending = g_hash_table_lookup(lrm_state->active_ops, key);
 
     if (pending) {
         if (remove && !pcmk_is_set(pending->flags, active_op_remove)) {
             controld_set_active_op_flags(pending, active_op_remove);
             crm_debug("Scheduling %s for removal", key);
         }
 
         if (pcmk_is_set(pending->flags, active_op_cancelled)) {
             crm_debug("Operation %s already cancelled", key);
             free(local_key);
             return FALSE;
         }
         controld_set_active_op_flags(pending, active_op_cancelled);
 
     } else {
         crm_info("No pending op found for %s", key);
         free(local_key);
         return FALSE;
     }
 
     crm_debug("Cancelling op %d for %s (%s)", op, rsc_id, key);
     rc = lrm_state_cancel(lrm_state, pending->rsc_id, pending->op_type,
                           pending->interval_ms);
     if (rc == pcmk_ok) {
         crm_debug("Op %d for %s (%s): cancelled", op, rsc_id, key);
         free(local_key);
         return TRUE;
     }
 
     crm_debug("Op %d for %s (%s): Nothing to cancel", op, rsc_id, key);
     /* The caller needs to make sure the entry is removed from the active
      * operations list, usually by returning TRUE from the worker function
      * supplied to g_hash_table_foreach_remove().
      *
      * Not removing the entry from the active operations list will block the
      * node from shutting down.
      */
     free(local_key);
     return FALSE;
 }
 
 struct cancel_data {
     gboolean done;
     gboolean remove;
     const char *key;
     lrmd_rsc_info_t *rsc;
     lrm_state_t *lrm_state;
 };
 
 static gboolean
 cancel_action_by_key(gpointer key, gpointer value, gpointer user_data)
 {
     gboolean remove = FALSE;
     struct cancel_data *data = user_data;
     active_op_t *op = value;
 
     if (pcmk__str_eq(op->op_key, data->key, pcmk__str_none)) {
         data->done = TRUE;
         remove = !cancel_op(data->lrm_state, data->rsc->id, key, op->call_id, data->remove);
     }
     return remove;
 }
 
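 /*!
  * \internal
  * \brief Cancel any in-flight operation matching an operation key
  *
  * \param[in,out] lrm_state  Executor state to cancel the operation on
  * \param[in,out] rsc        Executor information for the resource
  * \param[in]     key        Operation key to match against active operations
  * \param[in]     remove     Whether to schedule matching history for removal
  *
  * \return TRUE if a matching operation was found, otherwise FALSE
  */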
 static gboolean
 cancel_op_key(lrm_state_t * lrm_state, lrmd_rsc_info_t * rsc, const char *key, gboolean remove)
 {
     guint removed = 0;
     struct cancel_data data;
 
     CRM_CHECK(rsc != NULL, return FALSE);
     CRM_CHECK(key != NULL, return FALSE);
 
     data.key = key;
     data.rsc = rsc;
     data.done = FALSE;
     data.remove = remove;
     data.lrm_state = lrm_state;
 
     removed = g_hash_table_foreach_remove(lrm_state->active_ops,
                                           cancel_action_by_key, &data);
     crm_trace("Removed %u op cache entries, new size: %u",
               removed, g_hash_table_size(lrm_state->active_ops));
     return data.done;
 }
 
 /*!
  * \internal
  * \brief Retrieve resource information from LRM
  *
  * \param[in,out]  lrm_state  Executor connection state to use
  * \param[in]      rsc_xml    XML containing resource configuration
  * \param[in]      do_create  If true, register resource if not already
  * \param[out]     rsc_info   Where to store information obtained from executor
  *
  * \retval pcmk_ok   Success (and rsc_info holds newly allocated result)
  * \retval -EINVAL   Required information is missing from arguments
  * \retval -ENOTCONN No active connection to LRM
  * \retval -ENODEV   Resource not found
  * \retval -errno    Error communicating with executor when registering resource
  *
  * \note Caller is responsible for freeing result on success.
  */
 static int
 get_lrm_resource(lrm_state_t *lrm_state, const xmlNode *rsc_xml,
                  gboolean do_create, lrmd_rsc_info_t **rsc_info)
 {
     const char *id = pcmk__xe_id(rsc_xml);
 
     CRM_CHECK(lrm_state && rsc_xml && rsc_info, return -EINVAL);
     CRM_CHECK(id, return -EINVAL);
 
     if (lrm_state_is_connected(lrm_state) == FALSE) {
         return -ENOTCONN;
     }
 
     crm_trace("Retrieving resource information for %s from the executor", id);
     *rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0);
 
     // If resource isn't known by ID, try clone name, if provided
     if (!*rsc_info) {
         const char *long_id = crm_element_value(rsc_xml, PCMK__XA_LONG_ID);
 
         if (long_id) {
             *rsc_info = lrm_state_get_rsc_info(lrm_state, long_id, 0);
         }
     }
 
     if ((*rsc_info == NULL) && do_create) {
         const char *class = crm_element_value(rsc_xml, PCMK_XA_CLASS);
         const char *provider = crm_element_value(rsc_xml, PCMK_XA_PROVIDER);
         const char *type = crm_element_value(rsc_xml, PCMK_XA_TYPE);
         int rc;
 
         crm_trace("Registering resource %s with the executor", id);
         rc = lrm_state_register_rsc(lrm_state, id, class, provider, type,
                                     lrmd_opt_drop_recurring);
         if (rc != pcmk_ok) {
             fsa_data_t *msg_data = NULL;
 
             crm_err("Could not register resource %s with the executor on %s: %s "
                     QB_XS " rc=%d",
                     id, lrm_state->node_name, pcmk_strerror(rc), rc);
 
             /* Register this as an internal error if this involves the local
              * executor. Otherwise, we're likely dealing with an unresponsive
              * remote node, which is not an FSA failure.
              */
             if (lrm_state_is_local(lrm_state) == TRUE) {
                 register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL);
             }
             return rc;
         }
 
         *rsc_info = lrm_state_get_rsc_info(lrm_state, id, 0);
     }
     return *rsc_info? pcmk_ok : -ENODEV;
 }
 
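 /*!
  * \internal
  * \brief Remove a resource from the executor and the controller's history
  *
  * \param[in,out] lrm_state   Executor state to remove the resource from
  * \param[in]     id          ID of resource to remove
  * \param[in,out] rsc         Executor information for the resource (if any)
  * \param[in,out] iter        If not NULL, iterator positioned at history entry
  * \param[in]     sys         IPC name of requesting subsystem
  * \param[in]     user        ACL user that made the request (if any)
  * \param[in,out] request     Request that asked for the removal (if any)
  * \param[in]     unregister  Whether to unregister the resource from executor
  * \param[in]     from_cib    Whether to remove the resource's history from CIB
  */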
 static void
 delete_resource(lrm_state_t *lrm_state, const char *id, lrmd_rsc_info_t *rsc,
                 GHashTableIter *iter, const char *sys, const char *user,
                 ha_msg_input_t *request, bool unregister, bool from_cib)
 {
     int rc = pcmk_ok;
 
     crm_info("Removing resource %s from executor for %s%s%s",
              id, sys, (user? " as " : ""), (user? user : ""));
 
     if (rsc && unregister) {
         rc = lrm_state_unregister_rsc(lrm_state, id, 0);
     }
 
     if (rc == pcmk_ok) {
         crm_trace("Resource %s deleted from executor", id);
     } else if (rc == -EINPROGRESS) {
         crm_info("Deletion of resource '%s' from executor is pending", id);
         if (request) {
             struct pending_deletion_op_s *op = NULL;
             char *ref = crm_element_value_copy(request->msg, PCMK_XA_REFERENCE);
 
             op = pcmk__assert_alloc(1, sizeof(struct pending_deletion_op_s));
             op->rsc = pcmk__str_copy(rsc->id);
             op->input = copy_ha_msg_input(request);
             g_hash_table_insert(lrm_state->deletion_ops, ref, op);
         }
         return;
     } else {
         crm_warn("Could not delete '%s' from executor for %s%s%s: %s "
                  QB_XS " rc=%d", id, sys, (user? " as " : ""),
                  (user? user : ""), pcmk_strerror(rc), rc);
     }
 
     delete_rsc_entry(lrm_state, request, id, iter, rc, user, from_cib);
 }
 
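 /*!
  * \internal
  * \brief Choose a call ID for a synthesized operation result
  *
  * \param[in,out] lrm_state  Executor state with resource history (if any)
  * \param[in]     rsc_id     ID of resource the result is for
  *
  * \return Call ID greater than the resource's last recorded call ID, or an
  *         arbitrarily high value if the resource has no history
  */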
 static int
 get_fake_call_id(lrm_state_t *lrm_state, const char *rsc_id)
 {
     int call_id = 999999999;
     rsc_history_t *entry = NULL;
 
     if(lrm_state) {
         entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id);
     }
 
     /* Make sure the call ID is greater than that of the last successful
      * operation; otherwise the failure would not trigger recovery of the
      * resource, because it could appear to have occurred before the
      * successful start */
     if (entry) {
         call_id = entry->last_callid + 1;
     }
 
     if (call_id < 0) {
         call_id = 1;
     }
     return call_id;
 }
 
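 /*!
  * \internal
  * \brief Fill in a synthesized operation result
  *
  * \param[in,out] lrm_state    Executor state with resource history (if any)
  * \param[in,out] op           Executor event to fill in
  * \param[in]     op_status    Execution status to use for the result
  * \param[in]     op_exitcode  OCF exit code to use for the result
  * \param[in]     exit_reason  Human-friendly exit reason, if any
  */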
 static void
 fake_op_status(lrm_state_t *lrm_state, lrmd_event_data_t *op, int op_status,
                enum ocf_exitcode op_exitcode, const char *exit_reason)
 {
     op->call_id = get_fake_call_id(lrm_state, op->rsc_id);
     op->t_run = time(NULL);
-    op->t_rcchange = (unsigned int) op->t_run;
+    op->t_rcchange = op->t_run;
     lrmd__set_result(op, op_exitcode, op_status, exit_reason);
 }
 
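 /*!
  * \internal
  * \brief Clear a node's resource history so everything gets reprobed
  *
  * Delete every resource in the node's executor history (recursing into
  * Pacemaker Remote nodes if all nodes are being reprobed), then erase the
  * node's resource history from the CIB.
  *
  * \param[in,out] lrm_state          Executor state for node being reprobed
  * \param[in]     from_sys           IPC name of requesting subsystem
  * \param[in]     from_host          Host that requested the reprobe
  * \param[in]     user_name          ACL user requesting the reprobe (if any)
  * \param[in]     is_remote_node     Whether node is a Pacemaker Remote node
  * \param[in]     reprobe_all_nodes  Whether all nodes are being reprobed
  */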
 static void
 force_reprobe(lrm_state_t *lrm_state, const char *from_sys,
               const char *from_host, const char *user_name,
               gboolean is_remote_node, bool reprobe_all_nodes)
 {
     GHashTableIter gIter;
     rsc_history_t *entry = NULL;
 
     crm_info("Clearing resource history on node %s", lrm_state->node_name);
     g_hash_table_iter_init(&gIter, lrm_state->resource_history);
     while (g_hash_table_iter_next(&gIter, NULL, (void **)&entry)) {
         /* Only unregister the resource during a reprobe if it is not a remote
          * connection resource; otherwise, unregistering the connection would
          * terminate the remote node's membership */
         bool unregister = true;
 
         if (is_remote_lrmd_ra(NULL, NULL, entry->id)) {
             unregister = false;
 
             if (reprobe_all_nodes) {
                 lrm_state_t *remote_lrm_state = lrm_state_find(entry->id);
 
                 if (remote_lrm_state != NULL) {
                     /* If reprobing all nodes, be sure to reprobe the remote
                      * node before clearing its connection resource
                      */
                     force_reprobe(remote_lrm_state, from_sys, from_host,
                                   user_name, TRUE, reprobe_all_nodes);
                 }
             }
         }
 
         /* Don't delete from the CIB, since we'll delete the whole node's LRM
          * state from the CIB soon
          */
         delete_resource(lrm_state, entry->id, &entry->rsc, &gIter, from_sys,
                         user_name, NULL, unregister, false);
     }
 
     /* Now delete the copy in the CIB */
     controld_delete_node_state(lrm_state->node_name, controld_section_lrm,
                                cib_none);
 }
 
 /*!
  * \internal
  * \brief Fail a requested action without actually executing it
  *
  * For an action that can't be executed, process it similarly to an actual
  * execution result, with specified error status (except for notify actions,
  * which will always be treated as successful).
  *
  * \param[in,out] lrm_state    Executor connection that action is for
  * \param[in]     action       Action XML from request
  * \param[in]     op_status    Desired operation status to use
  * \param[in]     rc           Desired return code to use
  * \param[in]     exit_reason  Human-friendly detail, if error
  */
 static void
 synthesize_lrmd_failure(lrm_state_t *lrm_state, const xmlNode *action,
                         int op_status, enum ocf_exitcode rc,
                         const char *exit_reason)
 {
     lrmd_event_data_t *op = NULL;
     const char *operation = crm_element_value(action, PCMK_XA_OPERATION);
     const char *target_node = crm_element_value(action, PCMK__META_ON_NODE);
     xmlNode *xml_rsc = pcmk__xe_first_child(action, PCMK_XE_PRIMITIVE, NULL,
                                             NULL);
 
     if ((xml_rsc == NULL) || (pcmk__xe_id(xml_rsc) == NULL)) {
         /* @TODO Should we do something else, like direct ack? */
         crm_info("Can't fake %s failure (%d) on %s without resource configuration",
                  crm_element_value(action, PCMK__XA_OPERATION_KEY), rc,
                  target_node);
         return;
 
     } else if(operation == NULL) {
         /* This probably came from crm_resource -C, nothing to do */
         crm_info("Can't fake %s failure (%d) on %s without operation",
                  pcmk__xe_id(xml_rsc), rc, target_node);
         return;
     }
 
     op = construct_op(lrm_state, action, pcmk__xe_id(xml_rsc), operation);
 
     if (pcmk__str_eq(operation, PCMK_ACTION_NOTIFY, pcmk__str_casei)) {
         // Notifications can't fail
         fake_op_status(lrm_state, op, PCMK_EXEC_DONE, PCMK_OCF_OK, NULL);
     } else {
         fake_op_status(lrm_state, op, op_status, rc, exit_reason);
     }
 
     crm_info("Faking " PCMK__OP_FMT " result (%d) on %s",
              op->rsc_id, op->op_type, op->interval_ms, op->rc, target_node);
 
     // Process the result as if it came from the LRM
     process_lrm_event(lrm_state, op, NULL, action);
     lrmd_free_event(op);
 }
 
 /*!
  * \internal
  * \brief Get target of an LRM operation (replacing \p NULL with local node
  *        name)
  *
  * \param[in] xml  LRM operation data XML
  *
  * \return LRM operation target node name (local node or Pacemaker Remote node)
  */
 static const char *
 lrm_op_target(const xmlNode *xml)
 {
     const char *target = NULL;
 
     if (xml) {
         target = crm_element_value(xml, PCMK__META_ON_NODE);
     }
     if (target == NULL) {
         target = controld_globals.our_nodename;
     }
     return target;
 }
 
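 /*!
  * \internal
  * \brief Fail a resource as requested, by synthesizing an executor event
  *
  * \param[in,out] xml        Request XML containing the resource to fail
  * \param[in,out] lrm_state  Executor state that the resource is on
  * \param[in]     user_name  ACL user that made the request (if any)
  * \param[in]     from_host  Host that made the request
  * \param[in]     from_sys   IPC name of subsystem that made the request
  */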
 static void
 fail_lrm_resource(xmlNode *xml, lrm_state_t *lrm_state, const char *user_name,
                   const char *from_host, const char *from_sys)
 {
     lrmd_event_data_t *op = NULL;
     lrmd_rsc_info_t *rsc = NULL;
     xmlNode *xml_rsc = pcmk__xe_first_child(xml, PCMK_XE_PRIMITIVE, NULL, NULL);
 
     CRM_CHECK(xml_rsc != NULL, return);
 
     /* The executor simply executes operations and reports the results, without
      * any concept of success or failure, so to fail a resource, we must fake
      * what a failure looks like.
      *
      * To do this, we create a fake executor operation event for the resource,
      * and pass that event to the executor client callback so it will be
      * processed as if it came from the executor.
      */
     op = construct_op(lrm_state, xml, pcmk__xe_id(xml_rsc), "asyncmon");
 
     free((char*) op->user_data);
     op->user_data = NULL;
     op->interval_ms = 0;
 
     if (user_name && !pcmk__is_privileged(user_name)) {
         crm_err("%s does not have permission to fail %s",
                 user_name, pcmk__xe_id(xml_rsc));
         fake_op_status(lrm_state, op, PCMK_EXEC_ERROR,
                        PCMK_OCF_INSUFFICIENT_PRIV,
                        "Unprivileged user cannot fail resources");
         controld_ack_event_directly(from_host, from_sys, NULL, op,
                                     pcmk__xe_id(xml_rsc));
         lrmd_free_event(op);
         return;
     }
 
 
     if (get_lrm_resource(lrm_state, xml_rsc, TRUE, &rsc) == pcmk_ok) {
         crm_info("Failing resource %s...", rsc->id);
         fake_op_status(lrm_state, op, PCMK_EXEC_DONE, PCMK_OCF_UNKNOWN_ERROR,
                        "Simulated failure");
         process_lrm_event(lrm_state, op, NULL, xml);
         op->rc = PCMK_OCF_OK; // The request to fail the resource succeeded
         lrmd_free_rsc_info(rsc);
 
     } else {
         crm_info("Cannot find/create resource in order to fail it...");
         crm_log_xml_warn(xml, "bad input");
         fake_op_status(lrm_state, op, PCMK_EXEC_ERROR, PCMK_OCF_UNKNOWN_ERROR,
                        "Cannot fail unknown resource");
     }
 
     controld_ack_event_directly(from_host, from_sys, NULL, op,
                                 pcmk__xe_id(xml_rsc));
     lrmd_free_event(op);
 }
 
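 /*!
  * \internal
  * \brief Handle a reprobe request
  *
  * Force rediscovery of resource state on the requested node(s), and
  * acknowledge the request if it did not come from the scheduler or the
  * transition engine.
  *
  * \param[in,out] lrm_state          Executor state for node to reprobe
  * \param[in]     from_sys           IPC name of requesting subsystem
  * \param[in]     from_host          Host that requested the reprobe
  * \param[in]     user_name          ACL user requesting the reprobe (if any)
  * \param[in]     is_remote_node     Whether node is a Pacemaker Remote node
  * \param[in]     reprobe_all_nodes  Whether all nodes should be reprobed
  */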
 static void
 handle_reprobe_op(lrm_state_t *lrm_state, const char *from_sys,
                   const char *from_host, const char *user_name,
                   gboolean is_remote_node, bool reprobe_all_nodes)
 {
     crm_notice("Forcing the status of all resources to be redetected");
     force_reprobe(lrm_state, from_sys, from_host, user_name, is_remote_node,
                   reprobe_all_nodes);
 
     if (!pcmk__strcase_any_of(from_sys, CRM_SYSTEM_PENGINE, CRM_SYSTEM_TENGINE, NULL)) {
 
         xmlNode *reply = create_request(CRM_OP_INVOKE_LRM, NULL, from_host,
                                         from_sys, CRM_SYSTEM_LRMD,
                                         controld_globals.our_uuid);
 
         crm_debug("ACK'ing re-probe from %s (%s)", from_sys, from_host);
 
         if (relay_message(reply, TRUE) == FALSE) {
             crm_log_xml_err(reply, "Unable to route reply");
         }
         pcmk__xml_free(reply);
     }
 }
 
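 /*!
  * \internal
  * \brief Handle a request to cancel a resource operation
  *
  * \param[in,out] input      Request to cancel an operation
  * \param[in,out] lrm_state  Executor state that the operation is on
  * \param[in,out] rsc        Executor information for the resource
  * \param[in]     from_host  Host that made the request
  * \param[in]     from_sys   IPC name of subsystem that made the request
  *
  * \return FALSE if the request was malformed, otherwise TRUE
  */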
 static bool do_lrm_cancel(ha_msg_input_t *input, lrm_state_t *lrm_state,
               lrmd_rsc_info_t *rsc, const char *from_host, const char *from_sys)
 {
     char *op_key = NULL;
     char *meta_key = NULL;
     int call = 0;
     const char *call_id = NULL;
     const char *op_task = NULL;
     guint interval_ms = 0;
     gboolean in_progress = FALSE;
     xmlNode *params = pcmk__xe_first_child(input->xml, PCMK__XE_ATTRIBUTES,
                                            NULL, NULL);
 
     CRM_CHECK(params != NULL, return FALSE);
 
     meta_key = crm_meta_name(PCMK_XA_OPERATION);
     op_task = crm_element_value(params, meta_key);
     free(meta_key);
     CRM_CHECK(op_task != NULL, return FALSE);
 
     meta_key = crm_meta_name(PCMK_META_INTERVAL);
     if (crm_element_value_ms(params, meta_key, &interval_ms) != pcmk_ok) {
         free(meta_key);
         return FALSE;
     }
     free(meta_key);
 
     op_key = pcmk__op_key(rsc->id, op_task, interval_ms);
 
     meta_key = crm_meta_name(PCMK__XA_CALL_ID);
     call_id = crm_element_value(params, meta_key);
     free(meta_key);
 
     crm_debug("Scheduler requested op %s (call=%s) be cancelled",
               op_key, (call_id? call_id : "NA"));
     pcmk__scan_min_int(call_id, &call, 0);
     if (call == 0) {
         // Normal case when the scheduler cancels a recurring op
         in_progress = cancel_op_key(lrm_state, rsc, op_key, TRUE);
 
     } else {
         // Normal case when the scheduler cancels an orphan op
         in_progress = cancel_op(lrm_state, rsc->id, NULL, call, TRUE);
     }
 
     // Acknowledge cancellation operation if for a remote connection resource
     if (!in_progress || is_remote_lrmd_ra(NULL, NULL, rsc->id)) {
         char *op_id = make_stop_id(rsc->id, call);
 
         if (is_remote_lrmd_ra(NULL, NULL, rsc->id) == FALSE) {
             crm_info("Nothing known about operation %d for %s", call, op_key);
         }
         controld_delete_action_history_by_key(rsc->id, lrm_state->node_name,
                                               op_key, call);
         send_task_ok_ack(lrm_state, input, rsc->id, rsc, op_task,
                          from_host, from_sys);
 
         /* needed at least for cancellation of a remote operation */
         if (lrm_state->active_ops != NULL) {
             g_hash_table_remove(lrm_state->active_ops, op_id);
         }
         free(op_id);
     }
 
     free(op_key);
     return TRUE;
 }
 
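 /*!
  * \internal
  * \brief Handle a request to delete a resource from the executor
  *
  * Check first (via a CIB delete dry run) whether the requester has permission
  * to delete the resource's history. If not, acknowledge the request with an
  * error; otherwise, delete the resource and its history.
  *
  * \param[in,out] input           Request to delete the resource
  * \param[in,out] lrm_state       Executor state that the resource is on
  * \param[in,out] rsc             Executor information for the resource
  * \param[in]     from_sys        IPC name of subsystem that made the request
  * \param[in]     from_host       Host that made the request
  * \param[in]     crm_rsc_delete  Whether the request came from crm_resource
  * \param[in]     user_name       ACL user that made the request (if any)
  */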
 static void
 do_lrm_delete(ha_msg_input_t *input, lrm_state_t *lrm_state,
               lrmd_rsc_info_t *rsc, const char *from_sys, const char *from_host,
               bool crm_rsc_delete, const char *user_name)
 {
     bool unregister = true;
     int cib_rc = controld_delete_resource_history(rsc->id, lrm_state->node_name,
                                                   user_name,
                                                   cib_dryrun|cib_sync_call);
 
     if (cib_rc != pcmk_rc_ok) {
         lrmd_event_data_t *op = NULL;
 
         op = construct_op(lrm_state, input->xml, rsc->id, PCMK_ACTION_DELETE);
 
         /* These are resource clean-ups, not actions, so no exit reason is
          * needed.
          */
         lrmd__set_result(op, pcmk_rc2ocf(cib_rc), PCMK_EXEC_ERROR, NULL);
         controld_ack_event_directly(from_host, from_sys, NULL, op, rsc->id);
         lrmd_free_event(op);
         return;
     }
 
     if (crm_rsc_delete && is_remote_lrmd_ra(NULL, NULL, rsc->id)) {
         unregister = false;
     }
 
     delete_resource(lrm_state, rsc->id, rsc, NULL, from_sys,
                     user_name, input, unregister, true);
 }
 
 // User data for asynchronous metadata execution
 struct metadata_cb_data {
     lrmd_rsc_info_t *rsc;   // Copy of resource information
     xmlNode *input_xml;     // Copy of FSA input XML
 };
 
 static struct metadata_cb_data *
 new_metadata_cb_data(lrmd_rsc_info_t *rsc, xmlNode *input_xml)
 {
     struct metadata_cb_data *data = NULL;
 
     data = pcmk__assert_alloc(1, sizeof(struct metadata_cb_data));
     data->input_xml = pcmk__xml_copy(NULL, input_xml);
     data->rsc = lrmd_copy_rsc_info(rsc);
     return data;
 }
 
 static void
 free_metadata_cb_data(struct metadata_cb_data *data)
 {
     lrmd_free_rsc_info(data->rsc);
     pcmk__xml_free(data->input_xml);
     free(data);
 }
 
 /*!
  * \internal
  * \brief Execute an action after metadata has been retrieved
  *
  * \param[in] pid        Ignored
  * \param[in] result     Result of metadata action
  * \param[in] user_data  Metadata callback data
  */
 static void
 metadata_complete(int pid, const pcmk__action_result_t *result, void *user_data)
 {
     struct metadata_cb_data *data = (struct metadata_cb_data *) user_data;
 
     struct ra_metadata_s *md = NULL;
     lrm_state_t *lrm_state = lrm_state_find(lrm_op_target(data->input_xml));
 
     if ((lrm_state != NULL) && pcmk__result_ok(result)) {
         md = controld_cache_metadata(lrm_state->metadata_cache, data->rsc,
                                      result->action_stdout);
     }
     if (!pcmk_is_set(controld_globals.fsa_input_register, R_HA_DISCONNECTED)) {
         do_lrm_rsc_op(lrm_state, data->rsc, data->input_xml, md);
     }
     free_metadata_cb_data(data);
 }
 
 /*	 A_LRM_INVOKE	*/
 void
 do_lrm_invoke(long long action,
               enum crmd_fsa_cause cause,
               enum crmd_fsa_state cur_state,
               enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     lrm_state_t *lrm_state = NULL;
     const char *crm_op = NULL;
     const char *from_sys = NULL;
     const char *from_host = NULL;
     const char *operation = NULL;
     ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg);
     const char *user_name = NULL;
     const char *target_node = lrm_op_target(input->xml);
     gboolean is_remote_node = FALSE;
     bool crm_rsc_delete = FALSE;
 
     // Message routed to the local node is targeting a specific, non-local node
     is_remote_node = !pcmk__str_eq(target_node, controld_globals.our_nodename,
                                    pcmk__str_casei);
 
     lrm_state = lrm_state_find(target_node);
     if ((lrm_state == NULL) && is_remote_node) {
         crm_err("Failing action because local node has never had connection to remote node %s",
                 target_node);
         synthesize_lrmd_failure(NULL, input->xml, PCMK_EXEC_NOT_CONNECTED,
                                 PCMK_OCF_UNKNOWN_ERROR,
                                 "Local node has no connection to remote");
         return;
     }
     CRM_ASSERT(lrm_state != NULL);
 
     user_name = pcmk__update_acl_user(input->msg, PCMK__XA_CRM_USER, NULL);
     crm_op = crm_element_value(input->msg, PCMK__XA_CRM_TASK);
     from_sys = crm_element_value(input->msg, PCMK__XA_CRM_SYS_FROM);
     if (!pcmk__str_eq(from_sys, CRM_SYSTEM_TENGINE, pcmk__str_none)) {
         from_host = crm_element_value(input->msg, PCMK__XA_SRC);
     }
 
     if (pcmk__str_eq(crm_op, PCMK_ACTION_LRM_DELETE, pcmk__str_none)) {
         if (!pcmk__str_eq(from_sys, CRM_SYSTEM_TENGINE, pcmk__str_none)) {
             crm_rsc_delete = TRUE; // from crm_resource
         }
         operation = PCMK_ACTION_DELETE;
 
     } else if (input->xml != NULL) {
         operation = crm_element_value(input->xml, PCMK_XA_OPERATION);
     }
 
     CRM_CHECK(!pcmk__str_empty(crm_op) || !pcmk__str_empty(operation), return);
 
     crm_trace("'%s' execution request from %s as %s user",
               pcmk__s(crm_op, operation),
               pcmk__s(from_sys, "unknown subsystem"),
               pcmk__s(user_name, "current"));
 
     if (pcmk__str_eq(crm_op, CRM_OP_LRM_FAIL, pcmk__str_none)) {
         fail_lrm_resource(input->xml, lrm_state, user_name, from_host,
                           from_sys);
 
     } else if (pcmk__str_eq(crm_op, CRM_OP_REPROBE, pcmk__str_none)
                || pcmk__str_eq(operation, CRM_OP_REPROBE, pcmk__str_none)) {
         const char *raw_target = NULL;
 
         if (input->xml != NULL) {
             // For CRM_OP_REPROBE, a NULL target means we're targeting all nodes
             raw_target = crm_element_value(input->xml, PCMK__META_ON_NODE);
         }
         handle_reprobe_op(lrm_state, from_sys, from_host, user_name,
                           is_remote_node, (raw_target == NULL));
 
     } else if (operation != NULL) {
         lrmd_rsc_info_t *rsc = NULL;
         xmlNode *xml_rsc = pcmk__xe_first_child(input->xml, PCMK_XE_PRIMITIVE,
                                                 NULL, NULL);
         gboolean create_rsc = !pcmk__str_eq(operation, PCMK_ACTION_DELETE,
                                             pcmk__str_none);
         int rc;
 
         // We can't return anything meaningful without a resource ID
         CRM_CHECK((xml_rsc != NULL) && (pcmk__xe_id(xml_rsc) != NULL), return);
 
         rc = get_lrm_resource(lrm_state, xml_rsc, create_rsc, &rsc);
         if (rc == -ENOTCONN) {
             synthesize_lrmd_failure(lrm_state, input->xml,
                                     PCMK_EXEC_NOT_CONNECTED,
                                     PCMK_OCF_UNKNOWN_ERROR,
                                     "Not connected to remote executor");
             return;
 
         } else if ((rc < 0) && !create_rsc) {
             /* Delete of malformed or nonexistent resource
              * (deleting something that does not exist is a success)
              */
             crm_notice("Not registering resource '%s' for a %s event "
                        QB_XS " get-rc=%d (%s) transition-key=%s",
                        pcmk__xe_id(xml_rsc), operation,
                        rc, pcmk_strerror(rc), pcmk__xe_id(input->xml));
             delete_rsc_entry(lrm_state, input, pcmk__xe_id(xml_rsc), NULL,
                              pcmk_ok, user_name, true);
             return;
 
         } else if (rc == -EINVAL) {
             // Resource operation on malformed resource
             crm_err("Invalid resource definition for %s", pcmk__xe_id(xml_rsc));
             crm_log_xml_warn(input->msg, "invalid resource");
             synthesize_lrmd_failure(lrm_state, input->xml, PCMK_EXEC_ERROR,
                                     PCMK_OCF_NOT_CONFIGURED, // fatal error
                                     "Invalid resource definition");
             return;
 
         } else if (rc < 0) {
             // Error communicating with the executor
             crm_err("Could not register resource '%s' with executor: %s "
                     QB_XS " rc=%d",
                     pcmk__xe_id(xml_rsc), pcmk_strerror(rc), rc);
             crm_log_xml_warn(input->msg, "failed registration");
             synthesize_lrmd_failure(lrm_state, input->xml, PCMK_EXEC_ERROR,
                                     PCMK_OCF_INVALID_PARAM, // hard error
                                     "Could not register resource with executor");
             return;
         }
 
         if (pcmk__str_eq(operation, PCMK_ACTION_CANCEL, pcmk__str_none)) {
             if (!do_lrm_cancel(input, lrm_state, rsc, from_host, from_sys)) {
                 crm_log_xml_warn(input->xml, "Bad command");
             }
 
         } else if (pcmk__str_eq(operation, PCMK_ACTION_DELETE,
                                 pcmk__str_none)) {
             do_lrm_delete(input, lrm_state, rsc, from_sys, from_host,
                           crm_rsc_delete, user_name);
 
         } else {
             struct ra_metadata_s *md = NULL;
 
             /* Getting metadata from cache is OK except for start actions --
              * always refresh from the agent for those, in case the resource
              * agent was updated.
              *
              * @TODO Only refresh metadata for starts if the agent actually
              * changed (using something like inotify, or a hash or modification
              * time of the agent executable).
              */
             if (strcmp(operation, PCMK_ACTION_START) != 0) {
                 md = controld_get_rsc_metadata(lrm_state, rsc,
                                                controld_metadata_from_cache);
             }
 
             if ((md == NULL) && crm_op_needs_metadata(rsc->standard,
                                                       operation)) {
                 /* Most likely, we'll need the agent metadata to record the
                  * pending operation and the operation result. Get it now rather
                  * than wait until then, so the metadata action doesn't eat into
                  * the real action's timeout.
                  *
                  * @TODO Metadata is retrieved via direct execution of the
                  * agent, which has a couple of related issues: the executor
                  * should execute agents, not the controller; and metadata for
                  * Pacemaker Remote nodes should be collected on those nodes,
                  * not locally.
                  */
                 struct metadata_cb_data *data = NULL;
 
                 data = new_metadata_cb_data(rsc, input->xml);
                 crm_info("Retrieving metadata for %s (%s%s%s:%s) asynchronously",
                          rsc->id, rsc->standard,
                          ((rsc->provider == NULL)? "" : ":"),
                          ((rsc->provider == NULL)? "" : rsc->provider),
                          rsc->type);
                 (void) lrmd__metadata_async(rsc, metadata_complete,
                                             (void *) data);
             } else {
                 do_lrm_rsc_op(lrm_state, rsc, input->xml, md);
             }
         }
 
         lrmd_free_rsc_info(rsc);
 
     } else {
         crm_err("Invalid execution request: unknown command '%s' (bug?)",
                 crm_op);
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
     }
 }
 
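 /*!
  * \internal
  * \brief Construct an executor event for a resource operation
  *
  * \param[in] lrm_state  Executor state (for cached stop parameters, if any)
  * \param[in] rsc_op     Resource operation XML from request (if NULL, a bare
  *                       stop event is constructed)
  * \param[in] rsc_id     ID of resource that the operation is for
  * \param[in] operation  Name of operation to construct event for
  *
  * \return Newly constructed executor event
  * \note The caller is responsible for freeing the result with
  *       lrmd_free_event().
  */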
 static lrmd_event_data_t *
 construct_op(const lrm_state_t *lrm_state, const xmlNode *rsc_op,
              const char *rsc_id, const char *operation)
 {
     lrmd_event_data_t *op = NULL;
     const char *op_delay = NULL;
     const char *op_timeout = NULL;
     GHashTable *params = NULL;
 
     xmlNode *primitive = NULL;
     const char *class = NULL;
 
     const char *transition = NULL;
 
     CRM_ASSERT(rsc_id && operation);
 
     op = lrmd_new_event(rsc_id, operation, 0);
     op->type = lrmd_event_exec_complete;
     op->timeout = 0;
     op->start_delay = 0;
     lrmd__set_result(op, PCMK_OCF_UNKNOWN, PCMK_EXEC_PENDING, NULL);
 
     if (rsc_op == NULL) {
         CRM_LOG_ASSERT(pcmk__str_eq(operation, PCMK_ACTION_STOP,
                                     pcmk__str_casei));
         op->user_data = NULL;
         /* This is the stop_all_resources() case. By definition there is no DC
          * (or it would be shutting us down), so we should record our own
          * feature set version here.
          */
         op->params = pcmk__strkey_table(free, free);
 
         pcmk__insert_dup(op->params, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET);
 
         crm_trace("Constructed %s op for %s", operation, rsc_id);
         return op;
     }
 
     params = xml2list(rsc_op);
     g_hash_table_remove(params, CRM_META "_" PCMK__META_OP_TARGET_RC);
 
     op_delay = crm_meta_value(params, PCMK_META_START_DELAY);
     pcmk__scan_min_int(op_delay, &op->start_delay, 0);
 
     op_timeout = crm_meta_value(params, PCMK_META_TIMEOUT);
     pcmk__scan_min_int(op_timeout, &op->timeout, 0);
 
     if (pcmk__guint_from_hash(params, CRM_META "_" PCMK_META_INTERVAL, 0,
                               &(op->interval_ms)) != pcmk_rc_ok) {
         op->interval_ms = 0;
     }
 
     /* Use pcmk_monitor_timeout instead of meta timeout for stonith
      * recurring monitor, if set
      */
     primitive = pcmk__xe_first_child(rsc_op, PCMK_XE_PRIMITIVE, NULL, NULL);
     class = crm_element_value(primitive, PCMK_XA_CLASS);
 
     if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_fence_params)
             && pcmk__str_eq(operation, PCMK_ACTION_MONITOR, pcmk__str_casei)
             && (op->interval_ms > 0)) {
 
         op_timeout = g_hash_table_lookup(params, "pcmk_monitor_timeout");
         if (op_timeout != NULL) {
             long long timeout_ms = crm_get_msec(op_timeout);
 
             op->timeout = (int) QB_MIN(timeout_ms, INT_MAX);
         }
     }
 
     if (!pcmk__str_eq(operation, PCMK_ACTION_STOP, pcmk__str_casei)) {
         op->params = params;
 
     } else {
         rsc_history_t *entry = NULL;
 
         if (lrm_state) {
             entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id);
         }
 
         /* If we do not have stop parameters cached, use
          * whatever we are given */
         if (!entry || !entry->stop_params) {
             op->params = params;
         } else {
             /* Copy the cached parameter list so that we stop the resource
              * with the old attributes, not the new ones */
             op->params = pcmk__strkey_table(free, free);
 
             g_hash_table_foreach(params, copy_meta_keys, op->params);
             g_hash_table_foreach(entry->stop_params, copy_instance_keys, op->params);
             g_hash_table_destroy(params);
             params = NULL;
         }
     }
 
     /* Sanity-check the timeout and start delay */
     if (op->timeout <= 0) {
         op->timeout = op->interval_ms;
     }
     if (op->start_delay < 0) {
         op->start_delay = 0;
     }
 
     transition = crm_element_value(rsc_op, PCMK__XA_TRANSITION_KEY);
     CRM_CHECK(transition != NULL, return op);
 
     op->user_data = pcmk__str_copy(transition);
 
     if (op->interval_ms != 0) {
         if (pcmk__strcase_any_of(operation, PCMK_ACTION_START, PCMK_ACTION_STOP,
                                  NULL)) {
             crm_err("Start and Stop actions cannot have an interval: %u",
                     op->interval_ms);
             op->interval_ms = 0;
         }
     }
 
     crm_trace("Constructed %s op for %s: interval=%u",
               operation, rsc_id, op->interval_ms);
 
     return op;
 }
 
 /*!
  * \internal
  * \brief Send a (synthesized) event result
  *
  * Reply with a synthesized event result directly, as opposed to going through
  * the executor.
  *
  * \param[in]     to_host  Host to send result to
  * \param[in]     to_sys   IPC name to send result (NULL for transition engine)
  * \param[in]     rsc      Type information about resource the result is for
  * \param[in,out] op       Event with result to send
  * \param[in]     rsc_id   ID of resource the result is for
  */
 void
 controld_ack_event_directly(const char *to_host, const char *to_sys,
                             const lrmd_rsc_info_t *rsc, lrmd_event_data_t *op,
                             const char *rsc_id)
 {
     xmlNode *reply = NULL;
     xmlNode *update, *iter;
     crm_node_t *peer = NULL;
 
     CRM_CHECK(op != NULL, return);
     if (op->rsc_id == NULL) {
         // op->rsc_id is a (const char *) but lrmd_free_event() frees it
         CRM_ASSERT(rsc_id != NULL);
         op->rsc_id = pcmk__str_copy(rsc_id);
     }
     if (to_sys == NULL) {
         to_sys = CRM_SYSTEM_TENGINE;
     }
 
     peer = pcmk__get_node(0, controld_globals.our_nodename, NULL,
                           pcmk__node_search_cluster_member);
     update = create_node_state_update(peer, node_update_none, NULL,
                                       __func__);
 
     iter = pcmk__xe_create(update, PCMK__XE_LRM);
     crm_xml_add(iter, PCMK_XA_ID, controld_globals.our_uuid);
     iter = pcmk__xe_create(iter, PCMK__XE_LRM_RESOURCES);
     iter = pcmk__xe_create(iter, PCMK__XE_LRM_RESOURCE);
 
     crm_xml_add(iter, PCMK_XA_ID, op->rsc_id);
 
     controld_add_resource_history_xml(iter, rsc, op,
                                       controld_globals.our_nodename);
     reply = create_request(CRM_OP_INVOKE_LRM, update, to_host, to_sys, CRM_SYSTEM_LRMD, NULL);
 
     crm_log_xml_trace(update, "[direct ACK]");
 
     crm_debug("ACK'ing resource op " PCMK__OP_FMT " from %s: %s",
               op->rsc_id, op->op_type, op->interval_ms, op->user_data,
               crm_element_value(reply, PCMK_XA_REFERENCE));
 
     if (relay_message(reply, TRUE) == FALSE) {
         crm_log_xml_err(reply, "Unable to route reply");
     }
 
     pcmk__xml_free(update);
     pcmk__xml_free(reply);
 }
 
 gboolean
 verify_stopped(enum crmd_fsa_state cur_state, int log_level)
 {
     gboolean res = TRUE;
     GList *lrm_state_list = lrm_state_get_list();
     GList *state_entry;
 
     for (state_entry = lrm_state_list; state_entry != NULL; state_entry = state_entry->next) {
         lrm_state_t *lrm_state = state_entry->data;
 
         if (!lrm_state_verify_stopped(lrm_state, cur_state, log_level)) {
             /* Keep iterating through all executor states even when FALSE is
              * returned
              */
             res = FALSE;
         }
     }
 
     controld_set_fsa_input_flags(R_SENT_RSC_STOP);
     g_list_free(lrm_state_list); lrm_state_list = NULL;
     return res;
 }
 
 struct stop_recurring_action_s {
     lrmd_rsc_info_t *rsc;
     lrm_state_t *lrm_state;
 };
 
 static gboolean
 stop_recurring_action_by_rsc(gpointer key, gpointer value, gpointer user_data)
 {
     gboolean remove = FALSE;
     struct stop_recurring_action_s *event = user_data;
     active_op_t *op = value;
 
     if ((op->interval_ms != 0)
         && pcmk__str_eq(op->rsc_id, event->rsc->id, pcmk__str_none)) {
 
         crm_debug("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id, (char*)key);
         remove = !cancel_op(event->lrm_state, event->rsc->id, key, op->call_id, FALSE);
     }
 
     return remove;
 }
 
 static gboolean
 stop_recurring_actions(gpointer key, gpointer value, gpointer user_data)
 {
     gboolean remove = FALSE;
     lrm_state_t *lrm_state = user_data;
     active_op_t *op = value;
 
     if (op->interval_ms != 0) {
         crm_info("Cancelling op %d for %s (%s)", op->call_id, op->rsc_id,
                  (const char *) key);
         remove = !cancel_op(lrm_state, op->rsc_id, key, op->call_id, FALSE);
     }
 
     return remove;
 }
 
 /*!
  * \internal
  * \brief Check whether recurring actions should be cancelled before an action
  *
  * \param[in] rsc_id       Resource that action is for
  * \param[in] action       Action being performed
  * \param[in] interval_ms  Operation interval of \p action (in milliseconds)
  *
  * \return true if recurring actions should be cancelled, otherwise false
  */
 static bool
 should_cancel_recurring(const char *rsc_id, const char *action, guint interval_ms)
 {
     if (is_remote_lrmd_ra(NULL, NULL, rsc_id) && (interval_ms == 0)
         && (strcmp(action, PCMK_ACTION_MIGRATE_TO) == 0)) {
         /* Don't stop monitoring a migrating Pacemaker Remote connection
          * resource until the entire migration has completed. We must detect if
          * the connection is unexpectedly severed, even during a migration.
          */
         return false;
     }
 
     // Cancel recurring actions before changing resource state
     return (interval_ms == 0)
             && !pcmk__str_any_of(action, PCMK_ACTION_MONITOR,
                                  PCMK_ACTION_NOTIFY, NULL);
 }
 
 /*!
  * \internal
  * \brief Check whether an action should not be performed at this time
  *
  * \param[in] operation  Action to be performed
  *
  * \return Readable description of why action should not be performed,
  *         or NULL if it should be performed
  */
 static const char *
 should_nack_action(const char *action)
 {
     if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)
         && pcmk__str_eq(action, PCMK_ACTION_START, pcmk__str_none)) {
 
         register_fsa_input(C_SHUTDOWN, I_SHUTDOWN, NULL);
         return "Not attempting start due to shutdown in progress";
     }
 
     switch (controld_globals.fsa_state) {
         case S_NOT_DC:
         case S_POLICY_ENGINE:   // Recalculating
         case S_TRANSITION_ENGINE:
             break;
         default:
             if (!pcmk__str_eq(action, PCMK_ACTION_STOP, pcmk__str_none)) {
                 return "Controller cannot attempt actions at this time";
             }
             break;
     }
     return NULL;
 }
 
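 /*!
  * \internal
  * \brief Execute (or directly acknowledge) a requested resource operation
  *
  * Cancel any recurring operations that would conflict with the requested
  * action, NACK the action directly if it cannot be attempted now, and
  * otherwise record it as pending and request its execution from the executor.
  *
  * \param[in,out] lrm_state  Executor state to execute the operation on
  * \param[in,out] rsc        Executor information for the resource
  * \param[in,out] msg        Operation request XML
  * \param[in,out] md         Resource agent metadata (if available)
  */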
 static void
 do_lrm_rsc_op(lrm_state_t *lrm_state, lrmd_rsc_info_t *rsc, xmlNode *msg,
               struct ra_metadata_s *md)
 {
     int rc;
     int call_id = 0;
     char *op_id = NULL;
     lrmd_event_data_t *op = NULL;
     fsa_data_t *msg_data = NULL;
     const char *transition = NULL;
     const char *operation = NULL;
     const char *nack_reason = NULL;
 
     CRM_CHECK((rsc != NULL) && (msg != NULL), return);
 
     operation = crm_element_value(msg, PCMK_XA_OPERATION);
     CRM_CHECK(!pcmk__str_empty(operation), return);
 
     transition = crm_element_value(msg, PCMK__XA_TRANSITION_KEY);
     if (pcmk__str_empty(transition)) {
         crm_log_xml_err(msg, "Missing transition number");
     }
 
     if (lrm_state == NULL) {
         // This shouldn't be possible, but provide a failsafe just in case
         crm_err("Cannot execute %s of %s: No executor connection "
                 QB_XS " transition_key=%s",
                 operation, rsc->id, pcmk__s(transition, ""));
         synthesize_lrmd_failure(NULL, msg, PCMK_EXEC_INVALID,
                                 PCMK_OCF_UNKNOWN_ERROR,
                                 "No executor connection");
         return;
     }
 
     if (pcmk__str_any_of(operation, PCMK_ACTION_RELOAD,
                          PCMK_ACTION_RELOAD_AGENT, NULL)) {
         /* Pre-2.1.0 DCs will schedule reload actions only, and 2.1.0+ DCs
          * will schedule reload-agent actions only. In either case, we need
          * to map that to whatever the resource agent actually supports.
          * Default to the OCF 1.1 name.
          */
         if ((md != NULL)
             && pcmk_is_set(md->ra_flags, ra_supports_legacy_reload)) {
             operation = PCMK_ACTION_RELOAD;
         } else {
             operation = PCMK_ACTION_RELOAD_AGENT;
         }
     }
 
     op = construct_op(lrm_state, msg, rsc->id, operation);
     CRM_CHECK(op != NULL, return);
 
     if (should_cancel_recurring(rsc->id, operation, op->interval_ms)) {
         guint removed = 0;
         struct stop_recurring_action_s data;
 
         data.rsc = rsc;
         data.lrm_state = lrm_state;
         removed = g_hash_table_foreach_remove(lrm_state->active_ops,
                                               stop_recurring_action_by_rsc,
                                               &data);
 
         if (removed) {
             crm_debug("Stopped %u recurring operation%s in preparation for "
                       PCMK__OP_FMT, removed, pcmk__plural_s(removed),
                       rsc->id, operation, op->interval_ms);
         }
     }
 
     /* now do the op */
     crm_notice("Requesting local execution of %s operation for %s on %s "
                QB_XS " transition_key=%s op_key=" PCMK__OP_FMT,
                pcmk__readable_action(op->op_type, op->interval_ms), rsc->id,
                lrm_state->node_name, pcmk__s(transition, ""), rsc->id,
                operation, op->interval_ms);
 
     nack_reason = should_nack_action(operation);
     if (nack_reason != NULL) {
         crm_notice("Discarding attempt to perform action %s on %s in state %s "
                    "(shutdown=%s)", operation, rsc->id,
                    fsa_state2string(controld_globals.fsa_state),
                    pcmk__flag_text(controld_globals.fsa_input_register,
                                    R_SHUTDOWN));
 
         lrmd__set_result(op, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_INVALID,
                          nack_reason);
         controld_ack_event_directly(NULL, NULL, rsc, op, rsc->id);
         lrmd_free_event(op);
         free(op_id);
         return;
     }
 
     controld_record_pending_op(lrm_state->node_name, rsc, op);
 
     op_id = pcmk__op_key(rsc->id, op->op_type, op->interval_ms);
 
     if (op->interval_ms > 0) {
         /* cancel it so we can then restart it without conflict */
         cancel_op_key(lrm_state, rsc, op_id, FALSE);
     }
 
     rc = controld_execute_resource_agent(lrm_state, rsc->id, op->op_type,
                                          op->user_data, op->interval_ms,
                                          op->timeout, op->start_delay,
                                          op->params, &call_id);
     if (rc == pcmk_rc_ok) {
         /* record all operations so we can wait
          * for them to complete during shutdown
          */
         char *call_id_s = make_stop_id(rsc->id, call_id);
         active_op_t *pending = NULL;
 
         pending = pcmk__assert_alloc(1, sizeof(active_op_t));
         crm_trace("Recording pending op: %d - %s %s", call_id, op_id, call_id_s);
 
         pending->call_id = call_id;
         pending->interval_ms = op->interval_ms;
         pending->op_type = pcmk__str_copy(operation);
         pending->op_key = pcmk__str_copy(op_id);
         pending->rsc_id = pcmk__str_copy(rsc->id);
         pending->start_time = time(NULL);
         pending->user_data = pcmk__str_copy(op->user_data);
         if (crm_element_value_epoch(msg, PCMK_OPT_SHUTDOWN_LOCK,
                                     &(pending->lock_time)) != pcmk_ok) {
             pending->lock_time = 0;
         }
         g_hash_table_replace(lrm_state->active_ops, call_id_s, pending);
 
         if ((op->interval_ms > 0)
             && (op->start_delay > START_DELAY_THRESHOLD)) {
             int target_rc = PCMK_OCF_OK;
 
             crm_info("Faking confirmation of %s: execution postponed for over 5 minutes", op_id);
             decode_transition_key(op->user_data, NULL, NULL, NULL, &target_rc);
             lrmd__set_result(op, target_rc, PCMK_EXEC_DONE, NULL);
             controld_ack_event_directly(NULL, NULL, rsc, op, rsc->id);
         }
 
         pending->params = op->params;
         op->params = NULL;
 
     } else if (lrm_state_is_local(lrm_state)) {
         crm_err("Could not initiate %s action for resource %s locally: %s "
                 QB_XS " rc=%d", operation, rsc->id, pcmk_rc_str(rc), rc);
         fake_op_status(lrm_state, op, PCMK_EXEC_NOT_CONNECTED,
                        PCMK_OCF_UNKNOWN_ERROR, pcmk_rc_str(rc));
         process_lrm_event(lrm_state, op, NULL, NULL);
         register_fsa_error(C_FSA_INTERNAL, I_FAIL, NULL);
 
     } else {
         crm_err("Could not initiate %s action for resource %s remotely on %s: "
                 "%s " QB_XS " rc=%d",
                 operation, rsc->id, lrm_state->node_name, pcmk_rc_str(rc), rc);
         fake_op_status(lrm_state, op, PCMK_EXEC_NOT_CONNECTED,
                        PCMK_OCF_UNKNOWN_ERROR, pcmk_rc_str(rc));
         process_lrm_event(lrm_state, op, NULL, NULL);
     }
 
     free(op_id);
     lrmd_free_event(op);
 }
 
 void
 do_lrm_event(long long action,
              enum crmd_fsa_cause cause,
              enum crmd_fsa_state cur_state, enum crmd_fsa_input cur_input, fsa_data_t * msg_data)
 {
     CRM_CHECK(FALSE, return);
 }
 
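 /*!
  * \internal
  * \brief Replace escaped newlines in a string with actual newlines
  *
  * \param[in] string  String to unescape (may be NULL)
  *
  * \return Newly allocated copy of \p string with each two-character escape
  *         replaced by a newline and a space, or NULL if \p string is NULL
  * \note The caller is responsible for freeing the result.
  */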
 static char *
 unescape_newlines(const char *string)
 {
     char *pch = NULL;
     char *ret = NULL;
     static const char *escaped_newline = "\\n";
 
     if (!string) {
         return NULL;
     }
 
     ret = pcmk__str_copy(string);
     pch = strstr(ret, escaped_newline);
     while (pch != NULL) {
         /* Replace newline escape pattern with actual newline (and a space so we
          * don't have to shuffle the rest of the buffer)
          */
         pch[0] = '\n';
         pch[1] = ' ';
         pch = strstr(pch, escaped_newline);
     }
 
     return ret;
 }
 
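 /*!
  * \internal
  * \brief Check whether a resource's last recorded failure matches an operation
  *
  * \param[in,out] lrm_state    Executor state with the resource history
  * \param[in]     rsc_id       ID of resource to check
  * \param[in]     op_type      Operation name to match
  * \param[in]     interval_ms  Operation interval (in milliseconds) to match
  *
  * \return TRUE if the resource's last recorded failure matches the given
  *         operation, otherwise FALSE
  */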
 static bool
 did_lrm_rsc_op_fail(lrm_state_t *lrm_state, const char * rsc_id,
                     const char * op_type, guint interval_ms)
 {
     rsc_history_t *entry = NULL;
 
     CRM_CHECK(lrm_state != NULL, return FALSE);
     CRM_CHECK(rsc_id != NULL, return FALSE);
     CRM_CHECK(op_type != NULL, return FALSE);
 
     entry = g_hash_table_lookup(lrm_state->resource_history, rsc_id);
     if (entry == NULL || entry->failed == NULL) {
         return FALSE;
     }
 
     if (pcmk__str_eq(entry->failed->rsc_id, rsc_id, pcmk__str_none)
         && pcmk__str_eq(entry->failed->op_type, op_type, pcmk__str_casei)
         && entry->failed->interval_ms == interval_ms) {
         return TRUE;
     }
 
     return FALSE;
 }
 
 /*!
  * \internal
  * \brief Log the result of an executor action (actual or synthesized)
  *
  * \param[in] op         Executor action to log result for
  * \param[in] op_key     Operation key for action
  * \param[in] node_name  Name of node action was performed on, if known
  * \param[in] confirmed  Whether to log that graph action was confirmed
  */
 static void
 log_executor_event(const lrmd_event_data_t *op, const char *op_key,
                    const char *node_name, gboolean confirmed)
 {
     int log_level = LOG_ERR;
     GString *str = g_string_sized_new(100); // reasonable starting size
 
     pcmk__g_strcat(str,
                    "Result of ",
                    pcmk__readable_action(op->op_type, op->interval_ms),
                    " operation for ", op->rsc_id, NULL);
 
     if (node_name != NULL) {
         pcmk__g_strcat(str, " on ", node_name, NULL);
     }
 
     switch (op->op_status) {
         case PCMK_EXEC_DONE:
             log_level = LOG_NOTICE;
             pcmk__g_strcat(str, ": ", services_ocf_exitcode_str(op->rc), NULL);
             break;
 
         case PCMK_EXEC_TIMEOUT:
             pcmk__g_strcat(str,
                            ": ", pcmk_exec_status_str(op->op_status), " after ",
                            pcmk__readable_interval(op->timeout), NULL);
             break;
 
         case PCMK_EXEC_CANCELLED:
             log_level = LOG_INFO;
             /* The order of the __attribute__ and the "Fall through" comment
              * is IMPORTANT! Do not change it without proper testing with
              * multiple versions of both clang and gcc.
              *
              * The __clang__ check allows building with all versions of clang.
              * The __has_c_attribute check works around a bug in the clang
              * version shipped with RHEL 7: there, __has_attribute would
              * happily claim the attribute is supported, and the build would
              * then fail on the next line.
              */
 #ifdef __clang__
 #ifdef __has_c_attribute
 #if __has_attribute(fallthrough)
             __attribute__((fallthrough));
 #endif
 #endif
 #endif
             // Fall through
         default:
             pcmk__g_strcat(str, ": ", pcmk_exec_status_str(op->op_status),
                            NULL);
     }
 
     if ((op->exit_reason != NULL)
         && ((op->op_status != PCMK_EXEC_DONE) || (op->rc != PCMK_OCF_OK))) {
 
         pcmk__g_strcat(str, " (", op->exit_reason, ")", NULL);
     }
 
     g_string_append(str, " " QB_XS);
     g_string_append_printf(str, " graph action %sconfirmed; call=%d key=%s",
                            (confirmed? "" : "un"), op->call_id, op_key);
     if (op->op_status == PCMK_EXEC_DONE) {
         g_string_append_printf(str, " rc=%d", op->rc);
     }
 
     do_crm_log(log_level, "%s", str->str);
     g_string_free(str, TRUE);
 
     /* The services library has already logged the output at info or debug
      * level, so just raise to notice if it looks like a failure.
      */
     if ((op->output != NULL) && (op->rc != PCMK_OCF_OK)) {
         char *prefix = crm_strdup_printf(PCMK__OP_FMT "@%s output",
                                          op->rsc_id, op->op_type,
                                          op->interval_ms, node_name);
 
         crm_log_output(LOG_NOTICE, prefix, op->output);
         free(prefix);
     }
 }
 
 void
 process_lrm_event(lrm_state_t *lrm_state, lrmd_event_data_t *op,
                   active_op_t *pending, const xmlNode *action_xml)
 {
     char *op_id = NULL;
     char *op_key = NULL;
 
     gboolean remove = FALSE;
     gboolean removed = FALSE;
     bool need_direct_ack = FALSE;
     lrmd_rsc_info_t *rsc = NULL;
     const char *node_name = NULL;
 
     CRM_CHECK(op != NULL, return);
     CRM_CHECK(op->rsc_id != NULL, return);
 
     // Remap new status codes for older DCs
     if (compare_version(controld_globals.dc_version, "3.2.0") < 0) {
         switch (op->op_status) {
             case PCMK_EXEC_NOT_CONNECTED:
                 lrmd__set_result(op, PCMK_OCF_CONNECTION_DIED,
                                  PCMK_EXEC_ERROR, op->exit_reason);
                 break;
             case PCMK_EXEC_INVALID:
                 lrmd__set_result(op, CRM_DIRECT_NACK_RC, PCMK_EXEC_ERROR,
                                  op->exit_reason);
                 break;
             default:
                 break;
         }
     }
 
     op_id = make_stop_id(op->rsc_id, op->call_id);
     op_key = pcmk__op_key(op->rsc_id, op->op_type, op->interval_ms);
 
     // Get resource info if available (from executor state or action XML)
     if (lrm_state) {
         rsc = lrm_state_get_rsc_info(lrm_state, op->rsc_id, 0);
     }
     if ((rsc == NULL) && action_xml) {
         xmlNode *xml = pcmk__xe_first_child(action_xml, PCMK_XE_PRIMITIVE, NULL,
                                             NULL);
 
         const char *standard = crm_element_value(xml, PCMK_XA_CLASS);
         const char *provider = crm_element_value(xml, PCMK_XA_PROVIDER);
         const char *type = crm_element_value(xml, PCMK_XA_TYPE);
 
         if (standard && type) {
             crm_info("%s agent information not cached, using %s%s%s:%s from action XML",
                      op->rsc_id, standard,
                      (provider? ":" : ""), (provider? provider : ""), type);
             rsc = lrmd_new_rsc_info(op->rsc_id, standard, provider, type);
         } else {
             crm_err("Can't process %s result because %s agent information not cached or in XML",
                     op_key, op->rsc_id);
         }
     }
 
     // Get node name if available (from executor state or action XML)
     if (lrm_state) {
         node_name = lrm_state->node_name;
     } else if (action_xml) {
         node_name = crm_element_value(action_xml, PCMK__META_ON_NODE);
     }
 
     if (pending == NULL) {
         remove = TRUE;
         if (lrm_state) {
             pending = g_hash_table_lookup(lrm_state->active_ops, op_id);
         }
     }
 
     if (op->op_status == PCMK_EXEC_ERROR) {
         switch(op->rc) {
             case PCMK_OCF_NOT_RUNNING:
             case PCMK_OCF_RUNNING_PROMOTED:
             case PCMK_OCF_DEGRADED:
             case PCMK_OCF_DEGRADED_PROMOTED:
                 // Leave it to the TE/scheduler to decide if this is an error
                 op->op_status = PCMK_EXEC_DONE;
                 break;
             default:
                 /* Nothing to do */
                 break;
         }
     }
 
     if (op->op_status != PCMK_EXEC_CANCELLED) {
         /* We might not record the result, so directly acknowledge it to the
          * originator instead, so it doesn't time out waiting for the result
          * (especially important if part of a transition).
          */
         need_direct_ack = TRUE;
 
         if (controld_action_is_recordable(op->op_type)) {
             if (node_name && rsc) {
                 // We should record the result, and happily, we can
                 time_t lock_time = (pending == NULL)? 0 : pending->lock_time;
 
                 controld_update_resource_history(node_name, rsc, op, lock_time);
                 need_direct_ack = FALSE;
 
             } else if (op->rsc_deleted) {
                 /* We shouldn't record the result (likely the resource was
                  * refreshed, cleaned, or removed while this operation was
                  * in flight).
                  */
                 crm_notice("Not recording %s result in CIB because "
                            "resource information was removed since it was initiated",
                            op_key);
             } else {
                 /* This shouldn't be possible; the executor didn't consider the
                  * resource deleted, but we couldn't find resource or node
                  * information.
                  */
                 crm_err("Unable to record %s result in CIB: %s", op_key,
                         (node_name? "No resource information" : "No node name"));
             }
         }
 
     } else if (op->interval_ms == 0) {
         /* A non-recurring operation was cancelled. Most likely, the
          * never-initiated action was removed from the executor's pending
          * operations list upon resource removal.
          */
         need_direct_ack = TRUE;
 
     } else if (pending == NULL) {
         /* This recurring operation was cancelled, but was not pending. No
          * transition actions are waiting on it, nothing needs to be done.
          */
 
     } else if (op->user_data == NULL) {
         /* This recurring operation was cancelled and pending, but we don't
          * have a transition key. This should never happen.
          */
         crm_err("Recurring operation %s was cancelled without transition information",
                 op_key);
 
     } else if (pcmk_is_set(pending->flags, active_op_remove)) {
         /* This recurring operation was cancelled (by us) and pending, and we
          * have been waiting for it to finish.
          */
         if (lrm_state) {
             controld_delete_action_history(op);
         }
 
         /* Directly acknowledge failed recurring actions here. The above call to
          * controld_delete_action_history() will not erase any corresponding
          * last_failure entry, which means that the DC won't confirm the
          * cancellation via process_op_deletion(), and the transition would
          * otherwise wait for the action timer to pop.
          */
         if (did_lrm_rsc_op_fail(lrm_state, pending->rsc_id,
                                 pending->op_type, pending->interval_ms)) {
             need_direct_ack = TRUE;
         }
 
     } else if (op->rsc_deleted) {
         /* This recurring operation was cancelled (but not by us, and the
          * executor does not have resource information, likely due to resource
          * cleanup, refresh, or removal) and pending.
          */
         crm_debug("Recurring op %s was cancelled due to resource deletion",
                   op_key);
         need_direct_ack = TRUE;
 
     } else {
         /* This recurring operation was cancelled (but not by us, likely by the
          * executor before stopping the resource) and pending. We don't need to
          * do anything special.
          */
     }
 
     if (need_direct_ack) {
         controld_ack_event_directly(NULL, NULL, NULL, op, op->rsc_id);
     }
 
     if (remove == FALSE) {
         /* The caller will do this afterwards, but keep the logging consistent */
         removed = TRUE;
 
     } else if (lrm_state && ((op->interval_ms == 0)
                              || (op->op_status == PCMK_EXEC_CANCELLED))) {
 
         gboolean found = g_hash_table_remove(lrm_state->active_ops, op_id);
 
         if (op->interval_ms != 0) {
             removed = TRUE;
         } else if (found) {
             removed = TRUE;
             crm_trace("Op %s (call=%d, stop-id=%s, remaining=%u): Confirmed",
                       op_key, op->call_id, op_id,
                       g_hash_table_size(lrm_state->active_ops));
         }
     }
 
     log_executor_event(op, op_key, node_name, removed);
 
     if (lrm_state) {
         if (!pcmk__str_eq(op->op_type, PCMK_ACTION_META_DATA,
                           pcmk__str_casei)) {
             crmd_alert_resource_op(lrm_state->node_name, op);
         } else if (rsc && (op->rc == PCMK_OCF_OK)) {
             char *metadata = unescape_newlines(op->output);
 
             controld_cache_metadata(lrm_state->metadata_cache, rsc, metadata);
             free(metadata);
         }
     }
 
     if (op->rsc_deleted) {
         crm_info("Deletion of resource '%s' complete after %s", op->rsc_id, op_key);
         if (lrm_state) {
             delete_rsc_entry(lrm_state, NULL, op->rsc_id, NULL, pcmk_ok, NULL,
                              true);
         }
     }
 
     /* If a shutdown was escalated while operations were pending,
      * then the FSA will be stalled right now... allow it to continue
      */
     controld_trigger_fsa();
     if (lrm_state && rsc) {
         update_history_cache(lrm_state, rsc, op);
     }
 
     lrmd_free_rsc_info(rsc);
     free(op_key);
     free(op_id);
 }
diff --git a/daemons/controld/controld_execd_state.c b/daemons/controld/controld_execd_state.c
index 30951c009c..4638c20ad8 100644
--- a/daemons/controld/controld_execd_state.c
+++ b/daemons/controld/controld_execd_state.c
@@ -1,818 +1,818 @@
 /*
  * Copyright 2012-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <errno.h>
 
 #include <crm/crm.h>
 #include <crm/common/iso8601.h>
 #include <crm/common/xml.h>
 #include <crm/pengine/rules.h>
 #include <crm/pengine/rules_internal.h>
 #include <crm/lrmd_internal.h>
 
 #include <pacemaker-internal.h>
 #include <pacemaker-controld.h>
 
 static GHashTable *lrm_state_table = NULL;
 extern GHashTable *proxy_table;
 int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg);
 void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg));
 
 static void
 free_rsc_info(gpointer value)
 {
     lrmd_rsc_info_t *rsc_info = value;
 
     lrmd_free_rsc_info(rsc_info);
 }
 
 static void
 free_deletion_op(gpointer value)
 {
     struct pending_deletion_op_s *op = value;
 
     free(op->rsc);
     delete_ha_msg_input(op->input);
     free(op);
 }
 
 static void
 free_recurring_op(gpointer value)
 {
     active_op_t *op = value;
 
     free(op->user_data);
     free(op->rsc_id);
     free(op->op_type);
     free(op->op_key);
     if (op->params) {
         g_hash_table_destroy(op->params);
     }
     free(op);
 }
 
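 /*!
  * \internal
  * \brief Synthesize a failure result for a pending executor action
  *
  * Intended as a GHRFunc for g_hash_table_foreach_remove() over an executor
  * state object's active_ops table (see lrm_state_disconnect_only()).
  *
  * \param[in]     key        Hash table key (the action's stop ID)
  * \param[in,out] value      Pending action (active_op_t *)
  * \param[in,out] user_data  Executor state object (lrm_state_t *)
  *
  * \return TRUE (always), so that the caller removes the table entry
  */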
 static gboolean
 fail_pending_op(gpointer key, gpointer value, gpointer user_data)
 {
     lrmd_event_data_t event = { 0, };
     lrm_state_t *lrm_state = user_data;
     active_op_t *op = value;
 
     crm_trace("Pre-emptively failing " PCMK__OP_FMT " on %s (call=%s, %s)",
               op->rsc_id, op->op_type, op->interval_ms,
               lrm_state->node_name, (char*)key, op->user_data);
 
     event.type = lrmd_event_exec_complete;
     event.rsc_id = op->rsc_id;
     event.op_type = op->op_type;
     event.user_data = op->user_data;
     event.timeout = 0;
     event.interval_ms = op->interval_ms;
     lrmd__set_result(&event, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_NOT_CONNECTED,
                      "Action was pending when executor connection was dropped");
     event.t_run = op->start_time;
-    event.t_rcchange = (unsigned int) op->start_time;
+    event.t_rcchange = op->start_time;
 
     event.call_id = op->call_id;
     event.remote_nodename = lrm_state->node_name;
     event.params = op->params;
 
     process_lrm_event(lrm_state, &event, op, NULL);
     lrmd__reset_result(&event);
     return TRUE;
 }
 
 gboolean
 lrm_state_is_local(lrm_state_t *lrm_state)
 {
     return (lrm_state != NULL)
            && pcmk__str_eq(lrm_state->node_name, controld_globals.our_nodename,
                            pcmk__str_casei);
 }
 
 /*!
  * \internal
  * \brief Create executor state entry for a node and add it to the state table
  *
  * \param[in]  node_name  Node to create entry for
  *
  * \return Newly allocated executor state object initialized for \p node_name
  */
 static lrm_state_t *
 lrm_state_create(const char *node_name)
 {
     lrm_state_t *state = NULL;
 
     if (!node_name) {
         crm_err("No node name given for lrm state object");
         return NULL;
     }
 
     state = pcmk__assert_alloc(1, sizeof(lrm_state_t));
 
     state->node_name = pcmk__str_copy(node_name);
     state->rsc_info_cache = pcmk__strkey_table(NULL, free_rsc_info);
     state->deletion_ops = pcmk__strkey_table(free, free_deletion_op);
     state->active_ops = pcmk__strkey_table(free, free_recurring_op);
     state->resource_history = pcmk__strkey_table(NULL, history_free);
     state->metadata_cache = metadata_cache_new();
 
     g_hash_table_insert(lrm_state_table, (char *)state->node_name, state);
     return state;
 }
 
 static gboolean
 remote_proxy_remove_by_node(gpointer key, gpointer value, gpointer user_data)
 {
     remote_proxy_t *proxy = value;
     const char *node_name = user_data;
 
     if (pcmk__str_eq(node_name, proxy->node_name, pcmk__str_casei)) {
         return TRUE;
     }
 
     return FALSE;
 }
 
 static remote_proxy_t *
 find_connected_proxy_by_node(const char * node_name)
 {
     GHashTableIter gIter;
     remote_proxy_t *proxy = NULL;
 
     CRM_CHECK(proxy_table != NULL, return NULL);
 
     g_hash_table_iter_init(&gIter, proxy_table);
 
     while (g_hash_table_iter_next(&gIter, NULL, (gpointer *) &proxy)) {
         if (proxy->source
             && pcmk__str_eq(node_name, proxy->node_name, pcmk__str_casei)) {
             return proxy;
         }
     }
 
     return NULL;
 }
 
 static void
 remote_proxy_disconnect_by_node(const char * node_name)
 {
     remote_proxy_t *proxy = NULL;
 
     CRM_CHECK(proxy_table != NULL, return);
 
     while ((proxy = find_connected_proxy_by_node(node_name)) != NULL) {
         /* mainloop_del_ipc_client() eventually calls
          * remote_proxy_disconnected(), which removes the entry from
          * proxy_table. Do not do this in a g_hash_table_iter_next() loop.
          */
         if (proxy->source) {
             mainloop_del_ipc_client(proxy->source);
         }
     }
 
     return;
 }
 
 static void
 internal_lrm_state_destroy(gpointer data)
 {
     lrm_state_t *lrm_state = data;
 
     if (!lrm_state) {
         return;
     }
 
     /* Rather than removing the recorded proxy entries from proxy_table
      * directly, make sure any connected proxies get disconnected, so that
      * remote_proxy_disconnected() is called and removes the entries from
      * proxy_table as well.
      */
     remote_proxy_disconnect_by_node(lrm_state->node_name);
 
     crm_trace("Destroying proxy table %s with %u members",
               lrm_state->node_name, g_hash_table_size(proxy_table));
     // Just in case there are any leftovers in proxy_table
     g_hash_table_foreach_remove(proxy_table, remote_proxy_remove_by_node,
                                 (char *) lrm_state->node_name);
     remote_ra_cleanup(lrm_state);
     lrmd_api_delete(lrm_state->conn);
 
     if (lrm_state->rsc_info_cache) {
         crm_trace("Destroying rsc info cache with %u members",
                   g_hash_table_size(lrm_state->rsc_info_cache));
         g_hash_table_destroy(lrm_state->rsc_info_cache);
     }
     if (lrm_state->resource_history) {
         crm_trace("Destroying history op cache with %u members",
                   g_hash_table_size(lrm_state->resource_history));
         g_hash_table_destroy(lrm_state->resource_history);
     }
     if (lrm_state->deletion_ops) {
         crm_trace("Destroying deletion op cache with %u members",
                   g_hash_table_size(lrm_state->deletion_ops));
         g_hash_table_destroy(lrm_state->deletion_ops);
     }
     if (lrm_state->active_ops != NULL) {
         crm_trace("Destroying pending op cache with %u members",
                   g_hash_table_size(lrm_state->active_ops));
         g_hash_table_destroy(lrm_state->active_ops);
     }
     metadata_cache_free(lrm_state->metadata_cache);
 
     free((char *)lrm_state->node_name);
     free(lrm_state);
 }
 
 void
 lrm_state_reset_tables(lrm_state_t * lrm_state, gboolean reset_metadata)
 {
     if (lrm_state->resource_history) {
         crm_trace("Resetting resource history cache with %u members",
                   g_hash_table_size(lrm_state->resource_history));
         g_hash_table_remove_all(lrm_state->resource_history);
     }
     if (lrm_state->deletion_ops) {
         crm_trace("Resetting deletion operations cache with %u members",
                   g_hash_table_size(lrm_state->deletion_ops));
         g_hash_table_remove_all(lrm_state->deletion_ops);
     }
     if (lrm_state->active_ops != NULL) {
         crm_trace("Resetting active operations cache with %u members",
                   g_hash_table_size(lrm_state->active_ops));
         g_hash_table_remove_all(lrm_state->active_ops);
     }
     if (lrm_state->rsc_info_cache) {
         crm_trace("Resetting resource information cache with %u members",
                   g_hash_table_size(lrm_state->rsc_info_cache));
         g_hash_table_remove_all(lrm_state->rsc_info_cache);
     }
     if (reset_metadata) {
         metadata_cache_reset(lrm_state->metadata_cache);
     }
 }
 
 gboolean
 lrm_state_init_local(void)
 {
     if (lrm_state_table) {
         return TRUE;
     }
 
     lrm_state_table = pcmk__strikey_table(NULL, internal_lrm_state_destroy);
     if (!lrm_state_table) {
         return FALSE;
     }
 
     proxy_table = pcmk__strikey_table(NULL, remote_proxy_free);
     if (!proxy_table) {
         g_hash_table_destroy(lrm_state_table);
         lrm_state_table = NULL;
         return FALSE;
     }
 
     return TRUE;
 }
 
 void
 lrm_state_destroy_all(void)
 {
     if (lrm_state_table) {
         crm_trace("Destroying state table with %u members",
                   g_hash_table_size(lrm_state_table));
         g_hash_table_destroy(lrm_state_table);
         lrm_state_table = NULL;
     }
     if (proxy_table) {
         crm_trace("Destroying proxy table with %u members",
                   g_hash_table_size(proxy_table));
         g_hash_table_destroy(proxy_table);
         proxy_table = NULL;
     }
 }
 
 lrm_state_t *
 lrm_state_find(const char *node_name)
 {
     if ((node_name == NULL) || (lrm_state_table == NULL)) {
         return NULL;
     }
     return g_hash_table_lookup(lrm_state_table, node_name);
 }
 
 lrm_state_t *
 lrm_state_find_or_create(const char *node_name)
 {
     lrm_state_t *lrm_state;
 
     CRM_CHECK(lrm_state_table != NULL, return NULL);
 
     lrm_state = g_hash_table_lookup(lrm_state_table, node_name);
     if (!lrm_state) {
         lrm_state = lrm_state_create(node_name);
     }
 
     return lrm_state;
 }
 
 GList *
 lrm_state_get_list(void)
 {
     if (lrm_state_table == NULL) {
         return NULL;
     }
     return g_hash_table_get_values(lrm_state_table);
 }
 
 void
 lrm_state_disconnect_only(lrm_state_t * lrm_state)
 {
     int removed = 0;
 
     if (!lrm_state->conn) {
         return;
     }
     crm_trace("Disconnecting %s", lrm_state->node_name);
 
     remote_proxy_disconnect_by_node(lrm_state->node_name);
 
     ((lrmd_t *) lrm_state->conn)->cmds->disconnect(lrm_state->conn);
 
     if (!pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) {
         removed = g_hash_table_foreach_remove(lrm_state->active_ops,
                                               fail_pending_op, lrm_state);
         crm_trace("Synthesized %d operation failures for %s", removed, lrm_state->node_name);
     }
 }
 
 void
 lrm_state_disconnect(lrm_state_t * lrm_state)
 {
     if (!lrm_state->conn) {
         return;
     }
 
     lrm_state_disconnect_only(lrm_state);
 
     lrmd_api_delete(lrm_state->conn);
     lrm_state->conn = NULL;
 }
 
 int
 lrm_state_is_connected(lrm_state_t * lrm_state)
 {
     if (!lrm_state->conn) {
         return FALSE;
     }
     return ((lrmd_t *) lrm_state->conn)->cmds->is_connected(lrm_state->conn);
 }
 
 int
 lrm_state_poke_connection(lrm_state_t * lrm_state)
 {
 
     if (!lrm_state->conn) {
         return -ENOTCONN;
     }
     return ((lrmd_t *) lrm_state->conn)->cmds->poke_connection(lrm_state->conn);
 }
 
 // \return Standard Pacemaker return code
 int
 controld_connect_local_executor(lrm_state_t *lrm_state)
 {
     int rc = pcmk_rc_ok;
 
     if (lrm_state->conn == NULL) {
         lrmd_t *api = NULL;
 
         rc = lrmd__new(&api, NULL, NULL, 0);
         if (rc != pcmk_rc_ok) {
             return rc;
         }
         api->cmds->set_callback(api, lrm_op_callback);
         lrm_state->conn = api;
     }
 
     rc = ((lrmd_t *) lrm_state->conn)->cmds->connect(lrm_state->conn,
                                                      CRM_SYSTEM_CRMD, NULL);
     rc = pcmk_legacy2rc(rc);
 
     if (rc == pcmk_rc_ok) {
         lrm_state->num_lrm_register_fails = 0;
     } else {
         lrm_state->num_lrm_register_fails++;
     }
     return rc;
 }
 
 static remote_proxy_t *
 crmd_remote_proxy_new(lrmd_t *lrmd, const char *node_name, const char *session_id, const char *channel)
 {
     struct ipc_client_callbacks proxy_callbacks = {
         .dispatch = remote_proxy_dispatch,
         .destroy = remote_proxy_disconnected
     };
     remote_proxy_t *proxy = remote_proxy_new(lrmd, &proxy_callbacks, node_name,
                                              session_id, channel);
     return proxy;
 }
 
 gboolean
 crmd_is_proxy_session(const char *session)
 {
     return g_hash_table_lookup(proxy_table, session) ? TRUE : FALSE;
 }
 
 void
 crmd_proxy_send(const char *session, xmlNode *msg)
 {
     remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session);
     lrm_state_t *lrm_state = NULL;
 
     if (!proxy) {
         return;
     }
     crm_log_xml_trace(msg, "to-proxy");
     lrm_state = lrm_state_find(proxy->node_name);
     if (lrm_state) {
         crm_trace("Sending event to %.8s on %s", proxy->session_id, proxy->node_name);
         remote_proxy_relay_event(proxy, msg);
     }
 }
 
 static void
 crmd_proxy_dispatch(const char *session, xmlNode *msg)
 {
     crm_trace("Processing proxied IPC message from session %s", session);
     crm_log_xml_trace(msg, "controller[inbound]");
     crm_xml_add(msg, PCMK__XA_CRM_SYS_FROM, session);
     if (controld_authorize_ipc_message(msg, NULL, session)) {
         route_message(C_IPC_MESSAGE, msg);
     }
     controld_trigger_fsa();
 }
 
 static void
 remote_config_check(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     if (rc != pcmk_ok) {
         crm_err("Query resulted in an error: %s", pcmk_strerror(rc));
 
         if (rc == -EACCES || rc == -pcmk_err_schema_validation) {
             crm_err("The cluster is mis-configured - shutting down and staying down");
         }
 
     } else {
         lrmd_t * lrmd = (lrmd_t *)user_data;
         crm_time_t *now = crm_time_new(NULL);
         GHashTable *config_hash = pcmk__strkey_table(free, free);
 
         crm_debug("Call %d : Parsing CIB options", call_id);
 
         pe_unpack_nvpairs(output, output, PCMK_XE_CLUSTER_PROPERTY_SET, NULL,
                           config_hash, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS, FALSE,
                           now, NULL);
 
         /* Now send it to the remote peer */
         lrmd__validate_remote_settings(lrmd, config_hash);
 
         g_hash_table_destroy(config_hash);
         crm_time_free(now);
     }
 }
 
 static void
 crmd_remote_proxy_cb(lrmd_t *lrmd, void *userdata, xmlNode *msg)
 {
     lrm_state_t *lrm_state = userdata;
     const char *session = crm_element_value(msg, PCMK__XA_LRMD_IPC_SESSION);
     remote_proxy_t *proxy = g_hash_table_lookup(proxy_table, session);
 
     const char *op = crm_element_value(msg, PCMK__XA_LRMD_IPC_OP);
     if (pcmk__str_eq(op, LRMD_IPC_OP_NEW, pcmk__str_casei)) {
         const char *channel = crm_element_value(msg, PCMK__XA_LRMD_IPC_SERVER);
 
         proxy = crmd_remote_proxy_new(lrmd, lrm_state->node_name, session, channel);
         if (!remote_ra_controlling_guest(lrm_state)) {
             if (proxy != NULL) {
                 cib_t *cib_conn = controld_globals.cib_conn;
 
                 /* Look up PCMK_OPT_STONITH_WATCHDOG_TIMEOUT and send to the
                  * remote peer for validation
                  */
                 int rc = cib_conn->cmds->query(cib_conn, PCMK_XE_CRM_CONFIG,
                                                NULL, cib_none);
                 cib_conn->cmds->register_callback_full(cib_conn, rc, 10, FALSE,
                                                        lrmd,
                                                        "remote_config_check",
                                                        remote_config_check,
                                                        NULL);
             }
         } else {
             crm_debug("Skipping remote_config_check for guest-nodes");
         }
 
     } else if (pcmk__str_eq(op, LRMD_IPC_OP_SHUTDOWN_REQ, pcmk__str_casei)) {
         char *now_s = NULL;
 
         crm_notice("%s requested shutdown of its remote connection",
                    lrm_state->node_name);
 
         if (!remote_ra_is_in_maintenance(lrm_state)) {
             now_s = pcmk__ttoa(time(NULL));
             update_attrd(lrm_state->node_name, PCMK__NODE_ATTR_SHUTDOWN, now_s,
                          NULL, TRUE);
             free(now_s);
 
             remote_proxy_ack_shutdown(lrmd);
 
             crm_warn("Reconnection attempts to %s may result in failures that must be cleared",
                     lrm_state->node_name);
         } else {
             remote_proxy_nack_shutdown(lrmd);
 
             crm_notice("Remote resource for %s is not managed so no ordered shutdown happening",
                     lrm_state->node_name);
         }
         return;
 
     } else if (pcmk__str_eq(op, LRMD_IPC_OP_REQUEST, pcmk__str_casei) && proxy && proxy->is_local) {
         /* This is for the controller, which we are, so don't try
          * to send to ourselves over IPC -- do it directly.
          */
         int flags = 0;
         xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_LRMD_IPC_MSG,
                                                 NULL, NULL);
         xmlNode *request = pcmk__xe_first_child(wrapper, NULL, NULL, NULL);
 
         CRM_CHECK(request != NULL, return);
         CRM_CHECK(lrm_state->node_name, return);
         crm_xml_add(request, PCMK_XE_ACL_ROLE, "pacemaker-remote");
         pcmk__update_acl_user(request, PCMK__XA_LRMD_IPC_USER,
                               lrm_state->node_name);
 
         /* Pacemaker Remote nodes don't know their own names (as known to the
          * cluster). When getting a node info request with no name or ID, add
          * the name, so we don't return info for ourselves instead of the
          * Pacemaker Remote node.
          */
         if (pcmk__str_eq(crm_element_value(request, PCMK__XA_CRM_TASK),
                          CRM_OP_NODE_INFO, pcmk__str_none)) {
             int node_id = 0;
 
             crm_element_value_int(request, PCMK_XA_ID, &node_id);
             if ((node_id <= 0)
                 && (crm_element_value(request, PCMK_XA_UNAME) == NULL)) {
                 crm_xml_add(request, PCMK_XA_UNAME, lrm_state->node_name);
             }
         }
 
         crmd_proxy_dispatch(session, request);
 
         crm_element_value_int(msg, PCMK__XA_LRMD_IPC_MSG_FLAGS, &flags);
         if (flags & crm_ipc_client_response) {
             int msg_id = 0;
             xmlNode *op_reply = pcmk__xe_create(NULL, PCMK__XE_ACK);
 
             crm_xml_add(op_reply, PCMK_XA_FUNCTION, __func__);
             crm_xml_add_int(op_reply, PCMK__XA_LINE, __LINE__);
 
             crm_element_value_int(msg, PCMK__XA_LRMD_IPC_MSG_ID, &msg_id);
             remote_proxy_relay_response(proxy, op_reply, msg_id);
 
             pcmk__xml_free(op_reply);
         }
 
     } else {
         remote_proxy_cb(lrmd, lrm_state->node_name, msg);
     }
 }
 
 
 // \return Standard Pacemaker return code
 int
 controld_connect_remote_executor(lrm_state_t *lrm_state, const char *server,
                                  int port, int timeout_ms)
 {
     int rc = pcmk_rc_ok;
 
     if (lrm_state->conn == NULL) {
         lrmd_t *api = NULL;
 
         rc = lrmd__new(&api, lrm_state->node_name, server, port);
         if (rc != pcmk_rc_ok) {
             crm_warn("Pacemaker Remote connection to %s:%s failed: %s "
                      QB_XS " rc=%d", server, port, pcmk_rc_str(rc), rc);
 
             return rc;
         }
         lrm_state->conn = api;
         api->cmds->set_callback(api, remote_lrm_op_callback);
         lrmd_internal_set_proxy_callback(api, lrm_state, crmd_remote_proxy_cb);
     }
 
     crm_trace("Initiating remote connection to %s:%d with timeout %dms",
               server, port, timeout_ms);
     rc = ((lrmd_t *) lrm_state->conn)->cmds->connect_async(lrm_state->conn,
                                                            lrm_state->node_name,
                                                            timeout_ms);
     if (rc == pcmk_ok) {
         lrm_state->num_lrm_register_fails = 0;
     } else {
         lrm_state->num_lrm_register_fails++; // Ignored for remote connections
     }
     return pcmk_legacy2rc(rc);
 }
 
 int
 lrm_state_get_metadata(lrm_state_t * lrm_state,
                        const char *class,
                        const char *provider,
                        const char *agent, char **output, enum lrmd_call_options options)
 {
     lrmd_key_value_t *params = NULL;
 
     if (!lrm_state->conn) {
         return -ENOTCONN;
     }
 
     /* Add the node name to the environment, as is done with normal resource
      * action calls. Meta-data calls shouldn't need it, but some agents are
      * written with an ocf_local_nodename call at the beginning regardless of
      * action. Without the environment variable, the agent would try to contact
      * the controller to get the node name -- but the controller would be
      * blocking on the synchronous meta-data call.
      *
      * At this point, we have to assume that agents are unlikely to make other
      * calls that require the controller, such as crm_node --quorum or
      * --cluster-id.
      *
      * @TODO Make meta-data calls asynchronous. (This will be part of a larger
      * project to make meta-data calls via the executor rather than directly.)
      */
     params = lrmd_key_value_add(params, CRM_META "_" PCMK__META_ON_NODE,
                                 lrm_state->node_name);
 
     return ((lrmd_t *) lrm_state->conn)->cmds->get_metadata_params(lrm_state->conn,
             class, provider, agent, output, options, params);
 }
 
 int
 lrm_state_cancel(lrm_state_t *lrm_state, const char *rsc_id, const char *action,
                  guint interval_ms)
 {
     if (!lrm_state->conn) {
         return -ENOTCONN;
     }
 
     /* @TODO Figure out a way to make this asynchronous. Currently it is
      * synchronous and directly acknowledged in do_lrm_invoke().
      */
     if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
         return remote_ra_cancel(lrm_state, rsc_id, action, interval_ms);
     }
     return ((lrmd_t *) lrm_state->conn)->cmds->cancel(lrm_state->conn, rsc_id,
                                                       action, interval_ms);
 }
 
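 /*!
  * \internal
  * \brief Get executor information for a resource
  *
  * The caller is responsible for freeing the result with lrmd_free_rsc_info()
  * (as process_lrm_event() does). For a regular executor connection, results
  * are cached in rsc_info_cache, and the caller receives a copy made with
  * lrmd_copy_rsc_info().
  *
  * \param[in,out] lrm_state  Executor state object
  * \param[in]     rsc_id     ID of resource to look up
  * \param[in]     options    Executor call options
  *
  * \return Newly allocated resource information on success, otherwise NULL
  */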
 lrmd_rsc_info_t *
 lrm_state_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id, enum lrmd_call_options options)
 {
     lrmd_rsc_info_t *rsc = NULL;
 
     if (!lrm_state->conn) {
         return NULL;
     }
     if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
         return remote_ra_get_rsc_info(lrm_state, rsc_id);
     }
 
     rsc = g_hash_table_lookup(lrm_state->rsc_info_cache, rsc_id);
     if (rsc == NULL) {
         /* only contact the lrmd if we don't already have a cached rsc info */
         rsc = ((lrmd_t *) lrm_state->conn)->cmds->get_rsc_info(lrm_state->conn, rsc_id, options);
         if (rsc == NULL) {
             return NULL;
         }
         /* cache the result */
         g_hash_table_insert(lrm_state->rsc_info_cache, rsc->id, rsc);
     }
 
     return lrmd_copy_rsc_info(rsc);
 }
 
 /*!
  * \internal
  * \brief Initiate a resource agent action
  *
  * \param[in,out] lrm_state       Executor state object
  * \param[in]     rsc_id          ID of resource for action
  * \param[in]     action          Action to execute
  * \param[in]     userdata        String to copy and pass to execution callback
  * \param[in]     interval_ms     Action interval (in milliseconds)
  * \param[in]     timeout_ms      Action timeout (in milliseconds)
  * \param[in]     start_delay_ms  Delay (in ms) before initiating action
  * \param[in]     parameters      Hash table of resource parameters
  * \param[out]    call_id         Where to store call ID on success
  *
  * \return Standard Pacemaker return code
  */
 int
 controld_execute_resource_agent(lrm_state_t *lrm_state, const char *rsc_id,
                                 const char *action, const char *userdata,
                                 guint interval_ms, int timeout_ms,
                                 int start_delay_ms, GHashTable *parameters,
                                 int *call_id)
 {
     int rc = pcmk_rc_ok;
     lrmd_key_value_t *params = NULL;
 
     if (lrm_state->conn == NULL) {
         return ENOTCONN;
     }
 
     // Convert parameters from hash table to list
     if (parameters != NULL) {
         const char *key = NULL;
         const char *value = NULL;
         GHashTableIter iter;
 
         g_hash_table_iter_init(&iter, parameters);
         while (g_hash_table_iter_next(&iter, (gpointer *) &key,
                                       (gpointer *) &value)) {
             params = lrmd_key_value_add(params, key, value);
         }
     }
 
     if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
         rc = controld_execute_remote_agent(lrm_state, rsc_id, action,
                                            userdata, interval_ms, timeout_ms,
                                            start_delay_ms, params, call_id);
 
     } else {
         rc = ((lrmd_t *) lrm_state->conn)->cmds->exec(lrm_state->conn, rsc_id,
                                                       action, userdata,
                                                       interval_ms, timeout_ms,
                                                       start_delay_ms,
                                                       lrmd_opt_notify_changes_only,
                                                       params);
         if (rc < 0) {
             rc = pcmk_legacy2rc(rc);
         } else {
             *call_id = rc;
             rc = pcmk_rc_ok;
         }
     }
     return rc;
 }
 
 int
 lrm_state_register_rsc(lrm_state_t * lrm_state,
                        const char *rsc_id,
                        const char *class,
                        const char *provider, const char *agent, enum lrmd_call_options options)
 {
     lrmd_t *conn = (lrmd_t *) lrm_state->conn;
 
     if (conn == NULL) {
         return -ENOTCONN;
     }
 
     if (is_remote_lrmd_ra(agent, provider, NULL)) {
         return lrm_state_find_or_create(rsc_id)? pcmk_ok : -EINVAL;
     }
 
     /* @TODO Implement an asynchronous version of this (currently a blocking
      * call to the lrmd).
      */
     return conn->cmds->register_rsc(lrm_state->conn, rsc_id, class, provider,
                                     agent, options);
 }
 
 int
 lrm_state_unregister_rsc(lrm_state_t * lrm_state,
                          const char *rsc_id, enum lrmd_call_options options)
 {
     if (!lrm_state->conn) {
         return -ENOTCONN;
     }
 
     if (is_remote_lrmd_ra(NULL, NULL, rsc_id)) {
         g_hash_table_remove(lrm_state_table, rsc_id);
         return pcmk_ok;
     }
 
     g_hash_table_remove(lrm_state->rsc_info_cache, rsc_id);
 
     /* @TODO Optimize this ... this function is a blocking round trip from
      * client to daemon. The controld_execd_state.c code path that uses this
      * function should always treat it as an async operation. The executor API
      * should make an async version available.
      */
     return ((lrmd_t *) lrm_state->conn)->cmds->unregister_rsc(lrm_state->conn, rsc_id, options);
 }
diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c
index 4afaacc4c3..90daa2df1b 100644
--- a/daemons/controld/controld_remote_ra.c
+++ b/daemons/controld/controld_remote_ra.c
@@ -1,1473 +1,1473 @@
 /*
  * Copyright 2013-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/crm.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 #include <crm/lrmd.h>
 #include <crm/lrmd_internal.h>
 #include <crm/services.h>
 
 #include <pacemaker-controld.h>
 
 #define REMOTE_LRMD_RA "remote"
 
 /* The max start timeout before cmd retry */
 #define MAX_START_TIMEOUT_MS 10000
 
 #define cmd_set_flags(cmd, flags_to_set) do { \
     (cmd)->status = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
                                        "Remote command", (cmd)->rsc_id, (cmd)->status, \
                                        (flags_to_set), #flags_to_set); \
         } while (0)
 
 #define cmd_clear_flags(cmd, flags_to_clear) do { \
     (cmd)->status = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
                                          "Remote command", (cmd)->rsc_id, (cmd)->status, \
                                          (flags_to_clear), #flags_to_clear); \
         } while (0)
 
 enum remote_cmd_status {
     cmd_reported_success    = (1 << 0),
     cmd_cancel              = (1 << 1),
 };
 
 typedef struct remote_ra_cmd_s {
     /*! the local node the cmd is issued from */
     char *owner;
     /*! the remote node the cmd is executed on */
     char *rsc_id;
     /*! the action to execute */
     char *action;
     /*! some string the client wants us to give it back */
     char *userdata;
     /*! start delay in ms */
     int start_delay;
     /*! timer id used for start delay. */
     int delay_id;
     /*! timeout in ms for cmd */
     int timeout;
     int remaining_timeout;
     /*! recurring interval in ms */
     guint interval_ms;
     /*! interval timer id */
     int interval_id;
     int monitor_timeout_id;
     int takeover_timeout_id;
     /*! action parameters */
     lrmd_key_value_t *params;
     pcmk__action_result_t result;
     int call_id;
     time_t start_time;
     uint32_t status;
 } remote_ra_cmd_t;
 
 #define lrm_remote_set_flags(lrm_state, flags_to_set) do { \
     lrm_state_t *lrm = (lrm_state); \
     remote_ra_data_t *ra = lrm->remote_ra_data; \
     ra->status = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Remote", \
                                     lrm->node_name, ra->status, \
                                     (flags_to_set), #flags_to_set); \
         } while (0)
 
 #define lrm_remote_clear_flags(lrm_state, flags_to_clear) do { \
     lrm_state_t *lrm = (lrm_state); \
     remote_ra_data_t *ra = lrm->remote_ra_data; \
     ra->status = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, "Remote", \
                                       lrm->node_name, ra->status, \
                                       (flags_to_clear), #flags_to_clear); \
         } while (0)
 
 enum remote_status {
     expect_takeover     = (1 << 0),
     takeover_complete   = (1 << 1),
     remote_active       = (1 << 2),
     /* Maintenance mode is difficult to determine from the controller's context,
      * so we have it signalled back with the transition from the scheduler.
      */
     remote_in_maint     = (1 << 3),
     /* Similarly for whether we are controlling a guest node or a remote node:
      * there is already a meta-attribute in the transition, and since the
      * situation doesn't change over time, we can note the information down at
      * resource start for later use, when the attributes aren't at hand.
      */
     controlling_guest   = (1 << 4),
 };
 
 typedef struct remote_ra_data_s {
     crm_trigger_t *work;
     remote_ra_cmd_t *cur_cmd;
     GList *cmds;
     GList *recurring_cmds;
     uint32_t status;
 } remote_ra_data_t;
 
 static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms);
 static void handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd);
 static GList *fail_all_monitor_cmds(GList * list);
 
 static void
 free_cmd(gpointer user_data)
 {
     remote_ra_cmd_t *cmd = user_data;
 
     if (!cmd) {
         return;
     }
     if (cmd->delay_id) {
         g_source_remove(cmd->delay_id);
     }
     if (cmd->interval_id) {
         g_source_remove(cmd->interval_id);
     }
     if (cmd->monitor_timeout_id) {
         g_source_remove(cmd->monitor_timeout_id);
     }
     if (cmd->takeover_timeout_id) {
         g_source_remove(cmd->takeover_timeout_id);
     }
     free(cmd->owner);
     free(cmd->rsc_id);
     free(cmd->action);
     free(cmd->userdata);
     pcmk__reset_result(&(cmd->result));
     lrmd_key_value_freeall(cmd->params);
     free(cmd);
 }
 
 static int
 generate_callid(void)
 {
     static int remote_ra_callid = 0;
 
     remote_ra_callid++;
     if (remote_ra_callid <= 0) {
         remote_ra_callid = 1;
     }
 
     return remote_ra_callid;
 }
 
 static gboolean
 recurring_helper(gpointer data)
 {
     remote_ra_cmd_t *cmd = data;
     lrm_state_t *connection_rsc = NULL;
 
     cmd->interval_id = 0;
     connection_rsc = lrm_state_find(cmd->rsc_id);
     if (connection_rsc && connection_rsc->remote_ra_data) {
         remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
 
         ra_data->recurring_cmds = g_list_remove(ra_data->recurring_cmds, cmd);
 
         ra_data->cmds = g_list_append(ra_data->cmds, cmd);
         mainloop_set_trigger(ra_data->work);
     }
     return FALSE;
 }
 
 static gboolean
 start_delay_helper(gpointer data)
 {
     remote_ra_cmd_t *cmd = data;
     lrm_state_t *connection_rsc = NULL;
 
     cmd->delay_id = 0;
     connection_rsc = lrm_state_find(cmd->rsc_id);
     if (connection_rsc && connection_rsc->remote_ra_data) {
         remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
 
         mainloop_set_trigger(ra_data->work);
     }
     return FALSE;
 }
 
 static bool
 should_purge_attributes(crm_node_t *node)
 {
     bool purge = true;
     crm_node_t *conn_node = NULL;
     lrm_state_t *connection_rsc = NULL;
 
     if (!node->conn_host) {
         return purge;
     }
 
     /* Get the node that was hosting the remote connection resource from the
      * peer cache.  That's the one we really care about here.
      */
     conn_node = pcmk__get_node(0, node->conn_host, NULL,
                                pcmk__node_search_cluster_member);
     if (conn_node == NULL) {
         return purge;
     }
 
    /* Check the uptime of connection_rsc. If it has been running long enough,
     * set purge=false. "Long enough" means it started running earlier than
     * the timestamp when we noticed it went away in the first place.
     */
     connection_rsc = lrm_state_find(node->uname);
 
     if (connection_rsc != NULL) {
         lrmd_t *lrm = connection_rsc->conn;
         time_t uptime = lrmd__uptime(lrm);
         time_t now = time(NULL);
 
         /* Add 20s of fuzziness to give corosync a while to notice the remote
          * host is gone.  On various error conditions (failure to get uptime,
          * peer_lost isn't set) we default to purging.
          */
         if (uptime > 0 &&
             conn_node->peer_lost > 0 &&
             uptime + 20 >= now - conn_node->peer_lost) {
             purge = false;
         }
     }
 
     return purge;
 }
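
 /* A worked example of the uptime check above, using illustrative numbers:
  * if the connection resource's uptime is 100s and the peer was noticed as
  * lost 90s ago (now - peer_lost == 90), then 100 + 20 >= 90, so the
  * connection predates the loss and purge is set to false. With an uptime of
  * only 60s, 60 + 20 < 90: the connection started well after the loss was
  * noticed, so purge keeps its default of true and the node's attributes are
  * purged.
  */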
 
 static enum controld_section_e
 section_to_delete(bool purge)
 {
     if (pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) {
         if (purge) {
             return controld_section_all_unlocked;
         } else {
             return controld_section_lrm_unlocked;
         }
     } else {
         if (purge) {
             return controld_section_all;
         } else {
             return controld_section_lrm;
         }
     }
 }
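
 /* For reference, the mapping implemented above:
  *
  *   shutdown-lock enabled | purge | section deleted
  *   ----------------------+-------+--------------------------------
  *   yes                   | true  | controld_section_all_unlocked
  *   yes                   | false | controld_section_lrm_unlocked
  *   no                    | true  | controld_section_all
  *   no                    | false | controld_section_lrm
  */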
 
 static void
 purge_remote_node_attrs(int call_opt, crm_node_t *node)
 {
     bool purge = should_purge_attributes(node);
     enum controld_section_e section = section_to_delete(purge);
 
     /* Purge node from attrd's memory */
     if (purge) {
         update_attrd_remote_node_removed(node->uname, NULL);
     }
 
     controld_delete_node_state(node->uname, section, call_opt);
 }
 
 /*!
  * \internal
  * \brief Handle cluster communication related to pacemaker_remote node joining
  *
  * \param[in] node_name  Name of newly integrated pacemaker_remote node
  */
 static void
 remote_node_up(const char *node_name)
 {
     int call_opt;
     xmlNode *update, *state;
     crm_node_t *node;
     lrm_state_t *connection_rsc = NULL;
 
     CRM_CHECK(node_name != NULL, return);
     crm_info("Announcing Pacemaker Remote node %s", node_name);
 
     call_opt = crmd_cib_smart_opt();
 
     /* Delete node's CRM_OP_PROBED attribute. Deleting any attribute ensures
      * that the attribute manager learns the node is remote. Deletion of this
      * specific attribute is a holdover from when it had special meaning.
      *
      * @COMPAT Find another way to tell attrd that the node is remote, without
      * risking deletion or overwrite of an arbitrary attribute. Then work on
      * deprecating CRM_OP_PROBED.
      */
     update_attrd(node_name, CRM_OP_PROBED, NULL, NULL, TRUE);
 
     /* Ensure node is in the remote peer cache with member status */
     node = pcmk__cluster_lookup_remote_node(node_name);
     CRM_CHECK(node != NULL, return);
 
     purge_remote_node_attrs(call_opt, node);
     pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0);
 
     /* Apply any start state that we were given from the environment on the
      * remote node.
      */
     connection_rsc = lrm_state_find(node->uname);
 
     if (connection_rsc != NULL) {
         lrmd_t *lrm = connection_rsc->conn;
         const char *start_state = lrmd__node_start_state(lrm);
 
         if (start_state) {
             set_join_state(start_state, node->uname, node->uuid, true);
         }
     }
 
     /* pacemaker_remote nodes don't participate in the membership layer,
      * so cluster nodes don't automatically get notified when they come and go.
      * We send a cluster message to the DC, and update the CIB node state entry,
      * so the DC will get it sooner (via message) or later (via CIB refresh),
      * and any other interested parties can query the CIB.
      */
     broadcast_remote_state_message(node_name, true);
 
     update = pcmk__xe_create(NULL, PCMK_XE_STATUS);
     state = create_node_state_update(node, node_update_cluster, update,
                                      __func__);
 
     /* Clear the PCMK__XA_NODE_FENCED flag in the node state. If the node ever
      * needs to be fenced, this flag will allow various actions to determine
      * whether the fencing has happened yet.
      */
     crm_xml_add(state, PCMK__XA_NODE_FENCED, "0");
 
     /* TODO: If the remote connection drops, and this (async) CIB update either
      * failed or has not yet completed, later actions could mistakenly think the
      * node has already been fenced (if the PCMK__XA_NODE_FENCED attribute was
      * previously set, because it won't have been cleared). This could prevent
      * actual fencing or allow recurring monitor failures to be cleared too
      * soon. Ideally, we wouldn't rely on the CIB for the fenced status.
      */
     controld_update_cib(PCMK_XE_STATUS, update, call_opt, NULL);
     pcmk__xml_free(update);
 }
 
 enum down_opts {
     DOWN_KEEP_LRM,
     DOWN_ERASE_LRM
 };
 
 /*!
  * \internal
  * \brief Handle cluster communication related to pacemaker_remote node leaving
  *
  * \param[in] node_name  Name of lost node
  * \param[in] opts       Whether to keep or erase LRM history
  */
 static void
 remote_node_down(const char *node_name, const enum down_opts opts)
 {
     xmlNode *update;
     int call_opt = crmd_cib_smart_opt();
     crm_node_t *node;
 
     /* Purge node from attrd's memory */
     update_attrd_remote_node_removed(node_name, NULL);
 
     /* Normally, only node attributes should be erased, and the resource history
      * should be kept until the node comes back up. However, after a successful
      * fence, we want to clear the history as well, so we don't think resources
      * are still running on the node.
      */
     if (opts == DOWN_ERASE_LRM) {
         controld_delete_node_state(node_name, controld_section_all, call_opt);
     } else {
         controld_delete_node_state(node_name, controld_section_attrs, call_opt);
     }
 
     /* Ensure node is in the remote peer cache with lost state */
     node = pcmk__cluster_lookup_remote_node(node_name);
     CRM_CHECK(node != NULL, return);
     pcmk__update_peer_state(__func__, node, CRM_NODE_LOST, 0);
 
     /* Notify DC */
     broadcast_remote_state_message(node_name, false);
 
     /* Update CIB node state */
     update = pcmk__xe_create(NULL, PCMK_XE_STATUS);
     create_node_state_update(node, node_update_cluster, update, __func__);
     controld_update_cib(PCMK_XE_STATUS, update, call_opt, NULL);
     pcmk__xml_free(update);
 }
 
 /*!
  * \internal
  * \brief Handle effects of a remote RA command on node state
  *
  * \param[in] cmd  Completed remote RA command
  */
 static void
 check_remote_node_state(const remote_ra_cmd_t *cmd)
 {
     /* Only successful actions can change node state */
     if (!pcmk__result_ok(&(cmd->result))) {
         return;
     }
 
     if (pcmk__str_eq(cmd->action, PCMK_ACTION_START, pcmk__str_casei)) {
         remote_node_up(cmd->rsc_id);
 
     } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_MIGRATE_FROM,
                             pcmk__str_casei)) {
         /* After a successful migration, we don't need to do remote_node_up()
          * because the DC already knows the node is up, and we don't want to
          * clear LRM history etc. We do need to add the remote node to this
          * host's remote peer cache, because (unless it happens to be DC)
          * it hasn't been tracking the remote node, and other code relies on
          * the cache to distinguish remote nodes from unseen cluster nodes.
          */
         crm_node_t *node = pcmk__cluster_lookup_remote_node(cmd->rsc_id);
 
         CRM_CHECK(node != NULL, return);
         pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0);
 
     } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) {
         lrm_state_t *lrm_state = lrm_state_find(cmd->rsc_id);
         remote_ra_data_t *ra_data = lrm_state? lrm_state->remote_ra_data : NULL;
 
         if (ra_data) {
             if (!pcmk_is_set(ra_data->status, takeover_complete)) {
                 /* Stop means down if we didn't successfully migrate elsewhere */
                 remote_node_down(cmd->rsc_id, DOWN_KEEP_LRM);
             } else if (AM_I_DC == FALSE) {
                 /* Only the connection host and DC track node state,
                  * so if the connection migrated elsewhere and we aren't DC,
                  * un-cache the node, so we don't have stale info
                  */
                 pcmk__cluster_forget_remote_node(cmd->rsc_id);
             }
         }
     }
 
     /* We don't do anything for successful monitors, which is correct for
      * routine recurring monitors, and for monitors on nodes where the
      * connection isn't supposed to be (the cluster will stop the connection in
      * that case). However, if the initial probe finds the connection already
      * active on the node where we want it, we probably should do
      * remote_node_up(). Unfortunately, we can't distinguish that case here.
      * Given that connections have to be initiated by the cluster, the chance of
      * that should be close to zero.
      */
 }
 
 static void
 report_remote_ra_result(remote_ra_cmd_t * cmd)
 {
     lrmd_event_data_t op = { 0, };
 
     check_remote_node_state(cmd);
 
     op.type = lrmd_event_exec_complete;
     op.rsc_id = cmd->rsc_id;
     op.op_type = cmd->action;
     op.user_data = cmd->userdata;
     op.timeout = cmd->timeout;
     op.interval_ms = cmd->interval_ms;
     op.t_run = cmd->start_time;
-    op.t_rcchange = (unsigned int) cmd->start_time;
+    op.t_rcchange = cmd->start_time;
 
     lrmd__set_result(&op, cmd->result.exit_status, cmd->result.execution_status,
                      cmd->result.exit_reason);
 
     if (pcmk_is_set(cmd->status, cmd_reported_success) && !pcmk__result_ok(&(cmd->result))) {
-        op.t_rcchange = (unsigned int) time(NULL);
+        op.t_rcchange = time(NULL);
         /* This edge case is unlikely to ever occur, but if it did, the result
          * would be that a failure is not processed correctly. It is only
          * remotely possible because we can detect that a connection
          * resource's TCP connection has failed at any moment after start has
          * completed; the actual recurring operation is just a connectivity
          * ping.
          *
          * Basically, we are not guaranteed that the first successful monitor
          * op and a subsequent failed monitor op will not occur at the same
          * timestamp. We have to make it look like the operations occurred at
          * separate times, though.
          */
-        if (op.t_rcchange == (unsigned int) op.t_run) {
+        if (op.t_rcchange == op.t_run) {
             op.t_rcchange++;
         }
     }
 
     if (cmd->params) {
         lrmd_key_value_t *tmp;
 
         op.params = pcmk__strkey_table(free, free);
         for (tmp = cmd->params; tmp; tmp = tmp->next) {
             pcmk__insert_dup(op.params, tmp->key, tmp->value);
         }
 
     }
     op.call_id = cmd->call_id;
     op.remote_nodename = cmd->owner;
 
     lrm_op_callback(&op);
 
     if (op.params) {
         g_hash_table_destroy(op.params);
     }
     lrmd__reset_result(&op);
 }
 
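 /*!
  * \internal
  * \brief Recalculate how much of a command's timeout remains (in milliseconds)
  *
  * \param[in,out] cmd  Remote connection action to update
  */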
 static void
 update_remaining_timeout(remote_ra_cmd_t * cmd)
 {
     cmd->remaining_timeout = ((cmd->timeout / 1000) - (time(NULL) - cmd->start_time)) * 1000;
 }
 
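 /*!
  * \internal
  * \brief Retry a pending remote connection start or migrate_from (timer callback)
  *
  * \param[in,out] data  Executor state object for remote connection
  *
  * \return FALSE (to indicate that the timer should not be rescheduled)
  */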
 static gboolean
 retry_start_cmd_cb(gpointer data)
 {
     lrm_state_t *lrm_state = data;
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     remote_ra_cmd_t *cmd = NULL;
     int rc = ETIME;
 
     if (!ra_data || !ra_data->cur_cmd) {
         return FALSE;
     }
     cmd = ra_data->cur_cmd;
     if (!pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START,
                               PCMK_ACTION_MIGRATE_FROM, NULL)) {
         return FALSE;
     }
     update_remaining_timeout(cmd);
 
     if (cmd->remaining_timeout > 0) {
         rc = handle_remote_ra_start(lrm_state, cmd, cmd->remaining_timeout);
     } else {
         pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                          PCMK_EXEC_TIMEOUT,
                          "Not enough time remains to retry remote connection");
     }
 
     if (rc != pcmk_rc_ok) {
         report_remote_ra_result(cmd);
 
         if (ra_data->cmds) {
             mainloop_set_trigger(ra_data->work);
         }
         ra_data->cur_cmd = NULL;
         free_cmd(cmd);
     } else {
         /* wait for connection event */
     }
 
     return FALSE;
 }
 
 
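 /*!
  * \internal
  * \brief Stop a remote connection after timing out waiting for its takeover
  *
  * \param[in,out] data  Stop action for remote connection
  *
  * \return FALSE (to indicate that the timer should not be rescheduled)
  */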
 static gboolean
 connection_takeover_timeout_cb(gpointer data)
 {
     lrm_state_t *lrm_state = NULL;
     remote_ra_cmd_t *cmd = data;
 
     crm_info("Takeover event timed out for node %s", cmd->rsc_id);
     cmd->takeover_timeout_id = 0;
 
     lrm_state = lrm_state_find(cmd->rsc_id);
 
     handle_remote_ra_stop(lrm_state, cmd);
     free_cmd(cmd);
 
     return FALSE;
 }
 
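 /*!
  * \internal
  * \brief Fail a monitor after timing out waiting for a poke response
  *
  * \param[in,out] data  Monitor action that timed out
  *
  * \return FALSE (to indicate that the timer should not be rescheduled)
  */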
 static gboolean
 monitor_timeout_cb(gpointer data)
 {
     lrm_state_t *lrm_state = NULL;
     remote_ra_cmd_t *cmd = data;
 
     lrm_state = lrm_state_find(cmd->rsc_id);
 
     crm_info("Timed out waiting for remote poke response from %s%s",
              cmd->rsc_id, (lrm_state? "" : " (no LRM state)"));
     cmd->monitor_timeout_id = 0;
     pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_TIMEOUT,
                      "Remote executor did not respond");
 
     if (lrm_state && lrm_state->remote_ra_data) {
         remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
         if (ra_data->cur_cmd == cmd) {
             ra_data->cur_cmd = NULL;
         }
         if (ra_data->cmds) {
             mainloop_set_trigger(ra_data->work);
         }
     }
 
     report_remote_ra_result(cmd);
     free_cmd(cmd);
 
     if (lrm_state) {
         lrm_state_disconnect(lrm_state);
     }
     return FALSE;
 }
 
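 /*!
  * \internal
  * \brief Create a successful action result and process it as a real event
  *
  * \param[in,out] lrm_state  Executor state object (NULL to use local node's)
  * \param[in]     rsc_id     ID of resource the result is for
  * \param[in]     op_type    Name of action the result is for
  */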
 static void
 synthesize_lrmd_success(lrm_state_t *lrm_state, const char *rsc_id, const char *op_type)
 {
     lrmd_event_data_t op = { 0, };
 
     if (lrm_state == NULL) {
         /* If lrm_state was not given, assume the local node */
         lrm_state = lrm_state_find(controld_globals.our_nodename);
     }
     CRM_ASSERT(lrm_state != NULL);
 
     op.type = lrmd_event_exec_complete;
     op.rsc_id = rsc_id;
     op.op_type = op_type;
     op.t_run = time(NULL);
-    op.t_rcchange = (unsigned int) op.t_run;
+    op.t_rcchange = op.t_run;
     op.call_id = generate_callid();
     lrmd__set_result(&op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
     process_lrm_event(lrm_state, &op, NULL, NULL);
 }
 
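 /*!
  * \internal
  * \brief Handle an executor event for a remote connection
  *
  * \param[in,out] op  Event to handle
  */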
 void
 remote_lrm_op_callback(lrmd_event_data_t * op)
 {
     gboolean cmd_handled = FALSE;
     lrm_state_t *lrm_state = NULL;
     remote_ra_data_t *ra_data = NULL;
     remote_ra_cmd_t *cmd = NULL;
 
     crm_debug("Processing '%s%s%s' event on remote connection to %s: %s "
               "(%d) status=%s (%d)",
               (op->op_type? op->op_type : ""), (op->op_type? " " : ""),
               lrmd_event_type2str(op->type), op->remote_nodename,
               services_ocf_exitcode_str(op->rc), op->rc,
               pcmk_exec_status_str(op->op_status), op->op_status);
 
     lrm_state = lrm_state_find(op->remote_nodename);
     if (!lrm_state || !lrm_state->remote_ra_data) {
         crm_debug("No state information found for remote connection event");
         return;
     }
     ra_data = lrm_state->remote_ra_data;
 
     if (op->type == lrmd_event_new_client) {
         // Another client has connected to the remote daemon
 
         if (pcmk_is_set(ra_data->status, expect_takeover)) {
             // Great, we knew this was coming
             lrm_remote_clear_flags(lrm_state, expect_takeover);
             lrm_remote_set_flags(lrm_state, takeover_complete);
 
         } else {
             crm_err("Disconnecting from Pacemaker Remote node %s due to "
                     "unexpected client takeover", op->remote_nodename);
             /* In this case, lrmd_tls_connection_destroy() will be called
              * under the control of the mainloop. Do not free lrm_state->conn
              * yet; it will be freed in the following stop action. */
             lrm_state_disconnect_only(lrm_state);
         }
         return;
     }
 
     /* filter all EXEC events up */
     if (op->type == lrmd_event_exec_complete) {
         if (pcmk_is_set(ra_data->status, takeover_complete)) {
             crm_debug("Ignoring event because this connection has been "
                       "taken over by another node");
         } else {
             lrm_op_callback(op);
         }
         return;
     }
 
     if ((op->type == lrmd_event_disconnect) && (ra_data->cur_cmd == NULL)) {
 
         if (!pcmk_is_set(ra_data->status, remote_active)) {
             crm_debug("Disconnection from Pacemaker Remote node %s complete",
                       lrm_state->node_name);
 
         } else if (!remote_ra_is_in_maintenance(lrm_state)) {
             crm_err("Lost connection to Pacemaker Remote node %s",
                     lrm_state->node_name);
             ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
             ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
 
         } else {
             crm_notice("Unmanaged Pacemaker Remote node %s disconnected",
                        lrm_state->node_name);
             /* Do roughly what a 'stop' on the remote-resource would do */
             handle_remote_ra_stop(lrm_state, NULL);
             remote_node_down(lrm_state->node_name, DOWN_KEEP_LRM);
             /* now fake the reply of a successful 'stop' */
             synthesize_lrmd_success(NULL, lrm_state->node_name,
                                     PCMK_ACTION_STOP);
         }
         return;
     }
 
     if (!ra_data->cur_cmd) {
         crm_debug("No current command to match this event to");
         return;
     }
 
     cmd = ra_data->cur_cmd;
 
     /* Start and migrate_from actions complete after the connection comes
      * back to us. */
     if ((op->type == lrmd_event_connect)
         && pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START,
                                 PCMK_ACTION_MIGRATE_FROM, NULL)) {
         if (op->connection_rc < 0) {
             update_remaining_timeout(cmd);
 
             if ((op->connection_rc == -ENOKEY)
                 || (op->connection_rc == -EKEYREJECTED)) {
                 // Hard error, don't retry
                 pcmk__set_result(&(cmd->result), PCMK_OCF_INVALID_PARAM,
                                  PCMK_EXEC_ERROR,
                                  pcmk_strerror(op->connection_rc));
 
             } else if (cmd->remaining_timeout > 3000) {
                 crm_trace("rescheduling start, remaining timeout %d", cmd->remaining_timeout);
                 g_timeout_add(1000, retry_start_cmd_cb, lrm_state);
                 return;
 
             } else {
                 crm_trace("can't reschedule start, remaining timeout too small %d",
                           cmd->remaining_timeout);
                 pcmk__format_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                                     PCMK_EXEC_TIMEOUT,
                                     "%s without enough time to retry",
                                     pcmk_strerror(op->connection_rc));
             }
 
         } else {
             lrm_state_reset_tables(lrm_state, TRUE);
             pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
             lrm_remote_set_flags(lrm_state, remote_active);
         }
 
         crm_debug("Remote connection event matched %s action", cmd->action);
         report_remote_ra_result(cmd);
         cmd_handled = TRUE;
 
     } else if ((op->type == lrmd_event_poke)
                && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
                                pcmk__str_casei)) {
 
         if (cmd->monitor_timeout_id) {
             g_source_remove(cmd->monitor_timeout_id);
             cmd->monitor_timeout_id = 0;
         }
 
         /* Only report success the first time; after that, only worry about
          * failures. For this function, getting the poke back is always a
          * success. Pokes fail only if the send fails or the response times
          * out. */
         if (!pcmk_is_set(cmd->status, cmd_reported_success)) {
             pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
             report_remote_ra_result(cmd);
             cmd_set_flags(cmd, cmd_reported_success);
         }
 
         crm_debug("Remote poke event matched %s action", cmd->action);
 
         /* success, keep rescheduling if interval is present. */
         if (cmd->interval_ms && !pcmk_is_set(cmd->status, cmd_cancel)) {
             ra_data->recurring_cmds = g_list_append(ra_data->recurring_cmds, cmd);
             cmd->interval_id = g_timeout_add(cmd->interval_ms,
                                              recurring_helper, cmd);
             cmd = NULL;         /* prevent free */
         }
         cmd_handled = TRUE;
 
     } else if ((op->type == lrmd_event_disconnect)
                && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
                                pcmk__str_casei)) {
         if (pcmk_is_set(ra_data->status, remote_active) &&
             !pcmk_is_set(cmd->status, cmd_cancel)) {
             pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                              PCMK_EXEC_ERROR,
                              "Remote connection unexpectedly dropped "
                              "during monitor");
             report_remote_ra_result(cmd);
             crm_err("Remote connection to %s unexpectedly dropped during monitor",
                     lrm_state->node_name);
         }
         cmd_handled = TRUE;
 
     } else if ((op->type == lrmd_event_new_client)
                && pcmk__str_eq(cmd->action, PCMK_ACTION_STOP,
                                pcmk__str_casei)) {
 
         handle_remote_ra_stop(lrm_state, cmd);
         cmd_handled = TRUE;
 
     } else {
         crm_debug("Event did not match %s action", ra_data->cur_cmd->action);
     }
 
     if (cmd_handled) {
         ra_data->cur_cmd = NULL;
         if (ra_data->cmds) {
             mainloop_set_trigger(ra_data->work);
         }
         free_cmd(cmd);
     }
 }
 
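 /*!
  * \internal
  * \brief Handle a stop of a remote connection
  *
  * \param[in,out] lrm_state  Executor state object for remote connection
  * \param[in,out] cmd        Stop action (or NULL if the stop was not the
  *                           result of an explicit action)
  */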
 static void
 handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd)
 {
     remote_ra_data_t *ra_data = NULL;
 
     CRM_ASSERT(lrm_state);
     ra_data = lrm_state->remote_ra_data;
 
     if (!pcmk_is_set(ra_data->status, takeover_complete)) {
         /* Delete pending ops whenever the remote connection is intentionally
          * stopped */
         g_hash_table_remove_all(lrm_state->active_ops);
     } else {
         /* We no longer hold the history if this connection has been
          * migrated; however, we keep the metadata cache for future use */
         lrm_state_reset_tables(lrm_state, FALSE);
     }
 
     lrm_remote_clear_flags(lrm_state, remote_active);
     lrm_state_disconnect(lrm_state);
 
     if (ra_data->cmds) {
         g_list_free_full(ra_data->cmds, free_cmd);
     }
     if (ra_data->recurring_cmds) {
         g_list_free_full(ra_data->recurring_cmds, free_cmd);
     }
     ra_data->cmds = NULL;
     ra_data->recurring_cmds = NULL;
     ra_data->cur_cmd = NULL;
 
     if (cmd) {
         pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
         report_remote_ra_result(cmd);
     }
 }
 
 // \return Standard Pacemaker return code
 static int
 handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms)
 {
     const char *server = NULL;
     lrmd_key_value_t *tmp = NULL;
     int port = 0;
     int timeout_used = timeout_ms > MAX_START_TIMEOUT_MS ? MAX_START_TIMEOUT_MS : timeout_ms;
     int rc = pcmk_rc_ok;
 
     for (tmp = cmd->params; tmp; tmp = tmp->next) {
         if (pcmk__strcase_any_of(tmp->key,
                                  PCMK_REMOTE_RA_ADDR, PCMK_REMOTE_RA_SERVER,
                                  NULL)) {
             server = tmp->value;
 
         } else if (pcmk__str_eq(tmp->key, PCMK_REMOTE_RA_PORT,
                                 pcmk__str_none)) {
             port = atoi(tmp->value);
 
         } else if (pcmk__str_eq(tmp->key, CRM_META "_" PCMK__META_CONTAINER,
                                 pcmk__str_none)) {
             lrm_remote_set_flags(lrm_state, controlling_guest);
         }
     }
 
     rc = controld_connect_remote_executor(lrm_state, server, port,
                                           timeout_used);
     if (rc != pcmk_rc_ok) {
         pcmk__format_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                             PCMK_EXEC_ERROR,
                             "Could not connect to Pacemaker Remote node %s: %s",
                             lrm_state->node_name, pcmk_rc_str(rc));
     }
     return rc;
 }
 
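 /*!
  * \internal
  * \brief Execute any queued actions for a remote connection (trigger handler)
  *
  * \param[in,out] user_data  Executor state object for remote connection
  *
  * \return Always TRUE
  */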
 static gboolean
 handle_remote_ra_exec(gpointer user_data)
 {
     int rc = 0;
     lrm_state_t *lrm_state = user_data;
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     remote_ra_cmd_t *cmd;
     GList *first = NULL;
 
     if (ra_data->cur_cmd) {
         /* still waiting on previous cmd */
         return TRUE;
     }
 
     while (ra_data->cmds) {
         first = ra_data->cmds;
         cmd = first->data;
         if (cmd->delay_id) {
             /* still waiting for start delay timer to trip */
             return TRUE;
         }
 
         ra_data->cmds = g_list_remove_link(ra_data->cmds, first);
         g_list_free_1(first);
 
         if (pcmk__str_any_of(cmd->action, PCMK_ACTION_START,
                              PCMK_ACTION_MIGRATE_FROM, NULL)) {
             lrm_remote_clear_flags(lrm_state, expect_takeover | takeover_complete);
             if (handle_remote_ra_start(lrm_state, cmd,
                                        cmd->timeout) == pcmk_rc_ok) {
                 /* take care of this later when we get async connection result */
                 crm_debug("Initiated async remote connection, %s action will complete after connect event",
                           cmd->action);
                 ra_data->cur_cmd = cmd;
                 return TRUE;
             }
             report_remote_ra_result(cmd);
 
         } else if (!strcmp(cmd->action, PCMK_ACTION_MONITOR)) {
 
             if (lrm_state_is_connected(lrm_state) == TRUE) {
                 rc = lrm_state_poke_connection(lrm_state);
                 if (rc < 0) {
                     pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                                      PCMK_EXEC_ERROR, pcmk_strerror(rc));
                 }
             } else {
                 rc = -1;
                 pcmk__set_result(&(cmd->result), PCMK_OCF_NOT_RUNNING,
                                  PCMK_EXEC_DONE, "Remote connection inactive");
             }
 
             if (rc == 0) {
                 crm_debug("Poked Pacemaker Remote at node %s, waiting for async response",
                           cmd->rsc_id);
                 ra_data->cur_cmd = cmd;
                 cmd->monitor_timeout_id = g_timeout_add(cmd->timeout, monitor_timeout_cb, cmd);
                 return TRUE;
             }
             report_remote_ra_result(cmd);
 
         } else if (!strcmp(cmd->action, PCMK_ACTION_STOP)) {
 
             if (pcmk_is_set(ra_data->status, expect_takeover)) {
                 /* Briefly wait on stop for the takeover event to occur. If
                  * the takeover event does not occur during the wait period,
                  * that's fine; it just means that the remote node's
                  * lrm_status section will be cleared, requiring all resources
                  * running on the remote node to be explicitly re-detected via
                  * probe actions. If the takeover does occur successfully,
                  * then we can leave the status section intact. */
                 cmd->takeover_timeout_id = g_timeout_add((cmd->timeout/2), connection_takeover_timeout_cb, cmd);
                 ra_data->cur_cmd = cmd;
                 return TRUE;
             }
 
             handle_remote_ra_stop(lrm_state, cmd);
 
         } else if (strcmp(cmd->action, PCMK_ACTION_MIGRATE_TO) == 0) {
             lrm_remote_clear_flags(lrm_state, takeover_complete);
             lrm_remote_set_flags(lrm_state, expect_takeover);
             pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
             report_remote_ra_result(cmd);
 
         } else if (pcmk__str_any_of(cmd->action, PCMK_ACTION_RELOAD,
                                     PCMK_ACTION_RELOAD_AGENT, NULL))  {
             /* Currently the only reloadable parameter is
              * PCMK_REMOTE_RA_RECONNECT_INTERVAL, which is only used by the
              * scheduler via the CIB, so reloads are a no-op.
              *
              * @COMPAT DC <2.1.0: We only need to check for "reload" in case
              * we're in a rolling upgrade with a DC scheduling "reload" instead
              * of "reload-agent". An OCF 1.1 "reload" would be a no-op anyway,
              * so this would work for that purpose as well.
              */
             pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
             report_remote_ra_result(cmd);
         }
 
         free_cmd(cmd);
     }
 
     return TRUE;
 }
 
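 /*!
  * \internal
  * \brief Initialize remote connection data for an executor state object
  *
  * \param[in,out] lrm_state  Executor state object for remote connection
  */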
 static void
 remote_ra_data_init(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = NULL;
 
     if (lrm_state->remote_ra_data) {
         return;
     }
 
     ra_data = pcmk__assert_alloc(1, sizeof(remote_ra_data_t));
     ra_data->work = mainloop_add_trigger(G_PRIORITY_HIGH, handle_remote_ra_exec, lrm_state);
     lrm_state->remote_ra_data = ra_data;
 }
 
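 /*!
  * \internal
  * \brief Free a remote connection's private data
  *
  * \param[in,out] lrm_state  Executor state object for remote connection
  */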
 void
 remote_ra_cleanup(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
     if (!ra_data) {
         return;
     }
 
     if (ra_data->cmds) {
         g_list_free_full(ra_data->cmds, free_cmd);
     }
 
     if (ra_data->recurring_cmds) {
         g_list_free_full(ra_data->recurring_cmds, free_cmd);
     }
     mainloop_destroy_trigger(ra_data->work);
     free(ra_data);
     lrm_state->remote_ra_data = NULL;
 }
 
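 /*!
  * \internal
  * \brief Check whether an agent or resource ID is a Pacemaker Remote connection
  *
  * \param[in] agent     Agent name to check (if not NULL)
  * \param[in] provider  Agent provider to check (if not NULL)
  * \param[in] id        Resource ID to check (if not NULL)
  *
  * \return TRUE if this is a remote connection resource, otherwise FALSE
  */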
 gboolean
 is_remote_lrmd_ra(const char *agent, const char *provider, const char *id)
 {
     if (agent && provider && !strcmp(agent, REMOTE_LRMD_RA) && !strcmp(provider, "pacemaker")) {
         return TRUE;
     }
     if ((id != NULL) && (lrm_state_find(id) != NULL)
         && !pcmk__str_eq(id, controld_globals.our_nodename, pcmk__str_casei)) {
         return TRUE;
     }
 
     return FALSE;
 }
 
 lrmd_rsc_info_t *
 remote_ra_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id)
 {
     lrmd_rsc_info_t *info = NULL;
 
     if ((lrm_state_find(rsc_id))) {
         info = pcmk__assert_alloc(1, sizeof(lrmd_rsc_info_t));
 
         info->id = pcmk__str_copy(rsc_id);
         info->type = pcmk__str_copy(REMOTE_LRMD_RA);
         info->standard = pcmk__str_copy(PCMK_RESOURCE_CLASS_OCF);
         info->provider = pcmk__str_copy("pacemaker");
     }
 
     return info;
 }
 
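 /*!
  * \internal
  * \brief Check whether an action is supported for remote connections
  *
  * \param[in] action  Action name to check
  *
  * \return TRUE if \p action is supported, otherwise FALSE
  */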
 static gboolean
 is_remote_ra_supported_action(const char *action)
 {
     return pcmk__str_any_of(action,
                             PCMK_ACTION_START,
                             PCMK_ACTION_STOP,
                             PCMK_ACTION_MONITOR,
                             PCMK_ACTION_MIGRATE_TO,
                             PCMK_ACTION_MIGRATE_FROM,
                             PCMK_ACTION_RELOAD_AGENT,
                             PCMK_ACTION_RELOAD,
                             NULL);
 }
 
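 /*!
  * \internal
  * \brief Fail and remove all recurring monitors in a command list
  *
  * \param[in,out] list  List of remote connection actions
  *
  * \return Updated command list
  */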
 static GList *
 fail_all_monitor_cmds(GList * list)
 {
     GList *rm_list = NULL;
     remote_ra_cmd_t *cmd = NULL;
     GList *gIter = NULL;
 
     for (gIter = list; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms > 0)
             && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
                             pcmk__str_casei)) {
             rm_list = g_list_append(rm_list, cmd);
         }
     }
 
     for (gIter = rm_list; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
 
         pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                          PCMK_EXEC_ERROR, "Lost connection to remote executor");
         crm_trace("Pre-emptively failing %s %s (interval=%u, %s)",
                   cmd->action, cmd->rsc_id, cmd->interval_ms, cmd->userdata);
         report_remote_ra_result(cmd);
 
         list = g_list_remove(list, cmd);
         free_cmd(cmd);
     }
 
     /* frees only the list data, not the cmds */
     g_list_free(rm_list);
     return list;
 }
 
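 /*!
  * \internal
  * \brief Remove a matching action from a command list, if present
  *
  * \param[in,out] list         List of remote connection actions
  * \param[in]     action       Action name to match
  * \param[in]     interval_ms  Action interval (in milliseconds) to match
  *
  * \return Updated command list
  */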
 static GList *
 remove_cmd(GList * list, const char *action, guint interval_ms)
 {
     remote_ra_cmd_t *cmd = NULL;
     GList *gIter = NULL;
 
     for (gIter = list; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms == interval_ms)
             && pcmk__str_eq(cmd->action, action, pcmk__str_casei)) {
             break;
         }
         cmd = NULL;
     }
     if (cmd) {
         list = g_list_remove(list, cmd);
         free_cmd(cmd);
     }
     return list;
 }
 
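 /*!
  * \internal
  * \brief Cancel a queued or in-flight remote connection action
  *
  * \param[in,out] lrm_state    Executor state object
  * \param[in]     rsc_id       Connection resource ID
  * \param[in]     action       Name of action to cancel
  * \param[in]     interval_ms  Interval (in milliseconds) of action to cancel
  *
  * \return 0 on success, -EINVAL if no connection data was found
  */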
 int
 remote_ra_cancel(lrm_state_t *lrm_state, const char *rsc_id,
                  const char *action, guint interval_ms)
 {
     lrm_state_t *connection_rsc = NULL;
     remote_ra_data_t *ra_data = NULL;
 
     connection_rsc = lrm_state_find(rsc_id);
     if (!connection_rsc || !connection_rsc->remote_ra_data) {
         return -EINVAL;
     }
 
     ra_data = connection_rsc->remote_ra_data;
     ra_data->cmds = remove_cmd(ra_data->cmds, action, interval_ms);
     ra_data->recurring_cmds = remove_cmd(ra_data->recurring_cmds, action,
                                          interval_ms);
     if (ra_data->cur_cmd &&
         (ra_data->cur_cmd->interval_ms == interval_ms) &&
         (pcmk__str_eq(ra_data->cur_cmd->action, action, pcmk__str_casei))) {
 
         cmd_set_flags(ra_data->cur_cmd, cmd_cancel);
     }
 
     return 0;
 }
 
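 /*!
  * \internal
  * \brief Merge a newly requested monitor with an existing duplicate, if any
  *
  * \param[in,out] ra_data      Remote connection data
  * \param[in]     interval_ms  Interval (in milliseconds) of requested monitor
  * \param[in]     userdata     User data of requested monitor
  *
  * \return Existing duplicate monitor command if found, otherwise NULL
  */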
 static remote_ra_cmd_t *
 handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms,
                    const char *userdata)
 {
     GList *gIter = NULL;
     remote_ra_cmd_t *cmd = NULL;
 
     /* There are three places a potential duplicate monitor operation could
      * exist:
      * 1. The recurring_cmds list, where the op is waiting for its next
      *    interval
      * 2. The cmds list, where the op is queued to be executed immediately
      * 3. cur_cmd, which means the monitor op is in flight right now
      */
     if (interval_ms == 0) {
         return NULL;
     }
 
     if (ra_data->cur_cmd &&
         !pcmk_is_set(ra_data->cur_cmd->status, cmd_cancel) &&
         (ra_data->cur_cmd->interval_ms == interval_ms)
         && pcmk__str_eq(ra_data->cur_cmd->action, PCMK_ACTION_MONITOR,
                         pcmk__str_casei)) {
 
         cmd = ra_data->cur_cmd;
         goto handle_dup;
     }
 
     for (gIter = ra_data->recurring_cmds; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms == interval_ms)
             && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
                             pcmk__str_casei)) {
             goto handle_dup;
         }
     }
 
     for (gIter = ra_data->cmds; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms == interval_ms)
             && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
                             pcmk__str_casei)) {
             goto handle_dup;
         }
     }
 
     return NULL;
 
 handle_dup:
 
     crm_trace("merging duplicate monitor cmd " PCMK__OP_FMT,
               cmd->rsc_id, PCMK_ACTION_MONITOR, interval_ms);
 
     /* update the userdata */
     if (userdata) {
        free(cmd->userdata);
        cmd->userdata = pcmk__str_copy(userdata);
     }
 
     /* if we've already reported success, generate a new call id */
     if (pcmk_is_set(cmd->status, cmd_reported_success)) {
         cmd->start_time = time(NULL);
         cmd->call_id = generate_callid();
         cmd_clear_flags(cmd, cmd_reported_success);
     }
 
     /* If we have an interval_id set, we are in the process of waiting for
      * this cmd's next interval. Instead of waiting, cancel the timer and
      * execute the action immediately */
     if (cmd->interval_id) {
         g_source_remove(cmd->interval_id);
         cmd->interval_id = 0;
         recurring_helper(cmd);
     }
 
     return cmd;
 }
 
 /*!
  * \internal
  * \brief Execute an action using the (internal) ocf:pacemaker:remote agent
  *
  * \param[in]     lrm_state      Executor state object for remote connection
  * \param[in]     rsc_id         Connection resource ID
  * \param[in]     action         Action to execute
  * \param[in]     userdata       String to copy and pass to execution callback
  * \param[in]     interval_ms    Action interval (in milliseconds)
  * \param[in]     timeout_ms     Action timeout (in milliseconds)
  * \param[in]     start_delay_ms Delay (in milliseconds) before executing action
  * \param[in,out] params         Connection resource parameters
  * \param[out]    call_id        Where to store call ID on success
  *
  * \return Standard Pacemaker return code
  * \note This takes ownership of \p params, which should not be used or freed
  *       after calling this function.
  */
 int
 controld_execute_remote_agent(const lrm_state_t *lrm_state, const char *rsc_id,
                               const char *action, const char *userdata,
                               guint interval_ms, int timeout_ms,
                               int start_delay_ms, lrmd_key_value_t *params,
                               int *call_id)
 {
     lrm_state_t *connection_rsc = NULL;
     remote_ra_cmd_t *cmd = NULL;
     remote_ra_data_t *ra_data = NULL;
 
     *call_id = 0;
 
     CRM_CHECK((lrm_state != NULL) && (rsc_id != NULL) && (action != NULL)
               && (userdata != NULL) && (call_id != NULL),
               lrmd_key_value_freeall(params); return EINVAL);
 
     if (!is_remote_ra_supported_action(action)) {
         lrmd_key_value_freeall(params);
         return EOPNOTSUPP;
     }
 
     connection_rsc = lrm_state_find(rsc_id);
     if (connection_rsc == NULL) {
         lrmd_key_value_freeall(params);
         return ENOTCONN;
     }
 
     remote_ra_data_init(connection_rsc);
     ra_data = connection_rsc->remote_ra_data;
 
     cmd = handle_dup_monitor(ra_data, interval_ms, userdata);
     if (cmd) {
         *call_id = cmd->call_id;
         lrmd_key_value_freeall(params);
         return pcmk_rc_ok;
     }
 
     cmd = pcmk__assert_alloc(1, sizeof(remote_ra_cmd_t));
 
     cmd->owner = pcmk__str_copy(lrm_state->node_name);
     cmd->rsc_id = pcmk__str_copy(rsc_id);
     cmd->action = pcmk__str_copy(action);
     cmd->userdata = pcmk__str_copy(userdata);
     cmd->interval_ms = interval_ms;
     cmd->timeout = timeout_ms;
     cmd->start_delay = start_delay_ms;
     cmd->params = params;
     cmd->start_time = time(NULL);
 
     cmd->call_id = generate_callid();
 
     if (cmd->start_delay) {
         cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd);
     }
 
     ra_data->cmds = g_list_append(ra_data->cmds, cmd);
     mainloop_set_trigger(ra_data->work);
 
     *call_id = cmd->call_id;
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Immediately fail all monitors of a remote node, if proxied here
  *
  * \param[in] node_name  Name of pacemaker_remote node
  */
 void
 remote_ra_fail(const char *node_name)
 {
     lrm_state_t *lrm_state = lrm_state_find(node_name);
 
     if (lrm_state && lrm_state_is_connected(lrm_state)) {
         remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
         crm_info("Failing monitors on Pacemaker Remote node %s", node_name);
         ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
         ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
     }
 }
 
 /* A guest node fencing implied by host fencing looks like:
  *
  *  <pseudo_event id="103" operation="stonith" operation_key="stonith-lxc1-off"
  *                on_node="lxc1" on_node_uuid="lxc1">
  *     <attributes CRM_meta_on_node="lxc1" CRM_meta_on_node_uuid="lxc1"
  *                 CRM_meta_stonith_action="off" crm_feature_set="3.0.12"/>
  *     <downed>
  *       <node id="lxc1"/>
  *     </downed>
  *  </pseudo_event>
  */
 #define XPATH_PSEUDO_FENCE "/" PCMK__XE_PSEUDO_EVENT \
     "[@" PCMK_XA_OPERATION "='stonith']/" PCMK__XE_DOWNED "/" PCMK_XE_NODE
 
 /*!
  * \internal
  * \brief Check a pseudo-action for Pacemaker Remote node side effects
  *
  * \param[in,out] xml  XML of pseudo-action to check
  */
 void
 remote_ra_process_pseudo(xmlNode *xml)
 {
     xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_FENCE);
 
     if (numXpathResults(search) == 1) {
         xmlNode *result = getXpathResult(search, 0);
 
         /* Normally, we handle the necessary side effects of a guest node stop
          * action when reporting the remote agent's result. However, if the stop
          * is implied due to fencing, it will be a fencing pseudo-event, and
          * there won't be a result to report. Handle that case here.
          *
          * This will result in a duplicate call to remote_node_down() if the
          * guest stop was real instead of implied, but that shouldn't hurt.
          *
          * There is still one corner case that isn't handled: if a guest node
          * isn't running any resources when its host is fenced, it will appear
          * to be cleanly stopped, so there will be no pseudo-fence, and our
          * peer cache state will be incorrect unless and until the guest is
          * recovered.
          */
         if (result) {
             const char *remote = pcmk__xe_id(result);
 
             if (remote) {
                 remote_node_down(remote, DOWN_ERASE_LRM);
             }
         }
     }
     freeXpathObject(search);
 }
 
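 /*!
  * \internal
  * \brief Update a remote node's maintenance flag in the CIB status section
  *
  * \param[in,out] lrm_state    Executor state object for remote connection
  * \param[in]     maintenance  Whether the node is entering maintenance mode
  */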
 static void
 remote_ra_maintenance(lrm_state_t * lrm_state, gboolean maintenance)
 {
     xmlNode *update, *state;
     int call_opt;
     crm_node_t *node;
 
     call_opt = crmd_cib_smart_opt();
     node = pcmk__cluster_lookup_remote_node(lrm_state->node_name);
     CRM_CHECK(node != NULL, return);
     update = pcmk__xe_create(NULL, PCMK_XE_STATUS);
     state = create_node_state_update(node, node_update_none, update,
                                      __func__);
     crm_xml_add(state, PCMK__XA_NODE_IN_MAINTENANCE, (maintenance? "1" : "0"));
     if (controld_update_cib(PCMK_XE_STATUS, update, call_opt,
                             NULL) == pcmk_rc_ok) {
         /* TODO: still not 100% sure that async update will succeed ... */
         if (maintenance) {
             lrm_remote_set_flags(lrm_state, remote_in_maint);
         } else {
             lrm_remote_clear_flags(lrm_state, remote_in_maint);
         }
     }
     pcmk__xml_free(update);
 }
 
 #define XPATH_PSEUDO_MAINTENANCE "//" PCMK__XE_PSEUDO_EVENT         \
     "[@" PCMK_XA_OPERATION "='" PCMK_ACTION_MAINTENANCE_NODES "']/" \
     PCMK__XE_MAINTENANCE
 
 /*!
  * \internal
  * \brief Check a pseudo-action holding updates for maintenance state
  *
  * \param[in,out] xml  XML of pseudo-action to check
  */
 void
 remote_ra_process_maintenance_nodes(xmlNode *xml)
 {
     xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_MAINTENANCE);
 
     if (numXpathResults(search) == 1) {
         xmlNode *node;
         int cnt = 0, cnt_remote = 0;
 
         for (node = pcmk__xe_first_child(getXpathResult(search, 0),
                                          PCMK_XE_NODE, NULL, NULL);
              node != NULL; node = pcmk__xe_next_same(node)) {
 
             lrm_state_t *lrm_state = lrm_state_find(pcmk__xe_id(node));
 
             cnt++;
             if (lrm_state && lrm_state->remote_ra_data &&
                 pcmk_is_set(((remote_ra_data_t *) lrm_state->remote_ra_data)->status, remote_active)) {
 
                 const char *in_maint_s = NULL;
                 int in_maint;
 
                 cnt_remote++;
                 in_maint_s = crm_element_value(node,
                                                PCMK__XA_NODE_IN_MAINTENANCE);
                 pcmk__scan_min_int(in_maint_s, &in_maint, 0);
                 remote_ra_maintenance(lrm_state, in_maint);
             }
         }
         crm_trace("Action holds %d nodes (%d remotes found) adjusting "
                   PCMK_OPT_MAINTENANCE_MODE,
                   cnt, cnt_remote);
     }
     freeXpathObject(search);
 }
 
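 /*!
  * \internal
  * \brief Check whether a remote node is in maintenance mode
  *
  * \param[in] lrm_state  Executor state object for remote connection
  *
  * \return TRUE if the remote node is in maintenance mode, otherwise FALSE
  */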
 gboolean
 remote_ra_is_in_maintenance(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     return pcmk_is_set(ra_data->status, remote_in_maint);
 }
 
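 /*!
  * \internal
  * \brief Check whether a remote connection is for a guest node
  *
  * \param[in] lrm_state  Executor state object for remote connection
  *
  * \return TRUE if the connection is to a guest node, otherwise FALSE
  */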
 gboolean
 remote_ra_controlling_guest(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     return pcmk_is_set(ra_data->status, controlling_guest);
 }
diff --git a/include/crm/lrmd_events.h b/include/crm/lrmd_events.h
index f0a9fd1bb6..44ddbaa1f1 100644
--- a/include/crm/lrmd_events.h
+++ b/include/crm/lrmd_events.h
@@ -1,110 +1,110 @@
 /*
  * Copyright 2012-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__CRM_LRMD_EVENTS__H
 #define PCMK__CRM_LRMD_EVENTS__H
 
 #include <sys/types.h>          // time_t
 
 #include <glib.h>               // guint
 #include <crm/common/results.h> // enum ocf_exitcode
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 /**
  * \file
  * \brief Resource agent executor events
  * \ingroup lrmd
  */
 
 enum lrmd_callback_event {
     lrmd_event_register,
     lrmd_event_unregister,
     lrmd_event_exec_complete,
     lrmd_event_disconnect,
     lrmd_event_connect,
     lrmd_event_poke,
     lrmd_event_new_client,
 };
 
 typedef struct lrmd_event_data_s {
     /*! Type of event, register, unregister, call_completed... */
     enum lrmd_callback_event type;
 
     /*! The resource this event occurred on. */
     const char *rsc_id;
     /*! The action performed, start, stop, monitor... */
     const char *op_type;
     /*! The user data passed by caller of exec() API function */
     const char *user_data;
 
     /*! The client api call id associated with this event */
     int call_id;
 
     /*! The operation's timeout period in ms. */
     int timeout;
 
     /*! The operation's recurring interval in ms. */
     guint interval_ms;
 
     /*! The operation's start delay value in ms. */
     int start_delay;
 
     /*! Whether the operation that just completed is on a deleted resource */
     int rsc_deleted;
 
     /*! The executed resource agent's return code, mapped to OCF */
     enum ocf_exitcode rc;
 
     /*! The executor status returned for exec_complete events */
     int op_status;
 
     /*! stdout from resource agent operation */
     const char *output;
 
     /*! Timestamp of when op ran */
     time_t t_run;
 
     /*! Timestamp of last rc change */
-    unsigned int t_rcchange;
+    time_t t_rcchange;
 
     /*! Length of time the operation took to execute */
     unsigned int exec_time;
 
     /*! Length of time the operation spent in the execution queue */
     unsigned int queue_time;
 
     /*! Connection result (used for connection and poke events) */
     int connection_rc;
 
     /*! GHashTable of parameters given to the operation */
     void *params;
 
     /*! client node name associated with this connection
      * (used to match actions to the proper client when there are multiple)
      */
     const char *remote_nodename;
 
     /*! exit failure reason string from resource agent operation */
     const char *exit_reason;
 } lrmd_event_data_t;
 
 lrmd_event_data_t *lrmd_new_event(const char *rsc_id, const char *task,
                                   guint interval_ms);
 lrmd_event_data_t *lrmd_copy_event(lrmd_event_data_t *event);
 void lrmd_free_event(lrmd_event_data_t *event);
 
 #ifdef __cplusplus
 }
 #endif
 
 #endif // PCMK__CRM_LRMD_EVENTS__H
diff --git a/lib/lrmd/lrmd_client.c b/lib/lrmd/lrmd_client.c
index 8fe243cea1..5e775e7422 100644
--- a/lib/lrmd/lrmd_client.c
+++ b/lib/lrmd/lrmd_client.c
@@ -1,2583 +1,2583 @@
 /*
  * Copyright 2012-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <unistd.h>
 #include <stdlib.h>
 #include <stdio.h>
 #include <stdint.h>         // uint32_t, uint64_t
 #include <stdarg.h>
 #include <string.h>
 #include <ctype.h>
 #include <errno.h>
 
 #include <sys/types.h>
 #include <sys/wait.h>
 
 #include <glib.h>
 #include <dirent.h>
 
 #include <crm/crm.h>
 #include <crm/lrmd.h>
 #include <crm/lrmd_internal.h>
 #include <crm/services.h>
 #include <crm/services_internal.h>
 #include <crm/common/mainloop.h>
 #include <crm/common/ipc_internal.h>
 #include <crm/common/remote_internal.h>
 #include <crm/common/xml.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>   // stonith__*
 
 #include <gnutls/gnutls.h>
 #include <sys/socket.h>
 #include <netinet/in.h>
 #include <netinet/ip.h>
 #include <arpa/inet.h>
 #include <netdb.h>
 
 #define MAX_TLS_RECV_WAIT 10000
 
 CRM_TRACE_INIT_DATA(lrmd);
 
 static int lrmd_api_disconnect(lrmd_t * lrmd);
 static int lrmd_api_is_connected(lrmd_t * lrmd);
 
 /* IPC proxy functions */
 int lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg);
 static void lrmd_internal_proxy_dispatch(lrmd_t *lrmd, xmlNode *msg);
 void lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg));
 
 // GnuTLS client handshake timeout in seconds
 #define TLS_HANDSHAKE_TIMEOUT 5
 
 gnutls_psk_client_credentials_t psk_cred_s;
 static void lrmd_tls_disconnect(lrmd_t * lrmd);
 static int global_remote_msg_id = 0;
 static void lrmd_tls_connection_destroy(gpointer userdata);
 
 typedef struct lrmd_private_s {
     uint64_t type;
     char *token;
     mainloop_io_t *source;
 
     /* IPC parameters */
     crm_ipc_t *ipc;
 
     pcmk__remote_t *remote;
 
     /* Extra TLS parameters */
     char *remote_nodename;
     char *server;
     int port;
     gnutls_psk_client_credentials_t psk_cred_c;
 
     /* While the async connection is occurring, this is the ID of the
      * connection timeout timer */
     int async_timer;
     int sock;
     /* Since TLS requires a round trip across the network for a
      * request/reply, there are times when we just want to be able to send a
      * request from the client and not wait around for (or even care about)
      * what the reply is. */
     int expected_late_replies;
     GList *pending_notify;
     crm_trigger_t *process_notify;
 
     lrmd_event_callback callback;
 
     /* Internal IPC proxy msg passing for remote guests */
     void (*proxy_callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg);
     void *proxy_callback_userdata;
     char *peer_version;
 } lrmd_private_t;
 
 static lrmd_list_t *
 lrmd_list_add(lrmd_list_t * head, const char *value)
 {
     lrmd_list_t *p, *end;
 
     p = pcmk__assert_alloc(1, sizeof(lrmd_list_t));
     p->val = strdup(value);
 
     end = head;
     while (end && end->next) {
         end = end->next;
     }
 
     if (end) {
         end->next = p;
     } else {
         head = p;
     }
 
     return head;
 }
 
 void
 lrmd_list_freeall(lrmd_list_t * head)
 {
     lrmd_list_t *p;
 
     while (head) {
         char *val = (char *)head->val;
 
         p = head->next;
         free(val);
         free(head);
         head = p;
     }
 }
 
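 /*!
  * \brief Add a key/value pair to a list, creating the list if needed
  *
  * \param[in,out] head   List to add to (or NULL to create a new list)
  * \param[in]     key    Key to add
  * \param[in]     value  Value to add
  *
  * \return Head of updated list
  */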
 lrmd_key_value_t *
 lrmd_key_value_add(lrmd_key_value_t * head, const char *key, const char *value)
 {
     lrmd_key_value_t *p, *end;
 
     p = pcmk__assert_alloc(1, sizeof(lrmd_key_value_t));
     p->key = strdup(key);
     p->value = strdup(value);
 
     end = head;
     while (end && end->next) {
         end = end->next;
     }
 
     if (end) {
         end->next = p;
     } else {
         head = p;
     }
 
     return head;
 }
 
 void
 lrmd_key_value_freeall(lrmd_key_value_t * head)
 {
     lrmd_key_value_t *p;
 
     while (head) {
         p = head->next;
         free(head->key);
         free(head->value);
         free(head);
         head = p;
     }
 }
 
 /*!
  * \brief Create a new lrmd_event_data_t object
  *
  * \param[in] rsc_id       ID of resource involved in event
  * \param[in] task         Action name
  * \param[in] interval_ms  Action interval
  *
  * \return Newly allocated and initialized lrmd_event_data_t
  * \note This function asserts on memory errors, so the return value is
  *       guaranteed to be non-NULL. The caller is responsible for freeing the
  *       result with lrmd_free_event().
  */
 lrmd_event_data_t *
 lrmd_new_event(const char *rsc_id, const char *task, guint interval_ms)
 {
     lrmd_event_data_t *event = pcmk__assert_alloc(1, sizeof(lrmd_event_data_t));
 
     // lrmd_event_data_t has (const char *) members that lrmd_free_event() frees
     event->rsc_id = pcmk__str_copy(rsc_id);
     event->op_type = pcmk__str_copy(task);
     event->interval_ms = interval_ms;
     return event;
 }
 
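 /*!
  * \brief Create a copy of an executor event
  *
  * \param[in] event  Executor event to copy
  *
  * \return Newly allocated copy of \p event
  * \note The caller is responsible for freeing the result with
  *       lrmd_free_event().
  */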
 lrmd_event_data_t *
 lrmd_copy_event(lrmd_event_data_t * event)
 {
     lrmd_event_data_t *copy = NULL;
 
     copy = pcmk__assert_alloc(1, sizeof(lrmd_event_data_t));
 
     copy->type = event->type;
 
     // lrmd_event_data_t has (const char *) members that lrmd_free_event() frees
     copy->rsc_id = pcmk__str_copy(event->rsc_id);
     copy->op_type = pcmk__str_copy(event->op_type);
     copy->user_data = pcmk__str_copy(event->user_data);
     copy->output = pcmk__str_copy(event->output);
     copy->remote_nodename = pcmk__str_copy(event->remote_nodename);
     copy->exit_reason = pcmk__str_copy(event->exit_reason);
 
     copy->call_id = event->call_id;
     copy->timeout = event->timeout;
     copy->interval_ms = event->interval_ms;
     copy->start_delay = event->start_delay;
     copy->rsc_deleted = event->rsc_deleted;
     copy->rc = event->rc;
     copy->op_status = event->op_status;
     copy->t_run = event->t_run;
     copy->t_rcchange = event->t_rcchange;
     copy->exec_time = event->exec_time;
     copy->queue_time = event->queue_time;
     copy->connection_rc = event->connection_rc;
     copy->params = pcmk__str_table_dup(event->params);
 
     return copy;
 }
 
 /*!
  * \brief Free an executor event
  *
  * \param[in,out] event  Executor event object to free
  */
 void
 lrmd_free_event(lrmd_event_data_t *event)
 {
     if (event == NULL) {
         return;
     }
     // @TODO Why are these const char *?
     free((void *) event->rsc_id);
     free((void *) event->op_type);
     free((void *) event->user_data);
     free((void *) event->remote_nodename);
     lrmd__reset_result(event);
     if (event->params != NULL) {
         g_hash_table_destroy(event->params);
     }
     free(event);
 }
 
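 /*!
  * \internal
  * \brief Convert an executor message into an event and dispatch it
  *
  * \param[in,out] lrmd  Executor API connection that received the message
  * \param[in]     msg   Message XML to dispatch
  */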
 static void
 lrmd_dispatch_internal(lrmd_t * lrmd, xmlNode * msg)
 {
     const char *type;
     const char *proxy_session = crm_element_value(msg,
                                                   PCMK__XA_LRMD_IPC_SESSION);
     lrmd_private_t *native = lrmd->lrmd_private;
     lrmd_event_data_t event = { 0, };
 
     if (proxy_session != NULL) {
         /* this is proxy business */
         lrmd_internal_proxy_dispatch(lrmd, msg);
         return;
     } else if (!native->callback) {
         /* no callback set */
         crm_trace("notify event received but client has not set callback");
         return;
     }
 
     event.remote_nodename = native->remote_nodename;
     type = crm_element_value(msg, PCMK__XA_LRMD_OP);
     crm_element_value_int(msg, PCMK__XA_LRMD_CALLID, &event.call_id);
     event.rsc_id = crm_element_value(msg, PCMK__XA_LRMD_RSC_ID);
 
     if (pcmk__str_eq(type, LRMD_OP_RSC_REG, pcmk__str_none)) {
         event.type = lrmd_event_register;
     } else if (pcmk__str_eq(type, LRMD_OP_RSC_UNREG, pcmk__str_none)) {
         event.type = lrmd_event_unregister;
     } else if (pcmk__str_eq(type, LRMD_OP_RSC_EXEC, pcmk__str_none)) {
         int rc = 0;
         int exec_time = 0;
         int queue_time = 0;
         time_t epoch = 0;
 
         crm_element_value_int(msg, PCMK__XA_LRMD_TIMEOUT, &event.timeout);
         crm_element_value_ms(msg, PCMK__XA_LRMD_RSC_INTERVAL,
                              &event.interval_ms);
         crm_element_value_int(msg, PCMK__XA_LRMD_RSC_START_DELAY,
                               &event.start_delay);
 
         crm_element_value_int(msg, PCMK__XA_LRMD_EXEC_RC, &rc);
         event.rc = (enum ocf_exitcode) rc;
 
         crm_element_value_int(msg, PCMK__XA_LRMD_EXEC_OP_STATUS,
                               &event.op_status);
         crm_element_value_int(msg, PCMK__XA_LRMD_RSC_DELETED,
                               &event.rsc_deleted);
 
         crm_element_value_epoch(msg, PCMK__XA_LRMD_RUN_TIME, &epoch);
         event.t_run = epoch;
 
         crm_element_value_epoch(msg, PCMK__XA_LRMD_RCCHANGE_TIME, &epoch);
-        event.t_rcchange = (unsigned int) epoch;
+        event.t_rcchange = epoch;
 
         crm_element_value_int(msg, PCMK__XA_LRMD_EXEC_TIME, &exec_time);
         CRM_LOG_ASSERT(exec_time >= 0);
         event.exec_time = QB_MAX(0, exec_time);
 
         crm_element_value_int(msg, PCMK__XA_LRMD_QUEUE_TIME, &queue_time);
         CRM_LOG_ASSERT(queue_time >= 0);
         event.queue_time = QB_MAX(0, queue_time);
 
         event.op_type = crm_element_value(msg, PCMK__XA_LRMD_RSC_ACTION);
         event.user_data = crm_element_value(msg,
                                             PCMK__XA_LRMD_RSC_USERDATA_STR);
         event.type = lrmd_event_exec_complete;
 
         /* output and exit_reason may be freed by a callback */
         event.output = crm_element_value_copy(msg, PCMK__XA_LRMD_RSC_OUTPUT);
         lrmd__set_result(&event, event.rc, event.op_status,
                          crm_element_value(msg, PCMK__XA_LRMD_RSC_EXIT_REASON));
 
         event.params = xml2list(msg);
     } else if (pcmk__str_eq(type, LRMD_OP_NEW_CLIENT, pcmk__str_none)) {
         event.type = lrmd_event_new_client;
     } else if (pcmk__str_eq(type, LRMD_OP_POKE, pcmk__str_none)) {
         event.type = lrmd_event_poke;
     } else {
         return;
     }
 
     crm_trace("op %s notify event received", type);
     native->callback(&event);
 
     if (event.params) {
         g_hash_table_destroy(event.params);
     }
     lrmd__reset_result(&event);
 }
 
 // \return Always 0, to indicate that IPC mainloop source should be kept
 static int
 lrmd_ipc_dispatch(const char *buffer, ssize_t length, gpointer userdata)
 {
     lrmd_t *lrmd = userdata;
     lrmd_private_t *native = lrmd->lrmd_private;
 
     if (native->callback != NULL) {
         xmlNode *msg = pcmk__xml_parse(buffer);
 
         lrmd_dispatch_internal(lrmd, msg);
         pcmk__xml_free(msg);
     }
     return 0;
 }
 
 static void
 lrmd_free_xml(gpointer userdata)
 {
     pcmk__xml_free((xmlNode *) userdata);
 }
 
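 /*!
  * \internal
  * \brief Check whether a remote executor connection has an active TLS session
  *
  * \param[in] lrmd  Executor API connection to check
  *
  * \return true if a TLS session exists, otherwise false
  */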
 static bool
 remote_executor_connected(lrmd_t * lrmd)
 {
     lrmd_private_t *native = lrmd->lrmd_private;
 
     return (native->remote->tls_session != NULL);
 }
 
 /*!
  * \internal
  * \brief TLS dispatch function (for both trigger and file descriptor sources)
  *
  * \param[in,out] userdata  API connection
  *
  * \return Always return a nonnegative value, which as a file descriptor
  *         dispatch function means keep the mainloop source, and as a
  *         trigger dispatch function, 0 means remove the trigger from the
  *         mainloop while 1 means keep it (and job completed)
  */
 static int
 lrmd_tls_dispatch(gpointer userdata)
 {
     lrmd_t *lrmd = userdata;
     lrmd_private_t *native = lrmd->lrmd_private;
     xmlNode *xml = NULL;
     int rc = pcmk_rc_ok;
 
     if (!remote_executor_connected(lrmd)) {
         crm_trace("TLS dispatch triggered after disconnect");
         return 0;
     }
 
     crm_trace("TLS dispatch triggered");
 
     /* First check if there are any pending notifies to process that came
      * while we were waiting for replies earlier. */
     if (native->pending_notify) {
         GList *iter = NULL;
 
         crm_trace("Processing pending notifies");
         for (iter = native->pending_notify; iter; iter = iter->next) {
             lrmd_dispatch_internal(lrmd, iter->data);
         }
         g_list_free_full(native->pending_notify, lrmd_free_xml);
         native->pending_notify = NULL;
     }
 
     /* Next read the current buffer and see if there are any messages to handle. */
     switch (pcmk__remote_ready(native->remote, 0)) {
         case pcmk_rc_ok:
             rc = pcmk__read_remote_message(native->remote, -1);
             xml = pcmk__remote_message_xml(native->remote);
             break;
         case ETIME:
             // Nothing to read, check if a full message is already in buffer
             xml = pcmk__remote_message_xml(native->remote);
             break;
         default:
             rc = ENOTCONN;
             break;
     }
     while (xml) {
         const char *msg_type = crm_element_value(xml,
                                                  PCMK__XA_LRMD_REMOTE_MSG_TYPE);
         if (pcmk__str_eq(msg_type, "notify", pcmk__str_casei)) {
             lrmd_dispatch_internal(lrmd, xml);
         } else if (pcmk__str_eq(msg_type, "reply", pcmk__str_casei)) {
             if (native->expected_late_replies > 0) {
                 native->expected_late_replies--;
             } else {
                 int reply_id = 0;
                 crm_element_value_int(xml, PCMK__XA_LRMD_CALLID, &reply_id);
                 /* if this happens, we want to know about it */
                 crm_err("Got outdated Pacemaker Remote reply %d", reply_id);
             }
         }
         pcmk__xml_free(xml);
         xml = pcmk__remote_message_xml(native->remote);
     }
 
     if (rc == ENOTCONN) {
         crm_info("Lost %s executor connection while reading data",
                  (native->remote_nodename? native->remote_nodename : "local"));
         lrmd_tls_disconnect(lrmd);
         return 0;
     }
     return 1;
 }
 
 /* Not used with mainloop */
 int
 lrmd_poll(lrmd_t * lrmd, int timeout)
 {
     lrmd_private_t *native = lrmd->lrmd_private;
 
     switch (native->type) {
         case pcmk__client_ipc:
             return crm_ipc_ready(native->ipc);
 
         case pcmk__client_tls:
             if (native->pending_notify) {
                 return 1;
             } else {
                 int rc = pcmk__remote_ready(native->remote, 0);
 
                 switch (rc) {
                     case pcmk_rc_ok:
                         return 1;
                     case ETIME:
                         return 0;
                     default:
                         return pcmk_rc2legacy(rc);
                 }
             }
         default:
             crm_err("Unsupported executor connection type (bug?): %d",
                     native->type);
             return -EPROTONOSUPPORT;
     }
 }
 
 /* Not used with mainloop */
 bool
 lrmd_dispatch(lrmd_t * lrmd)
 {
     lrmd_private_t *private = NULL;
 
     CRM_ASSERT(lrmd != NULL);
 
     private = lrmd->lrmd_private;
     switch (private->type) {
         case pcmk__client_ipc:
             while (crm_ipc_ready(private->ipc)) {
                 if (crm_ipc_read(private->ipc) > 0) {
                     const char *msg = crm_ipc_buffer(private->ipc);
 
                     lrmd_ipc_dispatch(msg, strlen(msg), lrmd);
                 }
             }
             break;
         case pcmk__client_tls:
             lrmd_tls_dispatch(lrmd);
             break;
         default:
             crm_err("Unsupported executor connection type (bug?): %d",
                     private->type);
     }
 
     if (lrmd_api_is_connected(lrmd) == FALSE) {
         crm_err("Connection closed");
         return FALSE;
     }
 
     return TRUE;
 }
 
 static xmlNode *
 lrmd_create_op(const char *token, const char *op, xmlNode *data, int timeout,
                enum lrmd_call_options options)
 {
     xmlNode *op_msg = NULL;
 
     CRM_CHECK(token != NULL, return NULL);
 
     op_msg = pcmk__xe_create(NULL, PCMK__XE_LRMD_COMMAND);
     crm_xml_add(op_msg, PCMK__XA_T, PCMK__VALUE_LRMD);
     crm_xml_add(op_msg, PCMK__XA_LRMD_OP, op);
     crm_xml_add_int(op_msg, PCMK__XA_LRMD_TIMEOUT, timeout);
     crm_xml_add_int(op_msg, PCMK__XA_LRMD_CALLOPT, options);
 
     if (data != NULL) {
         xmlNode *wrapper = pcmk__xe_create(op_msg, PCMK__XE_LRMD_CALLDATA);
 
         pcmk__xml_copy(wrapper, data);
     }
 
     crm_trace("Created executor %s command with call options %.8lx (%d)",
               op, (long)options, options);
     return op_msg;
 }
 
 static void
 lrmd_ipc_connection_destroy(gpointer userdata)
 {
     lrmd_t *lrmd = userdata;
     lrmd_private_t *native = lrmd->lrmd_private;
 
     switch (native->type) {
         case pcmk__client_ipc:
             crm_info("Disconnected from local executor");
             break;
         case pcmk__client_tls:
             crm_info("Disconnected from remote executor on %s",
                      native->remote_nodename);
             break;
         default:
             crm_err("Unsupported executor connection type %d (bug?)",
                     native->type);
     }
 
     /* Prevent these from being cleaned up in lrmd_api_disconnect() */
     native->ipc = NULL;
     native->source = NULL;
 
     if (native->callback) {
         lrmd_event_data_t event = { 0, };
         event.type = lrmd_event_disconnect;
         event.remote_nodename = native->remote_nodename;
         native->callback(&event);
     }
 }
 
 static void
 lrmd_tls_connection_destroy(gpointer userdata)
 {
     lrmd_t *lrmd = userdata;
     lrmd_private_t *native = lrmd->lrmd_private;
 
     crm_info("TLS connection destroyed");
 
     if (native->remote->tls_session) {
         gnutls_bye(*native->remote->tls_session, GNUTLS_SHUT_RDWR);
         gnutls_deinit(*native->remote->tls_session);
         gnutls_free(native->remote->tls_session);
         native->remote->tls_session = NULL;
     }
     if (native->psk_cred_c) {
         gnutls_psk_free_client_credentials(native->psk_cred_c);
     }
     if (native->sock) {
         close(native->sock);
     }
     if (native->process_notify) {
         mainloop_destroy_trigger(native->process_notify);
         native->process_notify = NULL;
     }
     if (native->pending_notify) {
         g_list_free_full(native->pending_notify, lrmd_free_xml);
         native->pending_notify = NULL;
     }
 
     free(native->remote->buffer);
     free(native->remote->start_state);
     native->remote->buffer = NULL;
     native->remote->start_state = NULL;
     native->source = 0;
     native->sock = 0;
     native->psk_cred_c = NULL;
 
     if (native->callback) {
         lrmd_event_data_t event = { 0, };
         event.remote_nodename = native->remote_nodename;
         event.type = lrmd_event_disconnect;
         native->callback(&event);
     }
     return;
 }
 
 // \return Standard Pacemaker return code
 int
 lrmd__remote_send_xml(pcmk__remote_t *session, xmlNode *msg, uint32_t id,
                       const char *msg_type)
 {
     crm_xml_add_int(msg, PCMK__XA_LRMD_REMOTE_MSG_ID, id);
     crm_xml_add(msg, PCMK__XA_LRMD_REMOTE_MSG_TYPE, msg_type);
     return pcmk__remote_send_xml(session, msg);
 }
 
 // \return Standard Pacemaker return code
 static int
 read_remote_reply(lrmd_t *lrmd, int total_timeout, int expected_reply_id,
                   xmlNode **reply)
 {
     lrmd_private_t *native = lrmd->lrmd_private;
     time_t start = time(NULL);
     const char *msg_type = NULL;
     int reply_id = 0;
     int remaining_timeout = 0;
     int rc = pcmk_rc_ok;
 
     /* A nonpositive timeout makes no sense here: we have to wait some amount
      * of time for the reply to arrive. Clamp nonpositive or overly large
      * values to MAX_TLS_RECV_WAIT.
      */
     if (total_timeout <= 0 || total_timeout > MAX_TLS_RECV_WAIT) {
         total_timeout = MAX_TLS_RECV_WAIT;
     }
 
     for (*reply = NULL; *reply == NULL; ) {
 
         *reply = pcmk__remote_message_xml(native->remote);
         if (*reply == NULL) {
             /* Read more from the TLS buffer if we still have time left. On
              * the first pass, use the full timeout; afterward, subtract the
              * time already spent waiting.
              */
             if (remaining_timeout) {
                 remaining_timeout = total_timeout - ((time(NULL) - start) * 1000);
             } else {
                 remaining_timeout = total_timeout;
             }
             if (remaining_timeout <= 0) {
                 return ETIME;
             }
 
             rc = pcmk__read_remote_message(native->remote, remaining_timeout);
             if (rc != pcmk_rc_ok) {
                 return rc;
             }
 
             *reply = pcmk__remote_message_xml(native->remote);
             if (*reply == NULL) {
                 return ENOMSG;
             }
         }
 
         crm_element_value_int(*reply, PCMK__XA_LRMD_REMOTE_MSG_ID, &reply_id);
         msg_type = crm_element_value(*reply, PCMK__XA_LRMD_REMOTE_MSG_TYPE);
 
         if (!msg_type) {
             crm_err("Empty msg type received while waiting for reply");
             pcmk__xml_free(*reply);
             *reply = NULL;
         } else if (pcmk__str_eq(msg_type, "notify", pcmk__str_casei)) {
             /* got a notify while waiting for reply, trigger the notify to be processed later */
             crm_info("queueing notify");
             native->pending_notify = g_list_append(native->pending_notify, *reply);
             if (native->process_notify) {
                 crm_info("notify trigger set.");
                 mainloop_set_trigger(native->process_notify);
             }
             *reply = NULL;
         } else if (!pcmk__str_eq(msg_type, "reply", pcmk__str_casei)) {
             /* msg isn't a reply, make some noise */
             crm_err("Expected a reply, got %s", msg_type);
             pcmk__xml_free(*reply);
             *reply = NULL;
         } else if (reply_id != expected_reply_id) {
             if (native->expected_late_replies > 0) {
                 native->expected_late_replies--;
             } else {
                 crm_err("Got outdated reply, expected id %d got id %d", expected_reply_id, reply_id);
             }
             pcmk__xml_free(*reply);
             *reply = NULL;
         }
     }
 
     if (native->remote->buffer && native->process_notify) {
         mainloop_set_trigger(native->process_notify);
     }
 
     return rc;
 }
 
 // \return Standard Pacemaker return code
 static int
 send_remote_message(lrmd_t *lrmd, xmlNode *msg)
 {
     int rc = pcmk_rc_ok;
     lrmd_private_t *native = lrmd->lrmd_private;
 
     global_remote_msg_id++;
     if (global_remote_msg_id <= 0) {
         global_remote_msg_id = 1;
     }
 
     rc = lrmd__remote_send_xml(native->remote, msg, global_remote_msg_id,
                                "request");
     if (rc != pcmk_rc_ok) {
         crm_err("Disconnecting because TLS message could not be sent to "
                 "Pacemaker Remote: %s", pcmk_rc_str(rc));
         lrmd_tls_disconnect(lrmd);
     }
     return rc;
 }
 
 static int
 lrmd_tls_send_recv(lrmd_t * lrmd, xmlNode * msg, int timeout, xmlNode ** reply)
 {
     int rc = 0;
     xmlNode *xml = NULL;
 
     if (!remote_executor_connected(lrmd)) {
         return -ENOTCONN;
     }
 
     rc = send_remote_message(lrmd, msg);
     if (rc != pcmk_rc_ok) {
         return pcmk_rc2legacy(rc);
     }
 
     rc = read_remote_reply(lrmd, timeout, global_remote_msg_id, &xml);
     if (rc != pcmk_rc_ok) {
         crm_err("Disconnecting remote after request %d reply not received: %s "
                 QB_XS " rc=%d timeout=%dms",
                 global_remote_msg_id, pcmk_rc_str(rc), rc, timeout);
         lrmd_tls_disconnect(lrmd);
     }
 
     if (reply) {
         *reply = xml;
     } else {
         pcmk__xml_free(xml);
     }
 
     return pcmk_rc2legacy(rc);
 }
 
 static int
 lrmd_send_xml(lrmd_t * lrmd, xmlNode * msg, int timeout, xmlNode ** reply)
 {
     int rc = pcmk_ok;
     lrmd_private_t *native = lrmd->lrmd_private;
 
     switch (native->type) {
         case pcmk__client_ipc:
             rc = crm_ipc_send(native->ipc, msg, crm_ipc_client_response, timeout, reply);
             break;
         case pcmk__client_tls:
             rc = lrmd_tls_send_recv(lrmd, msg, timeout, reply);
             break;
         default:
             crm_err("Unsupported executor connection type (bug?): %d",
                     native->type);
             rc = -EPROTONOSUPPORT;
     }
 
     return rc;
 }
 
 static int
 lrmd_send_xml_no_reply(lrmd_t * lrmd, xmlNode * msg)
 {
     int rc = pcmk_ok;
     lrmd_private_t *native = lrmd->lrmd_private;
 
     switch (native->type) {
         case pcmk__client_ipc:
             rc = crm_ipc_send(native->ipc, msg, crm_ipc_flags_none, 0, NULL);
             break;
         case pcmk__client_tls:
             rc = send_remote_message(lrmd, msg);
             if (rc == pcmk_rc_ok) {
                 /* We don't want to wait for the reply, but because the
                  * request/reply protocol must behave the same as libqb's, a
                  * reply will still arrive later. Track it so that it can be
                  * recognized and discarded when it does.
                  */
                 native->expected_late_replies++;
             }
             rc = pcmk_rc2legacy(rc);
             break;
         default:
             crm_err("Unsupported executor connection type (bug?): %d",
                     native->type);
             rc = -EPROTONOSUPPORT;
     }
 
     return rc;
 }
 
 static int
 lrmd_api_is_connected(lrmd_t * lrmd)
 {
     lrmd_private_t *native = lrmd->lrmd_private;
 
     switch (native->type) {
         case pcmk__client_ipc:
             return crm_ipc_connected(native->ipc);
         case pcmk__client_tls:
             return remote_executor_connected(lrmd);
         default:
             crm_err("Unsupported executor connection type (bug?): %d",
                     native->type);
             return 0;
     }
 }
 
 /*!
  * \internal
  * \brief Send a prepared API command to the executor
  *
  * \param[in,out] lrmd          Existing connection to the executor
  * \param[in]     op            Name of API command to send
  * \param[in]     data          Command data XML to add to the sent command
  * \param[out]    output_data   If expecting a reply, it will be stored here
  * \param[in]     timeout       Timeout in milliseconds (if 0, defaults to
  *                              a sensible value per the type of connection,
  *                              standard vs. Pacemaker Remote);
  *                              also propagated to the command XML
  * \param[in]     options       Call options to pass to the server when sending
  * \param[in]     expect_reply  If TRUE, wait for a reply from the server;
  *                              must be TRUE for IPC (as opposed to TLS) clients
  *
  * \return pcmk_ok on success, -errno on error
  */
 static int
 lrmd_send_command(lrmd_t *lrmd, const char *op, xmlNode *data,
                   xmlNode **output_data, int timeout,
                   enum lrmd_call_options options, gboolean expect_reply)
 {
     int rc = pcmk_ok;
     lrmd_private_t *native = lrmd->lrmd_private;
     xmlNode *op_msg = NULL;
     xmlNode *op_reply = NULL;
 
     if (!lrmd_api_is_connected(lrmd)) {
         return -ENOTCONN;
     }
 
     if (op == NULL) {
         crm_err("No operation specified");
         return -EINVAL;
     }
 
     CRM_CHECK(native->token != NULL, return -EINVAL);
     crm_trace("Sending %s op to executor", op);
 
     op_msg = lrmd_create_op(native->token, op, data, timeout, options);
 
     if (op_msg == NULL) {
         return -EINVAL;
     }
 
     if (expect_reply) {
         rc = lrmd_send_xml(lrmd, op_msg, timeout, &op_reply);
     } else {
         rc = lrmd_send_xml_no_reply(lrmd, op_msg);
         goto done;
     }
 
     if (rc < 0) {
         crm_perror(LOG_ERR, "Couldn't perform %s operation (timeout=%d): %d", op, timeout, rc);
         goto done;
 
     } else if(op_reply == NULL) {
         rc = -ENOMSG;
         goto done;
     }
 
     rc = pcmk_ok;
     crm_trace("%s op reply received", op);
     if (crm_element_value_int(op_reply, PCMK__XA_LRMD_RC, &rc) != 0) {
         rc = -ENOMSG;
         goto done;
     }
 
     crm_log_xml_trace(op_reply, "Reply");
 
     if (output_data) {
         *output_data = op_reply;
         op_reply = NULL;        /* Prevent subsequent free */
     }
 
   done:
     if (lrmd_api_is_connected(lrmd) == FALSE) {
         crm_err("Executor disconnected");
     }
 
     pcmk__xml_free(op_msg);
     pcmk__xml_free(op_reply);
     return rc;
 }
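 
 /* Usage sketch (illustrative only, not called anywhere): wrappers in this
  * file typically build a PCMK__XE_LRMD_RSC message, send it through
  * lrmd_send_command(), and free the XML. This assumes lrmd is an existing
  * connection, and the resource ID below is hypothetical; the return value is
  * a legacy code (pcmk_ok or a negative errno).
  *
  *     xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC);
  *     int rc = pcmk_ok;
  *
  *     crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__);
  *     crm_xml_add(data, PCMK__XA_LRMD_RSC_ID, "example-rsc");
  *     rc = lrmd_send_command(lrmd, LRMD_OP_RSC_INFO, data, NULL, 0, 0, TRUE);
  *     pcmk__xml_free(data);
  */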
 
 static int
 lrmd_api_poke_connection(lrmd_t * lrmd)
 {
     int rc;
     lrmd_private_t *native = lrmd->lrmd_private;
     xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC);
 
     crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__);
     rc = lrmd_send_command(lrmd, LRMD_OP_POKE, data, NULL, 0, 0,
                            (native->type == pcmk__client_ipc));
     pcmk__xml_free(data);
 
     return rc < 0 ? rc : pcmk_ok;
 }
 
 // \return Standard Pacemaker return code
 int
 lrmd__validate_remote_settings(lrmd_t *lrmd, GHashTable *hash)
 {
     int rc = pcmk_rc_ok;
     const char *value;
     lrmd_private_t *native = lrmd->lrmd_private;
     xmlNode *data = pcmk__xe_create(NULL, PCMK__XA_LRMD_OP);
 
     crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__);
 
     value = g_hash_table_lookup(hash, PCMK_OPT_STONITH_WATCHDOG_TIMEOUT);
     if ((value) &&
         (stonith__watchdog_fencing_enabled_for_node(native->remote_nodename))) {
        crm_xml_add(data, PCMK__XA_LRMD_WATCHDOG, value);
     }
 
     rc = lrmd_send_command(lrmd, LRMD_OP_CHECK, data, NULL, 0, 0,
                            (native->type == pcmk__client_ipc));
     pcmk__xml_free(data);
     return (rc < 0)? pcmk_legacy2rc(rc) : pcmk_rc_ok;
 }
 
 static int
 lrmd_handshake(lrmd_t * lrmd, const char *name)
 {
     int rc = pcmk_ok;
     lrmd_private_t *native = lrmd->lrmd_private;
     xmlNode *reply = NULL;
     xmlNode *hello = pcmk__xe_create(NULL, PCMK__XE_LRMD_COMMAND);
 
     crm_xml_add(hello, PCMK__XA_T, PCMK__VALUE_LRMD);
     crm_xml_add(hello, PCMK__XA_LRMD_OP, CRM_OP_REGISTER);
     crm_xml_add(hello, PCMK__XA_LRMD_CLIENTNAME, name);
     crm_xml_add(hello, PCMK__XA_LRMD_PROTOCOL_VERSION, LRMD_PROTOCOL_VERSION);
 
     /* advertise that we are a proxy provider */
     if (native->proxy_callback) {
         pcmk__xe_set_bool_attr(hello, PCMK__XA_LRMD_IS_IPC_PROVIDER, true);
     }
 
     rc = lrmd_send_xml(lrmd, hello, -1, &reply);
 
     if (rc < 0) {
         crm_perror(LOG_DEBUG, "Couldn't complete registration with the executor API: %d", rc);
         rc = -ECOMM;
     } else if (reply == NULL) {
         crm_err("Did not receive registration reply");
         rc = -EPROTO;
     } else {
         const char *version = crm_element_value(reply,
                                                 PCMK__XA_LRMD_PROTOCOL_VERSION);
         const char *msg_type = crm_element_value(reply, PCMK__XA_LRMD_OP);
         const char *tmp_ticket = crm_element_value(reply,
                                                    PCMK__XA_LRMD_CLIENTID);
         const char *start_state = crm_element_value(reply, PCMK__XA_NODE_START_STATE);
         long long uptime = -1;
 
         crm_element_value_int(reply, PCMK__XA_LRMD_RC, &rc);
 
         /* The remote executor may add its uptime to the XML reply, which is
          * useful in handling transient attributes when the connection to the
          * remote node unexpectedly drops.  If no parameter is given, just
          * default to -1.
          */
         crm_element_value_ll(reply, PCMK__XA_UPTIME, &uptime);
         native->remote->uptime = uptime;
 
         if (start_state) {
             native->remote->start_state = strdup(start_state);
         }
 
         if (rc == -EPROTO) {
             crm_err("Executor protocol version mismatch between client (%s) and server (%s)",
                 LRMD_PROTOCOL_VERSION, version);
             crm_log_xml_err(reply, "Protocol Error");
 
         } else if (!pcmk__str_eq(msg_type, CRM_OP_REGISTER, pcmk__str_casei)) {
             crm_err("Invalid registration message: %s", msg_type);
             crm_log_xml_err(reply, "Bad reply");
             rc = -EPROTO;
         } else if (tmp_ticket == NULL) {
             crm_err("No registration token provided");
             crm_log_xml_err(reply, "Bad reply");
             rc = -EPROTO;
         } else {
             crm_trace("Obtained registration token: %s", tmp_ticket);
             native->token = strdup(tmp_ticket);
             native->peer_version = strdup(version?version:"1.0"); /* Included since 1.1 */
             rc = pcmk_ok;
         }
     }
 
     pcmk__xml_free(reply);
     pcmk__xml_free(hello);
 
     if (rc != pcmk_ok) {
         lrmd_api_disconnect(lrmd);
     }
     return rc;
 }
 
 static int
 lrmd_ipc_connect(lrmd_t * lrmd, int *fd)
 {
     int rc = pcmk_ok;
     lrmd_private_t *native = lrmd->lrmd_private;
 
     struct ipc_client_callbacks lrmd_callbacks = {
         .dispatch = lrmd_ipc_dispatch,
         .destroy = lrmd_ipc_connection_destroy
     };
 
     crm_info("Connecting to executor");
 
     if (fd) {
         /* No mainloop */
         native->ipc = crm_ipc_new(CRM_SYSTEM_LRMD, 0);
         if (native->ipc != NULL) {
             rc = pcmk__connect_generic_ipc(native->ipc);
             if (rc == pcmk_rc_ok) {
                 rc = pcmk__ipc_fd(native->ipc, fd);
             }
             if (rc != pcmk_rc_ok) {
                 crm_err("Connection to executor failed: %s", pcmk_rc_str(rc));
                 rc = -ENOTCONN;
             }
         }
     } else {
         native->source = mainloop_add_ipc_client(CRM_SYSTEM_LRMD, G_PRIORITY_HIGH, 0, lrmd, &lrmd_callbacks);
         native->ipc = mainloop_get_ipc_client(native->source);
     }
 
     if (native->ipc == NULL) {
         crm_debug("Could not connect to the executor API");
         rc = -ENOTCONN;
     }
 
     return rc;
 }
 
 static void
 copy_gnutls_datum(gnutls_datum_t *dest, gnutls_datum_t *source)
 {
     CRM_ASSERT((dest != NULL) && (source != NULL) && (source->data != NULL));
 
     dest->data = gnutls_malloc(source->size);
     pcmk__mem_assert(dest->data);
 
     memcpy(dest->data, source->data, source->size);
     dest->size = source->size;
 }
 
 static void
 clear_gnutls_datum(gnutls_datum_t *datum)
 {
     gnutls_free(datum->data);
     datum->data = NULL;
     datum->size = 0;
 }
 
 #define KEY_READ_LEN 256    // Chunk size for reading key from file
 
 // \return Standard Pacemaker return code
 static int
 read_gnutls_key(const char *location, gnutls_datum_t *key)
 {
     FILE *stream = NULL;
     size_t buf_len = KEY_READ_LEN;
 
     if ((location == NULL) || (key == NULL)) {
         return EINVAL;
     }
 
     stream = fopen(location, "r");
     if (stream == NULL) {
         return errno;
     }
 
     key->data = gnutls_malloc(buf_len);
     key->size = 0;
     while (!feof(stream)) {
         int next = fgetc(stream);
 
         if (next == EOF) {
             if (!feof(stream)) {
                 crm_warn("Pacemaker Remote key read was partially successful "
                          "(copy in memory may be corrupted)");
             }
             break;
         }
         if (key->size == buf_len) {
             buf_len = key->size + KEY_READ_LEN;
             key->data = gnutls_realloc(key->data, buf_len);
             CRM_ASSERT(key->data);
         }
         key->data[key->size++] = (unsigned char) next;
     }
     fclose(stream);
 
     if (key->size == 0) {
         clear_gnutls_datum(key);
         return ENOKEY;
     }
     return pcmk_rc_ok;
 }
 
 // Cache the most recently used Pacemaker Remote authentication key
 
 struct key_cache_s {
     time_t updated;         // When cached key was read (valid for 1 minute)
     const char *location;   // Where cached key was read from
     gnutls_datum_t key;     // Cached key
 };
 
 static bool
 key_is_cached(struct key_cache_s *key_cache)
 {
     return key_cache->updated != 0;
 }
 
 static bool
 key_cache_expired(struct key_cache_s *key_cache)
 {
     return (time(NULL) - key_cache->updated) >= 60;
 }
 
 static void
 clear_key_cache(struct key_cache_s *key_cache)
 {
     clear_gnutls_datum(&(key_cache->key));
     if ((key_cache->updated != 0) || (key_cache->location != NULL)) {
         key_cache->updated = 0;
         key_cache->location = NULL;
         crm_debug("Cleared Pacemaker Remote key cache");
     }
 }
 
 static void
 get_cached_key(struct key_cache_s *key_cache, gnutls_datum_t *key)
 {
     copy_gnutls_datum(key, &(key_cache->key));
     crm_debug("Using cached Pacemaker Remote key from %s",
               pcmk__s(key_cache->location, "unknown location"));
 }
 
 static void
 cache_key(struct key_cache_s *key_cache, gnutls_datum_t *key,
           const char *location)
 {
     key_cache->updated = time(NULL);
     key_cache->location = location;
     copy_gnutls_datum(&(key_cache->key), key);
     crm_debug("Using (and cacheing) Pacemaker Remote key from %s",
               pcmk__s(location, "unknown location"));
 }
 
 /*!
  * \internal
  * \brief Get Pacemaker Remote authentication key from file or cache
  *
  * \param[in]  location         Path to key file to try (this memory must
  *                              persist across all calls of this function)
  * \param[out] key              Key from location or cache
  *
  * \return Standard Pacemaker return code
  */
 static int
 get_remote_key(const char *location, gnutls_datum_t *key)
 {
     static struct key_cache_s key_cache = { 0, };
     int rc = pcmk_rc_ok;
 
     if ((location == NULL) || (key == NULL)) {
         return EINVAL;
     }
 
     if (key_is_cached(&key_cache)) {
         if (key_cache_expired(&key_cache)) {
             clear_key_cache(&key_cache);
         } else {
             get_cached_key(&key_cache, key);
             return pcmk_rc_ok;
         }
     }
 
     rc = read_gnutls_key(location, key);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
     cache_key(&key_cache, key, location);
     return pcmk_rc_ok;
 }
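 
 /* Behavior sketch (illustrative only): two lookups within the one-minute
  * cache window return copies of the same key without re-reading the file;
  * each caller owns its copy and must free it.
  *
  *     gnutls_datum_t key1 = { NULL, 0 };
  *     gnutls_datum_t key2 = { NULL, 0 };
  *
  *     get_remote_key(DEFAULT_REMOTE_KEY_LOCATION, &key1); // reads key file
  *     get_remote_key(DEFAULT_REMOTE_KEY_LOCATION, &key2); // from cache
  *
  *     gnutls_free(key1.data);
  *     gnutls_free(key2.data);
  */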
 
 /*!
  * \internal
  * \brief Initialize the Pacemaker Remote authentication key
  *
  * Try loading the Pacemaker Remote authentication key from cache if available,
  * otherwise from these locations, in order of preference: the value of the
  * PCMK_authkey_location environment variable, if set; the Pacemaker default key
  * file location; or (for historical reasons) /etc/corosync/authkey.
  *
  * \param[out] key  Where to store key
  *
  * \return Standard Pacemaker return code
  */
 int
 lrmd__init_remote_key(gnutls_datum_t *key)
 {
     static const char *env_location = NULL;
     static bool need_env = true;
 
     int env_rc = pcmk_rc_ok;
     int default_rc = pcmk_rc_ok;
     int alt_rc = pcmk_rc_ok;
 
     bool env_is_default = false;
     bool env_is_fallback = false;
 
     if (need_env) {
         env_location = pcmk__env_option(PCMK__ENV_AUTHKEY_LOCATION);
         need_env = false;
     }
 
     // Try location in environment variable, if set
     if (env_location != NULL) {
         env_rc = get_remote_key(env_location, key);
         if (env_rc == pcmk_rc_ok) {
             return pcmk_rc_ok;
         }
 
         env_is_default = !strcmp(env_location, DEFAULT_REMOTE_KEY_LOCATION);
         env_is_fallback = !strcmp(env_location, ALT_REMOTE_KEY_LOCATION);
 
         /* @TODO It would be more secure to fail, rather than fall back to the
          * default, if an explicitly set key location is not readable, and it
          * would be better to never use the Corosync location as a fallback.
          * However, that would break any deployments currently working with the
          * fallbacks.
          *
          * @COMPAT Change at 3.0.0
          */
     }
 
     // Try default location, if environment wasn't explicitly set to it
     if (env_is_default) {
         default_rc = env_rc;
     } else {
         default_rc = get_remote_key(DEFAULT_REMOTE_KEY_LOCATION, key);
     }
 
     // Try fallback location, if environment wasn't set to it and default failed
     // @COMPAT Drop at 3.0.0
     if (env_is_fallback) {
         alt_rc = env_rc;
     } else if (default_rc != pcmk_rc_ok) {
         alt_rc = get_remote_key(ALT_REMOTE_KEY_LOCATION, key);
     }
 
     // We have all results, so log and return
 
     if ((env_rc != pcmk_rc_ok) && (default_rc != pcmk_rc_ok)
         && (alt_rc != pcmk_rc_ok)) { // Environment set, everything failed
 
         crm_warn("Could not read Pacemaker Remote key from %s (%s%s%s%s%s): %s",
                  env_location,
                  env_is_default? "" : "or default location ",
                  env_is_default? "" : DEFAULT_REMOTE_KEY_LOCATION,
                  !env_is_default && !env_is_fallback? " " : "",
                  env_is_fallback? "" : "or fallback location ",
                  env_is_fallback? "" : ALT_REMOTE_KEY_LOCATION,
                  pcmk_rc_str(env_rc));
         return ENOKEY;
     }
 
     if (env_rc != pcmk_rc_ok) { // Environment set but failed, using a default
         crm_warn("Could not read Pacemaker Remote key from %s "
                  "(using %s location %s instead): %s",
                  env_location,
                  (default_rc == pcmk_rc_ok)? "default" : "fallback",
                  (default_rc == pcmk_rc_ok)? DEFAULT_REMOTE_KEY_LOCATION : ALT_REMOTE_KEY_LOCATION,
                  pcmk_rc_str(env_rc));
         crm_warn("This undocumented behavior is deprecated and unsafe and will "
                  "be removed in a future release");
         return pcmk_rc_ok;
     }
 
     if (default_rc != pcmk_rc_ok) {
         if (alt_rc == pcmk_rc_ok) {
             // Environment variable unset, used alternate location
             // This gets caught by the default return below, but we additionally
             // warn on this behavior here.
             crm_warn("Read Pacemaker Remote key from alternate location %s",
                      ALT_REMOTE_KEY_LOCATION);
             crm_warn("This undocumented behavior is deprecated and unsafe and will "
                      "be removed in a future release");
         } else {
             // Environment unset, defaults failed
             crm_warn("Could not read Pacemaker Remote key from default location %s"
                      " (or fallback location %s): %s",
                      DEFAULT_REMOTE_KEY_LOCATION, ALT_REMOTE_KEY_LOCATION,
                      pcmk_rc_str(default_rc));
             return ENOKEY;
         }
     }
 
     return pcmk_rc_ok; // Environment variable unset, a default worked
 }
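 
 /* Usage sketch (illustrative only), mirroring what lrmd_tls_connect() below
  * does: load the key, hand it to GnuTLS as a pre-shared key, then release
  * the local copy once the credentials hold it.
  *
  *     gnutls_datum_t psk_key = { NULL, 0 };
  *     gnutls_psk_client_credentials_t creds = NULL;
  *
  *     if (lrmd__init_remote_key(&psk_key) == pcmk_rc_ok) {
  *         gnutls_psk_allocate_client_credentials(&creds);
  *         gnutls_psk_set_client_credentials(creds, DEFAULT_REMOTE_USERNAME,
  *                                           &psk_key, GNUTLS_PSK_KEY_RAW);
  *         gnutls_free(psk_key.data);
  *     }
  */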
 
 static void
 lrmd_gnutls_global_init(void)
 {
     static int gnutls_init = 0;
 
     if (!gnutls_init) {
         crm_gnutls_global_init();
         gnutls_init = 1;
     }
 }
 
 static void
 report_async_connection_result(lrmd_t * lrmd, int rc)
 {
     lrmd_private_t *native = lrmd->lrmd_private;
 
     if (native->callback) {
         lrmd_event_data_t event = { 0, };
         event.type = lrmd_event_connect;
         event.remote_nodename = native->remote_nodename;
         event.connection_rc = rc;
         native->callback(&event);
     }
 }
 
 /*!
  * \internal
  * \brief Perform a TLS client handshake with a Pacemaker Remote server
  *
  * \param[in] lrmd  Newly established Pacemaker Remote executor connection
  *
  * \return Standard Pacemaker return code
  */
 static int
 tls_client_handshake(lrmd_t *lrmd)
 {
     lrmd_private_t *native = lrmd->lrmd_private;
     int tls_rc = GNUTLS_E_SUCCESS;
     int rc = pcmk__tls_client_handshake(native->remote, TLS_HANDSHAKE_TIMEOUT,
                                         &tls_rc);
 
     if (rc != pcmk_rc_ok) {
         crm_warn("Disconnecting after TLS handshake with "
                  "Pacemaker Remote server %s:%d failed: %s",
                  native->server, native->port,
                  (rc == EPROTO)? gnutls_strerror(tls_rc) : pcmk_rc_str(rc));
         gnutls_deinit(*native->remote->tls_session);
         gnutls_free(native->remote->tls_session);
         native->remote->tls_session = NULL;
         lrmd_tls_connection_destroy(lrmd);
     }
     return rc;
 }
 
 /*!
  * \internal
  * \brief Add trigger and file descriptor mainloop sources for TLS
  *
  * \param[in,out] lrmd          API connection with established TLS session
  * \param[in]     do_handshake  Whether to perform executor handshake
  *
  * \return Standard Pacemaker return code
  */
 static int
 add_tls_to_mainloop(lrmd_t *lrmd, bool do_handshake)
 {
     lrmd_private_t *native = lrmd->lrmd_private;
     int rc = pcmk_rc_ok;
 
     char *name = crm_strdup_printf("pacemaker-remote-%s:%d",
                                    native->server, native->port);
 
     struct mainloop_fd_callbacks tls_fd_callbacks = {
         .dispatch = lrmd_tls_dispatch,
         .destroy = lrmd_tls_connection_destroy,
     };
 
     native->process_notify = mainloop_add_trigger(G_PRIORITY_HIGH,
                                                   lrmd_tls_dispatch, lrmd);
     native->source = mainloop_add_fd(name, G_PRIORITY_HIGH, native->sock, lrmd,
                                      &tls_fd_callbacks);
 
     /* Async connections lose the client name provided by the API caller, so we
      * have to use our generated name here to perform the executor handshake.
      *
      * @TODO Keep track of the caller-provided name. Perhaps we should be using
      * that name in this function instead of generating one anyway.
      */
     if (do_handshake) {
         rc = lrmd_handshake(lrmd, name);
         rc = pcmk_legacy2rc(rc);
     }
     free(name);
     return rc;
 }
 
 static void
 lrmd_tcp_connect_cb(void *userdata, int rc, int sock)
 {
     lrmd_t *lrmd = userdata;
     lrmd_private_t *native = lrmd->lrmd_private;
     gnutls_datum_t psk_key = { NULL, 0 };
 
     native->async_timer = 0;
 
     if (rc != pcmk_rc_ok) {
         lrmd_tls_connection_destroy(lrmd);
         crm_info("Could not connect to Pacemaker Remote at %s:%d: %s "
                  QB_XS " rc=%d",
                  native->server, native->port, pcmk_rc_str(rc), rc);
         report_async_connection_result(lrmd, pcmk_rc2legacy(rc));
         return;
     }
 
     /* The TCP connection was successful, so establish the TLS connection.
      * @TODO make this async to avoid blocking code in client
      */
 
     native->sock = sock;
 
     rc = lrmd__init_remote_key(&psk_key);
     if (rc != pcmk_rc_ok) {
         crm_info("Could not connect to Pacemaker Remote at %s:%d: %s "
                  QB_XS " rc=%d",
                  native->server, native->port, pcmk_rc_str(rc), rc);
         lrmd_tls_connection_destroy(lrmd);
         report_async_connection_result(lrmd, pcmk_rc2legacy(rc));
         return;
     }
 
     gnutls_psk_allocate_client_credentials(&native->psk_cred_c);
     gnutls_psk_set_client_credentials(native->psk_cred_c, DEFAULT_REMOTE_USERNAME, &psk_key, GNUTLS_PSK_KEY_RAW);
     gnutls_free(psk_key.data);
 
     native->remote->tls_session = pcmk__new_tls_session(sock, GNUTLS_CLIENT,
                                                         GNUTLS_CRD_PSK,
                                                         native->psk_cred_c);
     if (native->remote->tls_session == NULL) {
         lrmd_tls_connection_destroy(lrmd);
         report_async_connection_result(lrmd, -EPROTO);
         return;
     }
 
     if (tls_client_handshake(lrmd) != pcmk_rc_ok) {
         report_async_connection_result(lrmd, -EKEYREJECTED);
         return;
     }
 
     crm_info("TLS connection to Pacemaker Remote server %s:%d succeeded",
              native->server, native->port);
     rc = add_tls_to_mainloop(lrmd, true);
     report_async_connection_result(lrmd, pcmk_rc2legacy(rc));
 }
 
 static int
 lrmd_tls_connect_async(lrmd_t * lrmd, int timeout /*ms */ )
 {
     int rc;
     int timer_id = 0;
     lrmd_private_t *native = lrmd->lrmd_private;
 
     lrmd_gnutls_global_init();
     native->sock = -1;
     rc = pcmk__connect_remote(native->server, native->port, timeout, &timer_id,
                               &(native->sock), lrmd, lrmd_tcp_connect_cb);
     if (rc != pcmk_rc_ok) {
         crm_warn("Pacemaker Remote connection to %s:%d failed: %s "
                  QB_XS " rc=%d",
                  native->server, native->port, pcmk_rc_str(rc), rc);
         return pcmk_rc2legacy(rc);
     }
     native->async_timer = timer_id;
     return pcmk_ok;
 }
 
 static int
 lrmd_tls_connect(lrmd_t * lrmd, int *fd)
 {
     int rc;
 
     lrmd_private_t *native = lrmd->lrmd_private;
     gnutls_datum_t psk_key = { NULL, 0 };
 
     lrmd_gnutls_global_init();
 
     native->sock = -1;
     rc = pcmk__connect_remote(native->server, native->port, 0, NULL,
                               &(native->sock), NULL, NULL);
     if (rc != pcmk_rc_ok) {
         crm_warn("Pacemaker Remote connection to %s:%d failed: %s "
                  QB_XS " rc=%d",
                  native->server, native->port, pcmk_rc_str(rc), rc);
         lrmd_tls_connection_destroy(lrmd);
         return -ENOTCONN;
     }
 
     rc = lrmd__init_remote_key(&psk_key);
     if (rc != pcmk_rc_ok) {
         lrmd_tls_connection_destroy(lrmd);
         return pcmk_rc2legacy(rc);
     }
 
     gnutls_psk_allocate_client_credentials(&native->psk_cred_c);
     gnutls_psk_set_client_credentials(native->psk_cred_c, DEFAULT_REMOTE_USERNAME, &psk_key, GNUTLS_PSK_KEY_RAW);
     gnutls_free(psk_key.data);
 
     native->remote->tls_session = pcmk__new_tls_session(native->sock, GNUTLS_CLIENT,
                                                         GNUTLS_CRD_PSK,
                                                         native->psk_cred_c);
     if (native->remote->tls_session == NULL) {
         lrmd_tls_connection_destroy(lrmd);
         return -EPROTO;
     }
 
     if (tls_client_handshake(lrmd) != pcmk_rc_ok) {
         return -EKEYREJECTED;
     }
 
     crm_info("Client TLS connection established with Pacemaker Remote server %s:%d", native->server,
              native->port);
 
     if (fd) {
         *fd = native->sock;
     } else {
         add_tls_to_mainloop(lrmd, false);
     }
     return pcmk_ok;
 }
 
 static int
 lrmd_api_connect(lrmd_t * lrmd, const char *name, int *fd)
 {
     int rc = -ENOTCONN;
     lrmd_private_t *native = lrmd->lrmd_private;
 
     switch (native->type) {
         case pcmk__client_ipc:
             rc = lrmd_ipc_connect(lrmd, fd);
             break;
         case pcmk__client_tls:
             rc = lrmd_tls_connect(lrmd, fd);
             break;
         default:
             crm_err("Unsupported executor connection type (bug?): %d",
                     native->type);
             rc = -EPROTONOSUPPORT;
     }
 
     if (rc == pcmk_ok) {
         rc = lrmd_handshake(lrmd, name);
     }
 
     return rc;
 }
 
 static int
 lrmd_api_connect_async(lrmd_t * lrmd, const char *name, int timeout)
 {
     int rc = pcmk_ok;
     lrmd_private_t *native = lrmd->lrmd_private;
 
     CRM_CHECK(native && native->callback, return -EINVAL);
 
     switch (native->type) {
         case pcmk__client_ipc:
             /* Fake an async connection for IPC; the local connection should
              * be fast enough that we gain very little from true async.
              */
             rc = lrmd_api_connect(lrmd, name, NULL);
             if (!rc) {
                 report_async_connection_result(lrmd, rc);
             }
             break;
         case pcmk__client_tls:
             rc = lrmd_tls_connect_async(lrmd, timeout);
             if (rc) {
                 /* connection failed, report rc now */
                 report_async_connection_result(lrmd, rc);
             }
             break;
         default:
             crm_err("Unsupported executor connection type (bug?): %d",
                     native->type);
             rc = -EPROTONOSUPPORT;
     }
 
     return rc;
 }
 
 static void
 lrmd_ipc_disconnect(lrmd_t * lrmd)
 {
     lrmd_private_t *native = lrmd->lrmd_private;
 
     if (native->source != NULL) {
         /* Attached to mainloop */
         mainloop_del_ipc_client(native->source);
         native->source = NULL;
         native->ipc = NULL;
 
     } else if (native->ipc) {
         /* Not attached to mainloop */
         crm_ipc_t *ipc = native->ipc;
 
         native->ipc = NULL;
         crm_ipc_close(ipc);
         crm_ipc_destroy(ipc);
     }
 }
 
 static void
 lrmd_tls_disconnect(lrmd_t * lrmd)
 {
     lrmd_private_t *native = lrmd->lrmd_private;
 
     if (native->remote->tls_session) {
         gnutls_bye(*native->remote->tls_session, GNUTLS_SHUT_RDWR);
         gnutls_deinit(*native->remote->tls_session);
         gnutls_free(native->remote->tls_session);
         native->remote->tls_session = NULL;
     }
 
     if (native->async_timer) {
         g_source_remove(native->async_timer);
         native->async_timer = 0;
     }
 
     if (native->source != NULL) {
         /* Attached to mainloop */
         mainloop_del_ipc_client(native->source);
         native->source = NULL;
 
     } else if (native->sock) {
         close(native->sock);
         native->sock = 0;
     }
 
     if (native->pending_notify) {
         g_list_free_full(native->pending_notify, lrmd_free_xml);
         native->pending_notify = NULL;
     }
 }
 
 static int
 lrmd_api_disconnect(lrmd_t * lrmd)
 {
     lrmd_private_t *native = lrmd->lrmd_private;
     int rc = pcmk_ok;
 
     switch (native->type) {
         case pcmk__client_ipc:
             crm_debug("Disconnecting from local executor");
             lrmd_ipc_disconnect(lrmd);
             break;
         case pcmk__client_tls:
             crm_debug("Disconnecting from remote executor on %s",
                       native->remote_nodename);
             lrmd_tls_disconnect(lrmd);
             break;
         default:
             crm_err("Unsupported executor connection type (bug?): %d",
                     native->type);
             rc = -EPROTONOSUPPORT;
     }
 
     free(native->token);
     native->token = NULL;
 
     free(native->peer_version);
     native->peer_version = NULL;
     return rc;
 }
 
 static int
 lrmd_api_register_rsc(lrmd_t * lrmd,
                       const char *rsc_id,
                       const char *class,
                       const char *provider, const char *type, enum lrmd_call_options options)
 {
     int rc = pcmk_ok;
     xmlNode *data = NULL;
 
     if (!class || !type || !rsc_id) {
         return -EINVAL;
     }
     if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)
         && (provider == NULL)) {
         return -EINVAL;
     }
 
     data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC);
 
     crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__);
     crm_xml_add(data, PCMK__XA_LRMD_RSC_ID, rsc_id);
     crm_xml_add(data, PCMK__XA_LRMD_CLASS, class);
     crm_xml_add(data, PCMK__XA_LRMD_PROVIDER, provider);
     crm_xml_add(data, PCMK__XA_LRMD_TYPE, type);
     rc = lrmd_send_command(lrmd, LRMD_OP_RSC_REG, data, NULL, 0, options, TRUE);
     pcmk__xml_free(data);
 
     return rc;
 }
 
 static int
 lrmd_api_unregister_rsc(lrmd_t * lrmd, const char *rsc_id, enum lrmd_call_options options)
 {
     int rc = pcmk_ok;
     xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC);
 
     crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__);
     crm_xml_add(data, PCMK__XA_LRMD_RSC_ID, rsc_id);
     rc = lrmd_send_command(lrmd, LRMD_OP_RSC_UNREG, data, NULL, 0, options, TRUE);
     pcmk__xml_free(data);
 
     return rc;
 }
 
 lrmd_rsc_info_t *
 lrmd_new_rsc_info(const char *rsc_id, const char *standard,
                   const char *provider, const char *type)
 {
     lrmd_rsc_info_t *rsc_info = pcmk__assert_alloc(1, sizeof(lrmd_rsc_info_t));
 
     rsc_info->id = pcmk__str_copy(rsc_id);
     rsc_info->standard = pcmk__str_copy(standard);
     rsc_info->provider = pcmk__str_copy(provider);
     rsc_info->type = pcmk__str_copy(type);
     return rsc_info;
 }
 
 lrmd_rsc_info_t *
 lrmd_copy_rsc_info(lrmd_rsc_info_t * rsc_info)
 {
     return lrmd_new_rsc_info(rsc_info->id, rsc_info->standard,
                              rsc_info->provider, rsc_info->type);
 }
 
 void
 lrmd_free_rsc_info(lrmd_rsc_info_t * rsc_info)
 {
     if (!rsc_info) {
         return;
     }
     free(rsc_info->id);
     free(rsc_info->type);
     free(rsc_info->standard);
     free(rsc_info->provider);
     free(rsc_info);
 }
 
 static lrmd_rsc_info_t *
 lrmd_api_get_rsc_info(lrmd_t * lrmd, const char *rsc_id, enum lrmd_call_options options)
 {
     lrmd_rsc_info_t *rsc_info = NULL;
     xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC);
     xmlNode *output = NULL;
     const char *class = NULL;
     const char *provider = NULL;
     const char *type = NULL;
 
     crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__);
     crm_xml_add(data, PCMK__XA_LRMD_RSC_ID, rsc_id);
     lrmd_send_command(lrmd, LRMD_OP_RSC_INFO, data, &output, 0, options, TRUE);
     pcmk__xml_free(data);
 
     if (!output) {
         return NULL;
     }
 
     class = crm_element_value(output, PCMK__XA_LRMD_CLASS);
     provider = crm_element_value(output, PCMK__XA_LRMD_PROVIDER);
     type = crm_element_value(output, PCMK__XA_LRMD_TYPE);
 
     if (!class || !type) {
         pcmk__xml_free(output);
         return NULL;
     } else if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)
                && !provider) {
         pcmk__xml_free(output);
         return NULL;
     }
 
     rsc_info = lrmd_new_rsc_info(rsc_id, class, provider, type);
     pcmk__xml_free(output);
     return rsc_info;
 }
 
 void
 lrmd_free_op_info(lrmd_op_info_t *op_info)
 {
     if (op_info) {
         free(op_info->rsc_id);
         free(op_info->action);
         free(op_info->interval_ms_s);
         free(op_info->timeout_ms_s);
         free(op_info);
     }
 }
 
 static int
 lrmd_api_get_recurring_ops(lrmd_t *lrmd, const char *rsc_id, int timeout_ms,
                            enum lrmd_call_options options, GList **output)
 {
     xmlNode *data = NULL;
     xmlNode *output_xml = NULL;
     int rc = pcmk_ok;
 
     if (output == NULL) {
         return -EINVAL;
     }
     *output = NULL;
 
     // Send request
     if (rsc_id) {
         data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC);
         crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__);
         crm_xml_add(data, PCMK__XA_LRMD_RSC_ID, rsc_id);
     }
     rc = lrmd_send_command(lrmd, LRMD_OP_GET_RECURRING, data, &output_xml,
                            timeout_ms, options, TRUE);
     if (data) {
         pcmk__xml_free(data);
     }
 
     // Process reply
     if ((rc != pcmk_ok) || (output_xml == NULL)) {
         return rc;
     }
     for (const xmlNode *rsc_xml = pcmk__xe_first_child(output_xml,
                                                        PCMK__XE_LRMD_RSC, NULL,
                                                        NULL);
          (rsc_xml != NULL) && (rc == pcmk_ok);
          rsc_xml = pcmk__xe_next_same(rsc_xml)) {
 
         rsc_id = crm_element_value(rsc_xml, PCMK__XA_LRMD_RSC_ID);
         if (rsc_id == NULL) {
             crm_err("Could not parse recurring operation information from executor");
             continue;
         }
         for (const xmlNode *op_xml = pcmk__xe_first_child(rsc_xml,
                                                           PCMK__XE_LRMD_RSC_OP,
                                                           NULL, NULL);
              op_xml != NULL; op_xml = pcmk__xe_next_same(op_xml)) {
 
             lrmd_op_info_t *op_info = calloc(1, sizeof(lrmd_op_info_t));
 
             if (op_info == NULL) {
                 rc = -ENOMEM;
                 break;
             }
             op_info->rsc_id = strdup(rsc_id);
             op_info->action = crm_element_value_copy(op_xml,
                                                      PCMK__XA_LRMD_RSC_ACTION);
             op_info->interval_ms_s =
                 crm_element_value_copy(op_xml, PCMK__XA_LRMD_RSC_INTERVAL);
             op_info->timeout_ms_s =
                 crm_element_value_copy(op_xml, PCMK__XA_LRMD_TIMEOUT);
             *output = g_list_prepend(*output, op_info);
         }
     }
     pcmk__xml_free(output_xml);
     return rc;
 }
 
 
 static void
 lrmd_api_set_callback(lrmd_t * lrmd, lrmd_event_callback callback)
 {
     lrmd_private_t *native = lrmd->lrmd_private;
 
     native->callback = callback;
 }
 
 void
 lrmd_internal_set_proxy_callback(lrmd_t * lrmd, void *userdata, void (*callback)(lrmd_t *lrmd, void *userdata, xmlNode *msg))
 {
     lrmd_private_t *native = lrmd->lrmd_private;
 
     native->proxy_callback = callback;
     native->proxy_callback_userdata = userdata;
 }
 
 void
 lrmd_internal_proxy_dispatch(lrmd_t *lrmd, xmlNode *msg)
 {
     lrmd_private_t *native = lrmd->lrmd_private;
 
     if (native->proxy_callback) {
         crm_log_xml_trace(msg, "PROXY_INBOUND");
         native->proxy_callback(lrmd, native->proxy_callback_userdata, msg);
     }
 }
 
 int
 lrmd_internal_proxy_send(lrmd_t * lrmd, xmlNode *msg)
 {
     if (lrmd == NULL) {
         return -ENOTCONN;
     }
     crm_xml_add(msg, PCMK__XA_LRMD_OP, CRM_OP_IPC_FWD);
 
     crm_log_xml_trace(msg, "PROXY_OUTBOUND");
     return lrmd_send_xml_no_reply(lrmd, msg);
 }
 
 static int
 stonith_get_metadata(const char *provider, const char *type, char **output)
 {
     int rc = pcmk_ok;
     stonith_t *stonith_api = stonith_api_new();
 
     if (stonith_api == NULL) {
         crm_err("Could not get fence agent meta-data: API memory allocation failed");
         return -ENOMEM;
     }
 
     rc = stonith_api->cmds->metadata(stonith_api, st_opt_sync_call, type,
                                      provider, output, 0);
     if ((rc == pcmk_ok) && (*output == NULL)) {
         rc = -EIO;
     }
     stonith_api->cmds->free(stonith_api);
     return rc;
 }
 
 static int
 lrmd_api_get_metadata(lrmd_t *lrmd, const char *standard, const char *provider,
                       const char *type, char **output,
                       enum lrmd_call_options options)
 {
     return lrmd->cmds->get_metadata_params(lrmd, standard, provider, type,
                                            output, options, NULL);
 }
 
 static int
 lrmd_api_get_metadata_params(lrmd_t *lrmd, const char *standard,
                              const char *provider, const char *type,
                              char **output, enum lrmd_call_options options,
                              lrmd_key_value_t *params)
 {
     svc_action_t *action = NULL;
     GHashTable *params_table = NULL;
 
     if (!standard || !type) {
         lrmd_key_value_freeall(params);
         return -EINVAL;
     }
 
     if (pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
         lrmd_key_value_freeall(params);
         return stonith_get_metadata(provider, type, output);
     }
 
     params_table = pcmk__strkey_table(free, free);
     for (const lrmd_key_value_t *param = params; param; param = param->next) {
         pcmk__insert_dup(params_table, param->key, param->value);
     }
     action = services__create_resource_action(type, standard, provider, type,
                                               PCMK_ACTION_META_DATA, 0,
                                               PCMK_DEFAULT_METADATA_TIMEOUT_MS,
                                               params_table, 0);
     lrmd_key_value_freeall(params);
 
     if (action == NULL) {
         return -ENOMEM;
     }
     if (action->rc != PCMK_OCF_UNKNOWN) {
         services_action_free(action);
         return -EINVAL;
     }
 
     if (!services_action_sync(action)) {
         crm_err("Failed to retrieve meta-data for %s:%s:%s",
                 standard, provider, type);
         services_action_free(action);
         return -EIO;
     }
 
     if (!action->stdout_data) {
         crm_err("Failed to receive meta-data for %s:%s:%s",
                 standard, provider, type);
         services_action_free(action);
         return -EIO;
     }
 
     *output = strdup(action->stdout_data);
     services_action_free(action);
 
     return pcmk_ok;
 }
 
 static int
 lrmd_api_exec(lrmd_t *lrmd, const char *rsc_id, const char *action,
               const char *userdata, guint interval_ms,
               int timeout,      /* ms */
               int start_delay,  /* ms */
               enum lrmd_call_options options, lrmd_key_value_t * params)
 {
     int rc = pcmk_ok;
     xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC);
     xmlNode *args = pcmk__xe_create(data, PCMK__XE_ATTRIBUTES);
     lrmd_key_value_t *tmp = NULL;
 
     crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__);
     crm_xml_add(data, PCMK__XA_LRMD_RSC_ID, rsc_id);
     crm_xml_add(data, PCMK__XA_LRMD_RSC_ACTION, action);
     crm_xml_add(data, PCMK__XA_LRMD_RSC_USERDATA_STR, userdata);
     crm_xml_add_ms(data, PCMK__XA_LRMD_RSC_INTERVAL, interval_ms);
     crm_xml_add_int(data, PCMK__XA_LRMD_TIMEOUT, timeout);
     crm_xml_add_int(data, PCMK__XA_LRMD_RSC_START_DELAY, start_delay);
 
     for (tmp = params; tmp; tmp = tmp->next) {
         hash2smartfield((gpointer) tmp->key, (gpointer) tmp->value, args);
     }
 
     rc = lrmd_send_command(lrmd, LRMD_OP_RSC_EXEC, data, NULL, timeout, options, TRUE);
     pcmk__xml_free(data);
 
     lrmd_key_value_freeall(params);
     return rc;
 }
 
 /* timeout is in ms */
 static int
 lrmd_api_exec_alert(lrmd_t *lrmd, const char *alert_id, const char *alert_path,
                     int timeout, lrmd_key_value_t *params)
 {
     int rc = pcmk_ok;
     xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_ALERT);
     xmlNode *args = pcmk__xe_create(data, PCMK__XE_ATTRIBUTES);
     lrmd_key_value_t *tmp = NULL;
 
     crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__);
     crm_xml_add(data, PCMK__XA_LRMD_ALERT_ID, alert_id);
     crm_xml_add(data, PCMK__XA_LRMD_ALERT_PATH, alert_path);
     crm_xml_add_int(data, PCMK__XA_LRMD_TIMEOUT, timeout);
 
     for (tmp = params; tmp; tmp = tmp->next) {
         hash2smartfield((gpointer) tmp->key, (gpointer) tmp->value, args);
     }
 
     rc = lrmd_send_command(lrmd, LRMD_OP_ALERT_EXEC, data, NULL, timeout,
                            lrmd_opt_notify_orig_only, TRUE);
     pcmk__xml_free(data);
 
     lrmd_key_value_freeall(params);
     return rc;
 }
 
 static int
 lrmd_api_cancel(lrmd_t *lrmd, const char *rsc_id, const char *action,
                 guint interval_ms)
 {
     int rc = pcmk_ok;
     xmlNode *data = pcmk__xe_create(NULL, PCMK__XE_LRMD_RSC);
 
     crm_xml_add(data, PCMK__XA_LRMD_ORIGIN, __func__);
     crm_xml_add(data, PCMK__XA_LRMD_RSC_ACTION, action);
     crm_xml_add(data, PCMK__XA_LRMD_RSC_ID, rsc_id);
     crm_xml_add_ms(data, PCMK__XA_LRMD_RSC_INTERVAL, interval_ms);
     rc = lrmd_send_command(lrmd, LRMD_OP_RSC_CANCEL, data, NULL, 0, 0, TRUE);
     pcmk__xml_free(data);
     return rc;
 }
 
 static int
 list_stonith_agents(lrmd_list_t ** resources)
 {
     int rc = 0;
     stonith_t *stonith_api = stonith_api_new();
     stonith_key_value_t *stonith_resources = NULL;
     stonith_key_value_t *dIter = NULL;
 
     if (stonith_api == NULL) {
         crm_err("Could not list fence agents: API memory allocation failed");
         return -ENOMEM;
     }
     stonith_api->cmds->list_agents(stonith_api, st_opt_sync_call, NULL,
                                    &stonith_resources, 0);
     stonith_api->cmds->free(stonith_api);
 
     for (dIter = stonith_resources; dIter; dIter = dIter->next) {
         rc++;
         if (resources) {
             *resources = lrmd_list_add(*resources, dIter->value);
         }
     }
 
     stonith_key_value_freeall(stonith_resources, 1, 0);
     return rc;
 }
 
 static int
 lrmd_api_list_agents(lrmd_t * lrmd, lrmd_list_t ** resources, const char *class,
                      const char *provider)
 {
     int rc = 0;
     int stonith_count = 0; // Initially, whether to include stonith devices
 
     if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
         stonith_count = 1;
 
     } else {
         GList *gIter = NULL;
         GList *agents = resources_list_agents(class, provider);
 
         for (gIter = agents; gIter != NULL; gIter = gIter->next) {
             *resources = lrmd_list_add(*resources, (const char *)gIter->data);
             rc++;
         }
         g_list_free_full(agents, free);
 
         if (!class) {
             stonith_count = 1;
         }
     }
 
     if (stonith_count) {
         // Now, if stonith devices are included, how many there are
         stonith_count = list_stonith_agents(resources);
         if (stonith_count > 0) {
             rc += stonith_count;
         }
     }
     if (rc == 0) {
         crm_notice("No agents found for class %s", class);
         rc = -EPROTONOSUPPORT;
     }
     return rc;
 }
 
 static bool
 does_provider_have_agent(const char *agent, const char *provider, const char *class)
 {
     bool found = false;
     GList *agents = NULL;
     GList *gIter2 = NULL;
 
     agents = resources_list_agents(class, provider);
     for (gIter2 = agents; gIter2 != NULL; gIter2 = gIter2->next) {
         if (pcmk__str_eq(agent, gIter2->data, pcmk__str_casei)) {
             found = true;
         }
     }
     g_list_free_full(agents, free);
     return found;
 }
 
 static int
 lrmd_api_list_ocf_providers(lrmd_t * lrmd, const char *agent, lrmd_list_t ** providers)
 {
     int rc = pcmk_ok;
     char *provider = NULL;
     GList *ocf_providers = NULL;
     GList *gIter = NULL;
 
     ocf_providers = resources_list_providers(PCMK_RESOURCE_CLASS_OCF);
 
     for (gIter = ocf_providers; gIter != NULL; gIter = gIter->next) {
         provider = gIter->data;
         if (!agent || does_provider_have_agent(agent, provider,
                                                PCMK_RESOURCE_CLASS_OCF)) {
             *providers = lrmd_list_add(*providers, (const char *)gIter->data);
             rc++;
         }
     }
 
     g_list_free_full(ocf_providers, free);
     return rc;
 }
 
 static int
 lrmd_api_list_standards(lrmd_t * lrmd, lrmd_list_t ** supported)
 {
     int rc = 0;
     GList *standards = NULL;
     GList *gIter = NULL;
 
     standards = resources_list_standards();
 
     for (gIter = standards; gIter != NULL; gIter = gIter->next) {
         *supported = lrmd_list_add(*supported, (const char *)gIter->data);
         rc++;
     }
 
     if (list_stonith_agents(NULL) > 0) {
         *supported = lrmd_list_add(*supported, PCMK_RESOURCE_CLASS_STONITH);
         rc++;
     }
 
     g_list_free_full(standards, free);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Create an executor API object
  *
  * \param[out] api       Will be set to newly created API object (it is the
  *                       caller's responsibility to free this value with
  *                       lrmd_api_delete() if this function succeeds)
  * \param[in]  nodename  If the object will be used for a remote connection,
  *                       the node name to use in cluster for remote executor
  * \param[in]  server    If the object will be used for a remote connection,
  *                       the resolvable host name to connect to
  * \param[in]  port      If the object will be used for a remote connection,
  *                       port number on \p server to connect to
  *
  * \return Standard Pacemaker return code
  * \note If the caller leaves one of \p nodename or \p server NULL, the other's
  *       value will be used for both. If the caller leaves both NULL, an API
  *       object will be created for a local executor connection.
  */
 int
 lrmd__new(lrmd_t **api, const char *nodename, const char *server, int port)
 {
     lrmd_private_t *pvt = NULL;
 
     if (api == NULL) {
         return EINVAL;
     }
     *api = NULL;
 
     // Allocate all memory needed
 
     *api = calloc(1, sizeof(lrmd_t));
     if (*api == NULL) {
         return ENOMEM;
     }
 
     pvt = calloc(1, sizeof(lrmd_private_t));
     if (pvt == NULL) {
         lrmd_api_delete(*api);
         *api = NULL;
         return ENOMEM;
     }
     (*api)->lrmd_private = pvt;
 
     // @TODO Do we need to do this for local connections?
     pvt->remote = calloc(1, sizeof(pcmk__remote_t));
 
     (*api)->cmds = calloc(1, sizeof(lrmd_api_operations_t));
 
     if ((pvt->remote == NULL) || ((*api)->cmds == NULL)) {
         lrmd_api_delete(*api);
         *api = NULL;
         return ENOMEM;
     }
 
     // Set methods
     (*api)->cmds->connect = lrmd_api_connect;
     (*api)->cmds->connect_async = lrmd_api_connect_async;
     (*api)->cmds->is_connected = lrmd_api_is_connected;
     (*api)->cmds->poke_connection = lrmd_api_poke_connection;
     (*api)->cmds->disconnect = lrmd_api_disconnect;
     (*api)->cmds->register_rsc = lrmd_api_register_rsc;
     (*api)->cmds->unregister_rsc = lrmd_api_unregister_rsc;
     (*api)->cmds->get_rsc_info = lrmd_api_get_rsc_info;
     (*api)->cmds->get_recurring_ops = lrmd_api_get_recurring_ops;
     (*api)->cmds->set_callback = lrmd_api_set_callback;
     (*api)->cmds->get_metadata = lrmd_api_get_metadata;
     (*api)->cmds->exec = lrmd_api_exec;
     (*api)->cmds->cancel = lrmd_api_cancel;
     (*api)->cmds->list_agents = lrmd_api_list_agents;
     (*api)->cmds->list_ocf_providers = lrmd_api_list_ocf_providers;
     (*api)->cmds->list_standards = lrmd_api_list_standards;
     (*api)->cmds->exec_alert = lrmd_api_exec_alert;
     (*api)->cmds->get_metadata_params = lrmd_api_get_metadata_params;
 
     if ((nodename == NULL) && (server == NULL)) {
         pvt->type = pcmk__client_ipc;
     } else {
         if (nodename == NULL) {
             nodename = server;
         } else if (server == NULL) {
             server = nodename;
         }
         pvt->type = pcmk__client_tls;
         pvt->remote_nodename = strdup(nodename);
         pvt->server = strdup(server);
         if ((pvt->remote_nodename == NULL) || (pvt->server == NULL)) {
             lrmd_api_delete(*api);
             *api = NULL;
             return ENOMEM;
         }
         pvt->port = port;
         if (pvt->port == 0) {
             pvt->port = crm_default_remote_port();
         }
     }
     return pcmk_rc_ok;
 }
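 
 /* Usage sketch (illustrative only; the node, host, and client names are
  * hypothetical): create an API object for a remote executor, connect
  * synchronously, and clean up.
  *
  *     lrmd_t *api = NULL;
  *
  *     if (lrmd__new(&api, "node1", "node1.example.com", 0) == pcmk_rc_ok) {
  *         if (api->cmds->connect(api, "example-client", NULL) == pcmk_ok) {
  *             api->cmds->poke_connection(api);
  *         }
  *         lrmd_api_delete(api); // Disconnects first if still connected
  *     }
  */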
 
 lrmd_t *
 lrmd_api_new(void)
 {
     lrmd_t *api = NULL;
 
     CRM_ASSERT(lrmd__new(&api, NULL, NULL, 0) == pcmk_rc_ok);
     return api;
 }
 
 lrmd_t *
 lrmd_remote_api_new(const char *nodename, const char *server, int port)
 {
     lrmd_t *api = NULL;
 
     CRM_ASSERT(lrmd__new(&api, nodename, server, port) == pcmk_rc_ok);
     return api;
 }
 
 void
 lrmd_api_delete(lrmd_t * lrmd)
 {
     if (lrmd == NULL) {
         return;
     }
     if (lrmd->cmds != NULL) { // Never NULL, but make static analysis happy
         if (lrmd->cmds->disconnect != NULL) { // Also never really NULL
             lrmd->cmds->disconnect(lrmd); // No-op if already disconnected
         }
         free(lrmd->cmds);
     }
     if (lrmd->lrmd_private != NULL) {
         lrmd_private_t *native = lrmd->lrmd_private;
 
         free(native->server);
         free(native->remote_nodename);
         free(native->remote);
         free(native->token);
         free(native->peer_version);
         free(lrmd->lrmd_private);
     }
     free(lrmd);
 }
 
 struct metadata_cb {
      void (*callback)(int pid, const pcmk__action_result_t *result,
                       void *user_data);
      void *user_data;
 };
 
 /*!
  * \internal
  * \brief Process asynchronous metadata completion
  *
  * \param[in,out] action  Metadata action that completed
  */
 static void
 metadata_complete(svc_action_t *action)
 {
     struct metadata_cb *metadata_cb = (struct metadata_cb *) action->cb_data;
     pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
 
     pcmk__set_result(&result, action->rc, action->status,
                      services__exit_reason(action));
     pcmk__set_result_output(&result, action->stdout_data, action->stderr_data);
 
     metadata_cb->callback(0, &result, metadata_cb->user_data);
     result.action_stdout = NULL; // Prevent free, because action owns it
     result.action_stderr = NULL; // Prevent free, because action owns it
     pcmk__reset_result(&result);
     free(metadata_cb);
 }
 
 /*!
  * \internal
  * \brief Retrieve agent metadata asynchronously
  *
  * \param[in]     rsc        Resource agent specification
  * \param[in]     callback   Function to call with result (this will always be
  *                           called, whether by this function directly or later
  *                           via the main loop, and on success the metadata will
  *                           be in its result argument's action_stdout)
  * \param[in,out] user_data  User data to pass to callback
  *
  * \return Standard Pacemaker return code
  * \note This function is not an lrmd_api_operations_t method because it does
  *       not need an lrmd_t object and does not go through the executor, but
  *       executes the agent directly.
  */
 int
 lrmd__metadata_async(const lrmd_rsc_info_t *rsc,
                      void (*callback)(int pid,
                                       const pcmk__action_result_t *result,
                                       void *user_data),
                      void *user_data)
 {
     svc_action_t *action = NULL;
     struct metadata_cb *metadata_cb = NULL;
     pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
 
     CRM_CHECK(callback != NULL, return EINVAL);
 
     if ((rsc == NULL) || (rsc->standard == NULL) || (rsc->type == NULL)) {
         pcmk__set_result(&result, PCMK_OCF_NOT_CONFIGURED,
                          PCMK_EXEC_ERROR_FATAL,
                          "Invalid resource specification");
         callback(0, &result, user_data);
         pcmk__reset_result(&result);
         return EINVAL;
     }
 
     if (strcmp(rsc->standard, PCMK_RESOURCE_CLASS_STONITH) == 0) {
         return stonith__metadata_async(rsc->type,
                                        PCMK_DEFAULT_METADATA_TIMEOUT_MS / 1000,
                                        callback, user_data);
     }
 
     action = services__create_resource_action(pcmk__s(rsc->id, rsc->type),
                                               rsc->standard, rsc->provider,
                                               rsc->type,
                                               PCMK_ACTION_META_DATA, 0,
                                               PCMK_DEFAULT_METADATA_TIMEOUT_MS,
                                               NULL, 0);
     if (action == NULL) {
         pcmk__set_result(&result, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR,
                          "Out of memory");
         callback(0, &result, user_data);
         pcmk__reset_result(&result);
         return ENOMEM;
     }
     if (action->rc != PCMK_OCF_UNKNOWN) {
         pcmk__set_result(&result, action->rc, action->status,
                          services__exit_reason(action));
         callback(0, &result, user_data);
         pcmk__reset_result(&result);
         services_action_free(action);
         return EINVAL;
     }
 
     action->cb_data = calloc(1, sizeof(struct metadata_cb));
     if (action->cb_data == NULL) {
         services_action_free(action);
         pcmk__set_result(&result, PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_ERROR,
                          "Out of memory");
         callback(0, &result, user_data);
         pcmk__reset_result(&result);
         return ENOMEM;
     }
 
     metadata_cb = (struct metadata_cb *) action->cb_data;
     metadata_cb->callback = callback;
     metadata_cb->user_data = user_data;
     if (!services_action_async(action, metadata_complete)) {
         services_action_free(action);
         return pcmk_rc_error; // @TODO Derive from action->rc and ->status
     }
 
     // The services library has taken responsibility for action
     return pcmk_rc_ok;
 }
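 
 /* Illustrative sketch only (not part of the library): a caller-provided
  * callback for lrmd__metadata_async() might look like the following. The
  * callback name and logging are hypothetical; the result members used here
  * (exit_status, exit_reason, action_stdout) are the ones set above.
  *
  *   static void
  *   my_metadata_cb(int pid, const pcmk__action_result_t *result,
  *                  void *user_data)
  *   {
  *       if (result->exit_status == PCMK_OCF_OK) {
  *           crm_info("Agent metadata: %s", result->action_stdout);
  *       } else {
  *           crm_warn("Could not get metadata: %s",
  *                    pcmk__s(result->exit_reason, "unknown error"));
  *       }
  *   }
  *
  *   // ... then, with an lrmd_rsc_info_t *rsc already populated:
  *   // lrmd__metadata_async(rsc, my_metadata_cb, user_data);
  */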
 
 /*!
  * \internal
  * \brief Set the result of an executor event
  *
  * \param[in,out] event        Executor event to set
  * \param[in]     rc           OCF exit status of event
  * \param[in]     op_status    Executor status of event
  * \param[in]     exit_reason  Human-friendly description of event
  */
 void
 lrmd__set_result(lrmd_event_data_t *event, enum ocf_exitcode rc, int op_status,
                  const char *exit_reason)
 {
     if (event == NULL) {
         return;
     }
 
     event->rc = rc;
     event->op_status = op_status;
 
     // lrmd_event_data_t has (const char *) members that lrmd_free_event() frees
     pcmk__str_update((char **) &event->exit_reason, exit_reason);
 }
 
 /*!
  * \internal
  * \brief Clear an executor event's exit reason, output, and error output
  *
  * \param[in,out] event  Executor event to reset
  */
 void
 lrmd__reset_result(lrmd_event_data_t *event)
 {
     if (event == NULL) {
         return;
     }
 
     free((void *) event->exit_reason);
     event->exit_reason = NULL;
 
     free((void *) event->output);
     event->output = NULL;
 }
 
 /*!
  * \internal
  * \brief Get the uptime of a remote resource connection
  *
  * When the cluster connects to a remote resource, part of that resource's
  * handshake includes the uptime of the remote resource's connection.  This
  * uptime is stored in the lrmd_t object.
  *
  * \param[in] lrmd  Executor connection to check
  *
  * \return The connection's uptime, or -1 if unknown
  */
 time_t
 lrmd__uptime(lrmd_t *lrmd)
 {
     lrmd_private_t *native = lrmd->lrmd_private;
 
     if (native->remote == NULL) {
         return -1;
     } else {
         return native->remote->uptime;
     }
 }
 
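 /*!
  * \internal
  * \brief Get the start state of a remote node connection
  *
  * \param[in] lrmd  Executor connection to check
  *
  * \return The remote node's start state, or NULL if unknown
  */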
 const char *
 lrmd__node_start_state(lrmd_t *lrmd)
 {
     lrmd_private_t *native = lrmd->lrmd_private;
 
     if (native->remote == NULL) {
         return NULL;
     } else {
         return native->remote->start_state;
     }
 }
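 
 /* Illustrative sketch only (not part of the library): typical lifecycle of a
  * local executor connection. The connect() arguments shown here (client name
  * and optional file descriptor pointer) are assumed from the public lrmd API.
  *
  *   lrmd_t *conn = lrmd_api_new();
  *
  *   if (conn->cmds->connect(conn, "my-client", NULL) == pcmk_ok) {
  *       // ... register resources, execute actions, etc. ...
  *       conn->cmds->disconnect(conn);
  *   }
  *   lrmd_api_delete(conn);   // Safe even if the connection never succeeded
  */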
diff --git a/lib/pacemaker/pcmk_graph_consumer.c b/lib/pacemaker/pcmk_graph_consumer.c
index 1f7504db55..c59a499337 100644
--- a/lib/pacemaker/pcmk_graph_consumer.c
+++ b/lib/pacemaker/pcmk_graph_consumer.c
@@ -1,890 +1,890 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <sys/stat.h>
 
 #include <crm/crm.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 #include <crm/lrmd_internal.h>
 #include <pacemaker-internal.h>
 
 
 /*
  * Functions for freeing transition graph objects
  */
 
 /*!
  * \internal
  * \brief Free a transition graph action object
  *
  * \param[in,out] user_data  Action to free
  */
 static void
 free_graph_action(gpointer user_data)
 {
     pcmk__graph_action_t *action = user_data;
 
     if (action->timer != 0) {
         crm_warn("Cancelling timer for graph action %d", action->id);
         g_source_remove(action->timer);
     }
     if (action->params != NULL) {
         g_hash_table_destroy(action->params);
     }
     pcmk__xml_free(action->xml);
     free(action);
 }
 
 /*!
  * \internal
  * \brief Free a transition graph synapse object
  *
  * \param[in,out] user_data  Synapse to free
  */
 static void
 free_graph_synapse(gpointer user_data)
 {
     pcmk__graph_synapse_t *synapse = user_data;
 
     g_list_free_full(synapse->actions, free_graph_action);
     g_list_free_full(synapse->inputs, free_graph_action);
     free(synapse);
 }
 
 /*!
  * \internal
  * \brief Free a transition graph object
  *
  * \param[in,out] graph  Transition graph to free
  */
 void
 pcmk__free_graph(pcmk__graph_t *graph)
 {
     if (graph != NULL) {
         g_list_free_full(graph->synapses, free_graph_synapse);
         free(graph->source);
         free(graph->failed_stop_offset);
         free(graph->failed_start_offset);
         free(graph);
     }
 }
 
 
 /*
  * Functions for updating graph
  */
 
 /*!
  * \internal
  * \brief Update synapse after completed prerequisite
  *
  * A synapse is ready to be executed once all its prerequisite actions (inputs)
  * complete. Given a completed action, check whether it is an input for a given
  * synapse, and if so, mark the input as confirmed, and mark the synapse as
  * ready if appropriate.
  *
  * \param[in,out] synapse    Transition graph synapse to update
  * \param[in]     action_id  ID of an action that completed
  *
  * \note The only substantial effect here is confirming synapse inputs.
  *       should_fire_synapse() will recalculate pcmk__synapse_ready, so the
  *       only consumer of the pcmk__synapse_ready flag set here is
  *       synapse_state_str().
  */
 static void
 update_synapse_ready(pcmk__graph_synapse_t *synapse, int action_id)
 {
     if (pcmk_is_set(synapse->flags, pcmk__synapse_ready)) {
         return; // All inputs have already been confirmed
     }
 
     // Presume ready until proven otherwise
     pcmk__set_synapse_flags(synapse, pcmk__synapse_ready);
 
     for (GList *lpc = synapse->inputs; lpc != NULL; lpc = lpc->next) {
         pcmk__graph_action_t *prereq = (pcmk__graph_action_t *) lpc->data;
 
         if (prereq->id == action_id) {
             crm_trace("Confirming input %d of synapse %d",
                       action_id, synapse->id);
             pcmk__set_graph_action_flags(prereq, pcmk__graph_action_confirmed);
 
         } else if (!pcmk_is_set(prereq->flags, pcmk__graph_action_confirmed)) {
             pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready);
             crm_trace("Synapse %d still not ready after action %d",
                       synapse->id, action_id);
         }
     }
     if (pcmk_is_set(synapse->flags, pcmk__synapse_ready)) {
         crm_trace("Synapse %d is now ready to execute", synapse->id);
     }
 }
 
 /*!
  * \internal
  * \brief Update action and synapse confirmation after action completion
  *
  * \param[in,out] synapse    Transition graph synapse that action belongs to
  * \param[in]     action_id  ID of action that completed
  */
 static void
 update_synapse_confirmed(pcmk__graph_synapse_t *synapse, int action_id)
 {
     bool all_confirmed = true;
 
     for (GList *lpc = synapse->actions; lpc != NULL; lpc = lpc->next) {
         pcmk__graph_action_t *action = (pcmk__graph_action_t *) lpc->data;
 
         if (action->id == action_id) {
             crm_trace("Confirmed action %d of synapse %d",
                       action_id, synapse->id);
             pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed);
 
         } else if (all_confirmed &&
                    !pcmk_is_set(action->flags, pcmk__graph_action_confirmed)) {
             all_confirmed = false;
             crm_trace("Synapse %d still not confirmed after action %d",
                       synapse->id, action_id);
         }
     }
 
     if (all_confirmed
         && !pcmk_is_set(synapse->flags, pcmk__synapse_confirmed)) {
         crm_trace("Confirmed synapse %d", synapse->id);
         pcmk__set_synapse_flags(synapse, pcmk__synapse_confirmed);
     }
 }
 
 /*!
  * \internal
  * \brief Update the transition graph with a completed action result
  *
  * \param[in,out] graph   Transition graph to update
  * \param[in]     action  Action that completed
  */
 void
 pcmk__update_graph(pcmk__graph_t *graph, const pcmk__graph_action_t *action)
 {
     for (GList *lpc = graph->synapses; lpc != NULL; lpc = lpc->next) {
         pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) lpc->data;
 
         if (pcmk_any_flags_set(synapse->flags,
                                pcmk__synapse_confirmed|pcmk__synapse_failed)) {
             continue; // This synapse already completed
 
         } else if (pcmk_is_set(synapse->flags, pcmk__synapse_executed)) {
             update_synapse_confirmed(synapse, action->id);
 
         } else if (!pcmk_is_set(action->flags, pcmk__graph_action_failed)
                    || (synapse->priority == PCMK_SCORE_INFINITY)) {
             update_synapse_ready(synapse, action->id);
         }
     }
 }
 
 
 /*
  * Functions for executing graph
  */
 
 /* A transition graph consists of various types of actions. The library caller
  * registers execution functions for each action type, which will be stored
  * here.
  */
 static pcmk__graph_functions_t *graph_fns = NULL;
 
 /*!
  * \internal
  * \brief Set transition graph execution functions
  *
  * \param[in] fns  Execution functions to use
  */
 void
 pcmk__set_graph_functions(pcmk__graph_functions_t *fns)
 {
     crm_debug("Setting custom functions for executing transition graphs");
     graph_fns = fns;
 
     CRM_ASSERT(graph_fns != NULL);
     CRM_ASSERT(graph_fns->rsc != NULL);
     CRM_ASSERT(graph_fns->cluster != NULL);
     CRM_ASSERT(graph_fns->pseudo != NULL);
     CRM_ASSERT(graph_fns->fence != NULL);
 }
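 
 /* Illustrative sketch only (not part of the library): a caller supplies its
  * own handlers before executing a graph. The member names come from the
  * checks above; the handler functions themselves are hypothetical.
  *
  *   static pcmk__graph_functions_t my_graph_fns = {
  *       .pseudo  = my_pseudo_handler,
  *       .rsc     = my_resource_handler,
  *       .cluster = my_cluster_handler,
  *       .fence   = my_fencing_handler,
  *       // .allowed is optional; when NULL, every action is allowed
  *   };
  *
  *   pcmk__set_graph_functions(&my_graph_fns);
  */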
 
 /*!
  * \internal
  * \brief Check whether a graph synapse is ready to be executed
  *
  * \param[in,out] graph    Transition graph that synapse is part of
  * \param[in,out] synapse  Synapse to check
  *
  * \return true if synapse is ready, false otherwise
  */
 static bool
 should_fire_synapse(pcmk__graph_t *graph, pcmk__graph_synapse_t *synapse)
 {
     GList *lpc = NULL;
 
     pcmk__set_synapse_flags(synapse, pcmk__synapse_ready);
     for (lpc = synapse->inputs; lpc != NULL; lpc = lpc->next) {
         pcmk__graph_action_t *prereq = (pcmk__graph_action_t *) lpc->data;
 
         if (!(pcmk_is_set(prereq->flags, pcmk__graph_action_confirmed))) {
             crm_trace("Input %d for synapse %d not yet confirmed",
                       prereq->id, synapse->id);
             pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready);
             break;
 
         } else if (pcmk_is_set(prereq->flags, pcmk__graph_action_failed)
                    && !pcmk_is_set(prereq->flags,
                                    pcmk__graph_action_can_fail)) {
             crm_trace("Input %d for synapse %d confirmed but failed",
                       prereq->id, synapse->id);
             pcmk__clear_synapse_flags(synapse, pcmk__synapse_ready);
             break;
         }
     }
     if (pcmk_is_set(synapse->flags, pcmk__synapse_ready)) {
         crm_trace("Synapse %d is ready to execute", synapse->id);
     } else {
         return false;
     }
 
     for (lpc = synapse->actions; lpc != NULL; lpc = lpc->next) {
         pcmk__graph_action_t *a = (pcmk__graph_action_t *) lpc->data;
 
         if (a->type == pcmk__pseudo_graph_action) {
             /* None of the below applies to pseudo ops */
 
         } else if (synapse->priority < graph->abort_priority) {
             crm_trace("Skipping synapse %d: priority %d is less than "
                       "abort priority %d",
                       synapse->id, synapse->priority, graph->abort_priority);
             graph->skipped++;
             return false;
 
         } else if (graph_fns->allowed && !(graph_fns->allowed(graph, a))) {
             crm_trace("Deferring synapse %d: not allowed", synapse->id);
             return false;
         }
     }
 
     return true;
 }
 
 /*!
  * \internal
  * \brief Initiate an action from a transition graph
  *
  * \param[in,out] graph   Transition graph containing action
  * \param[in,out] action  Action to execute
  *
  * \return Standard Pacemaker return code
  */
 static int
 initiate_action(pcmk__graph_t *graph, pcmk__graph_action_t *action)
 {
     const char *id = pcmk__xe_id(action->xml);
 
     CRM_CHECK(id != NULL, return EINVAL);
     CRM_CHECK(!pcmk_is_set(action->flags, pcmk__graph_action_executed),
               return pcmk_rc_already);
 
     pcmk__set_graph_action_flags(action, pcmk__graph_action_executed);
     switch (action->type) {
         case pcmk__pseudo_graph_action:
             crm_trace("Executing pseudo-action %d (%s)", action->id, id);
             return graph_fns->pseudo(graph, action);
 
         case pcmk__rsc_graph_action:
             crm_trace("Executing resource action %d (%s)", action->id, id);
             return graph_fns->rsc(graph, action);
 
         case pcmk__cluster_graph_action:
             if (pcmk__str_eq(crm_element_value(action->xml, PCMK_XA_OPERATION),
                              PCMK_ACTION_STONITH, pcmk__str_none)) {
                 crm_trace("Executing fencing action %d (%s)",
                           action->id, id);
                 return graph_fns->fence(graph, action);
             }
             crm_trace("Executing cluster action %d (%s)", action->id, id);
             return graph_fns->cluster(graph, action);
 
         default:
             crm_err("Unsupported graph action type <%s " PCMK_XA_ID "='%s'> "
                     "(bug?)",
                     action->xml->name, id);
             return EINVAL;
     }
 }
 
 /*!
  * \internal
  * \brief Execute a graph synapse
  *
  * \param[in,out] graph    Transition graph with synapse to execute
  * \param[in,out] synapse  Synapse to execute
  *
  * \return Standard Pacemaker return code
  */
 static int
 fire_synapse(pcmk__graph_t *graph, pcmk__graph_synapse_t *synapse)
 {
     pcmk__set_synapse_flags(synapse, pcmk__synapse_executed);
     for (GList *lpc = synapse->actions; lpc != NULL; lpc = lpc->next) {
         pcmk__graph_action_t *action = (pcmk__graph_action_t *) lpc->data;
         int rc = initiate_action(graph, action);
 
         if (rc != pcmk_rc_ok) {
             crm_err("Failed initiating <%s " PCMK_XA_ID "=%d> in synapse %d: "
                     "%s",
                     action->xml->name, action->id, synapse->id,
                     pcmk_rc_str(rc));
             pcmk__set_synapse_flags(synapse, pcmk__synapse_confirmed);
             pcmk__set_graph_action_flags(action,
                                          pcmk__graph_action_confirmed
                                          |pcmk__graph_action_failed);
             return pcmk_rc_error;
         }
     }
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Dummy graph method that can be used with simulations
  *
  * \param[in,out] graph   Transition graph containing action
  * \param[in,out] action  Graph action to be initiated
  *
  * \return Standard Pacemaker return code
  * \note If the PE_fail environment variable is set to the action ID,
  *       then the graph action will be marked as failed.
  */
 static int
 pseudo_action_dummy(pcmk__graph_t *graph, pcmk__graph_action_t *action)
 {
     static int fail = -1;
 
     if (fail < 0) {
         long long fail_ll;
 
         if ((pcmk__scan_ll(getenv("PE_fail"), &fail_ll, 0LL) == pcmk_rc_ok)
             && (fail_ll > 0LL) && (fail_ll <= INT_MAX)) {
             fail = (int) fail_ll;
         } else {
             fail = 0;
         }
     }
 
     if (action->id == fail) {
         crm_err("Dummy event handler: pretending action %d failed", action->id);
         pcmk__set_graph_action_flags(action, pcmk__graph_action_failed);
         graph->abort_priority = PCMK_SCORE_INFINITY;
     } else {
         crm_trace("Dummy event handler: action %d initiated", action->id);
     }
     pcmk__set_graph_action_flags(action, pcmk__graph_action_confirmed);
     pcmk__update_graph(graph, action);
     return pcmk_rc_ok;
 }
 
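 // Fallback graph functions, used if a caller never registers its own: every
 // action type is handled by the dummy handler above, which is sufficient for
 // simulations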
 static pcmk__graph_functions_t default_fns = {
     pseudo_action_dummy,
     pseudo_action_dummy,
     pseudo_action_dummy,
     pseudo_action_dummy
 };
 
 /*!
  * \internal
  * \brief Execute all actions in a transition graph
  *
  * \param[in,out] graph  Transition graph to execute
  *
  * \return Status of transition after execution
  */
 enum pcmk__graph_status
 pcmk__execute_graph(pcmk__graph_t *graph)
 {
     GList *lpc = NULL;
     int log_level = LOG_DEBUG;
     enum pcmk__graph_status pass_result = pcmk__graph_active;
     const char *status = "In progress";
 
     if (graph_fns == NULL) {
         graph_fns = &default_fns;
     }
     if (graph == NULL) {
         return pcmk__graph_complete;
     }
 
     graph->fired = 0;
     graph->pending = 0;
     graph->skipped = 0;
     graph->completed = 0;
     graph->incomplete = 0;
 
     // Count completed and in-flight synapses
     for (lpc = graph->synapses; lpc != NULL; lpc = lpc->next) {
         pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) lpc->data;
 
         if (pcmk_is_set(synapse->flags, pcmk__synapse_confirmed)) {
             graph->completed++;
 
         } else if (!pcmk_is_set(synapse->flags, pcmk__synapse_failed)
                    && pcmk_is_set(synapse->flags, pcmk__synapse_executed)) {
             graph->pending++;
         }
     }
     crm_trace("Executing graph %d (%d synapses already completed, %d pending)",
               graph->id, graph->completed, graph->pending);
 
     // Execute any synapses that are ready
     for (lpc = graph->synapses; lpc != NULL; lpc = lpc->next) {
         pcmk__graph_synapse_t *synapse = (pcmk__graph_synapse_t *) lpc->data;
 
         if ((graph->batch_limit > 0)
             && (graph->pending >= graph->batch_limit)) {
 
             crm_debug("Throttling graph execution: batch limit (%d) reached",
                       graph->batch_limit);
             break;
 
         } else if (pcmk_is_set(synapse->flags, pcmk__synapse_failed)) {
             graph->skipped++;
             continue;
 
         } else if (pcmk_any_flags_set(synapse->flags,
                                       pcmk__synapse_confirmed
                                       |pcmk__synapse_executed)) {
             continue; // Already handled
 
         } else if (should_fire_synapse(graph, synapse)) {
             graph->fired++;
             if (fire_synapse(graph, synapse) != pcmk_rc_ok) {
                 crm_err("Synapse %d failed to fire", synapse->id);
                 log_level = LOG_ERR;
                 graph->abort_priority = PCMK_SCORE_INFINITY;
                 graph->incomplete++;
                 graph->fired--;
             }
 
             if (!(pcmk_is_set(synapse->flags, pcmk__synapse_confirmed))) {
                 graph->pending++;
             }
 
         } else {
             crm_trace("Synapse %d cannot fire", synapse->id);
             graph->incomplete++;
         }
     }
 
     if ((graph->pending == 0) && (graph->fired == 0)) {
         graph->complete = true;
 
         if ((graph->incomplete != 0) && (graph->abort_priority <= 0)) {
             log_level = LOG_WARNING;
             pass_result = pcmk__graph_terminated;
             status = "Terminated";
 
         } else if (graph->skipped != 0) {
             log_level = LOG_NOTICE;
             pass_result = pcmk__graph_complete;
             status = "Stopped";
 
         } else {
             log_level = LOG_NOTICE;
             pass_result = pcmk__graph_complete;
             status = "Complete";
         }
 
     } else if (graph->fired == 0) {
         pass_result = pcmk__graph_pending;
     }
 
     do_crm_log(log_level,
                "Transition %d (Complete=%d, Pending=%d,"
                " Fired=%d, Skipped=%d, Incomplete=%d, Source=%s): %s",
                graph->id, graph->completed, graph->pending, graph->fired,
                graph->skipped, graph->incomplete, graph->source, status);
 
     return pass_result;
 }
 
 
 /*
  * Functions for unpacking transition graph XML into structs
  */
 
 /*!
  * \internal
  * \brief Unpack a transition graph action from XML
  *
  * \param[in] parent      Synapse that action is part of
  * \param[in] xml_action  Action XML to unpack
  *
  * \return Newly allocated action on success, or NULL otherwise
  */
 static pcmk__graph_action_t *
 unpack_action(pcmk__graph_synapse_t *parent, xmlNode *xml_action)
 {
     enum pcmk__graph_action_type action_type;
     pcmk__graph_action_t *action = NULL;
     const char *value = pcmk__xe_id(xml_action);
 
     if (value == NULL) {
         crm_err("Ignoring transition graph action without " PCMK_XA_ID
                 " (bug?)");
         crm_log_xml_trace(xml_action, "invalid");
         return NULL;
     }
 
     if (pcmk__xe_is(xml_action, PCMK__XE_RSC_OP)) {
         action_type = pcmk__rsc_graph_action;
 
     } else if (pcmk__xe_is(xml_action, PCMK__XE_PSEUDO_EVENT)) {
         action_type = pcmk__pseudo_graph_action;
 
     } else if (pcmk__xe_is(xml_action, PCMK__XE_CRM_EVENT)) {
         action_type = pcmk__cluster_graph_action;
 
     } else {
         crm_err("Ignoring transition graph action of unknown type '%s' (bug?)",
                 xml_action->name);
         crm_log_xml_trace(xml_action, "invalid");
         return NULL;
     }
 
     action = calloc(1, sizeof(pcmk__graph_action_t));
     if (action == NULL) {
         crm_perror(LOG_CRIT, "Cannot unpack transition graph action");
         crm_log_xml_trace(xml_action, "lost");
         return NULL;
     }
 
     pcmk__scan_min_int(value, &(action->id), -1);
     action->xml = pcmk__xml_copy(NULL, xml_action);
     action->synapse = parent;
     action->type = action_type;
     action->params = xml2list(action->xml);
 
     value = crm_meta_value(action->params, PCMK_META_TIMEOUT);
     pcmk__scan_min_int(value, &(action->timeout), 0);
 
     /* Take PCMK_META_START_DELAY into account for the timeout of the action
      * timer
      */
     value = crm_meta_value(action->params, PCMK_META_START_DELAY);
     {
         int start_delay;
 
         pcmk__scan_min_int(value, &start_delay, 0);
         action->timeout += start_delay;
     }
 
     if (pcmk__guint_from_hash(action->params, CRM_META "_" PCMK_META_INTERVAL,
                               0, &(action->interval_ms)) != pcmk_rc_ok) {
         action->interval_ms = 0;
     }
 
     value = crm_meta_value(action->params, PCMK__META_CAN_FAIL);
     if (value != NULL) {
         int can_fail = 0;
 
         if ((crm_str_to_boolean(value, &can_fail) > 0) && (can_fail > 0)) {
             pcmk__set_graph_action_flags(action, pcmk__graph_action_can_fail);
         } else {
             pcmk__clear_graph_action_flags(action, pcmk__graph_action_can_fail);
         }
 
         if (pcmk_is_set(action->flags, pcmk__graph_action_can_fail)) {
             crm_warn("Support for the " PCMK__META_CAN_FAIL " meta-attribute "
                      "is deprecated and will be removed in a future release");
         }
     }
 
     crm_trace("Action %d has timer set to %dms", action->id, action->timeout);
 
     return action;
 }
 
 /*!
  * \internal
  * \brief Unpack transition graph synapse from XML
  *
  * \param[in,out] new_graph    Transition graph that synapse is part of
  * \param[in]     xml_synapse  Synapse XML
  *
  * \return Newly allocated synapse on success, or NULL otherwise
  */
 static pcmk__graph_synapse_t *
 unpack_synapse(pcmk__graph_t *new_graph, const xmlNode *xml_synapse)
 {
     const char *value = NULL;
     xmlNode *action_set = NULL;
     pcmk__graph_synapse_t *new_synapse = NULL;
 
     crm_trace("Unpacking synapse %s", pcmk__xe_id(xml_synapse));
 
     new_synapse = calloc(1, sizeof(pcmk__graph_synapse_t));
     if (new_synapse == NULL) {
         return NULL;
     }
 
     pcmk__scan_min_int(pcmk__xe_id(xml_synapse), &(new_synapse->id), 0);
 
     value = crm_element_value(xml_synapse, PCMK__XA_PRIORITY);
     pcmk__scan_min_int(value, &(new_synapse->priority), 0);
 
     CRM_CHECK(new_synapse->id >= 0,
               free_graph_synapse((gpointer) new_synapse); return NULL);
 
     new_graph->num_synapses++;
 
     crm_trace("Unpacking synapse %s action sets",
               crm_element_value(xml_synapse, PCMK_XA_ID));
 
     for (action_set = pcmk__xe_first_child(xml_synapse, "action_set", NULL,
                                            NULL);
          action_set != NULL; action_set = pcmk__xe_next_same(action_set)) {
 
         for (xmlNode *action = pcmk__xe_first_child(action_set, NULL, NULL,
                                                     NULL);
              action != NULL; action = pcmk__xe_next(action)) {
 
             pcmk__graph_action_t *new_action = unpack_action(new_synapse,
                                                              action);
 
             if (new_action == NULL) {
                 continue;
             }
 
             crm_trace("Adding action %d to synapse %d",
                       new_action->id, new_synapse->id);
             new_graph->num_actions++;
             new_synapse->actions = g_list_append(new_synapse->actions,
                                                  new_action);
         }
     }
 
     crm_trace("Unpacking synapse %s inputs", pcmk__xe_id(xml_synapse));
 
     for (xmlNode *inputs = pcmk__xe_first_child(xml_synapse, "inputs", NULL,
                                                 NULL);
          inputs != NULL; inputs = pcmk__xe_next_same(inputs)) {
 
         for (xmlNode *trigger = pcmk__xe_first_child(inputs, "trigger", NULL,
                                                      NULL);
              trigger != NULL; trigger = pcmk__xe_next_same(trigger)) {
 
             for (xmlNode *input = pcmk__xe_first_child(trigger, NULL, NULL,
                                                        NULL);
                  input != NULL; input = pcmk__xe_next(input)) {
 
                 pcmk__graph_action_t *new_input = unpack_action(new_synapse,
                                                                 input);
 
                 if (new_input == NULL) {
                     continue;
                 }
 
                 crm_trace("Adding input %d to synapse %d",
                            new_input->id, new_synapse->id);
 
                 new_synapse->inputs = g_list_append(new_synapse->inputs,
                                                     new_input);
             }
         }
     }
 
     return new_synapse;
 }
 
 /*!
  * \internal
  * \brief Unpack transition graph XML
  *
  * \param[in] xml_graph  Transition graph XML to unpack
  * \param[in] reference  Where the XML came from (for logging)
  *
  * \return Newly allocated transition graph on success, NULL otherwise
  * \note The caller is responsible for freeing the return value using
  *       pcmk__free_graph().
  * \note The XML is expected to be structured like:
          <transition_graph ...>
            <synapse id="0">
              <action_set>
                <rsc_op id="2" ...>
                ...
              </action_set>
              <inputs>
                  <rsc_op id="1" ...
                  ...
              </inputs>
            </synapse>
            ...
          </transition_graph>
  */
 pcmk__graph_t *
 pcmk__unpack_graph(const xmlNode *xml_graph, const char *reference)
 {
     pcmk__graph_t *new_graph = NULL;
 
     new_graph = calloc(1, sizeof(pcmk__graph_t));
     if (new_graph == NULL) {
         return NULL;
     }
 
     new_graph->source = strdup(pcmk__s(reference, "unknown"));
     if (new_graph->source == NULL) {
         pcmk__free_graph(new_graph);
         return NULL;
     }
 
     new_graph->id = -1;
     new_graph->abort_priority = 0;
     new_graph->network_delay = 0;
     new_graph->stonith_timeout = 0;
     new_graph->completion_action = pcmk__graph_done;
 
     // Parse top-level attributes from PCMK__XE_TRANSITION_GRAPH
     if (xml_graph != NULL) {
         const char *buf = crm_element_value(xml_graph, "transition_id");
 
         CRM_CHECK(buf != NULL,
                   pcmk__free_graph(new_graph); return NULL);
         pcmk__scan_min_int(buf, &(new_graph->id), -1);
 
         buf = crm_element_value(xml_graph, PCMK_OPT_CLUSTER_DELAY);
         CRM_CHECK(buf != NULL,
                   pcmk__free_graph(new_graph); return NULL);
         pcmk_parse_interval_spec(buf, &(new_graph->network_delay));
 
         buf = crm_element_value(xml_graph, PCMK_OPT_STONITH_TIMEOUT);
         if (buf == NULL) {
             new_graph->stonith_timeout = new_graph->network_delay;
         } else {
             pcmk_parse_interval_spec(buf, &(new_graph->stonith_timeout));
         }
 
         // Use 0 (dynamic limit) as default/invalid, -1 (no limit) as minimum
         buf = crm_element_value(xml_graph, PCMK_OPT_BATCH_LIMIT);
         if ((buf == NULL)
             || (pcmk__scan_min_int(buf, &(new_graph->batch_limit),
                                    -1) != pcmk_rc_ok)) {
             new_graph->batch_limit = 0;
         }
 
         buf = crm_element_value(xml_graph, PCMK_OPT_MIGRATION_LIMIT);
         pcmk__scan_min_int(buf, &(new_graph->migration_limit), -1);
 
         new_graph->failed_stop_offset =
             crm_element_value_copy(xml_graph, "failed-stop-offset");
         new_graph->failed_start_offset =
             crm_element_value_copy(xml_graph, "failed-start-offset");
 
         if (crm_element_value_epoch(xml_graph, "recheck-by",
                                     &(new_graph->recheck_by)) != pcmk_ok) {
             new_graph->recheck_by = 0;
         }
     }
 
     // Unpack each child <synapse> element
     for (const xmlNode *synapse_xml = pcmk__xe_first_child(xml_graph,
                                                            "synapse", NULL,
                                                            NULL);
          synapse_xml != NULL; synapse_xml = pcmk__xe_next_same(synapse_xml)) {
 
         pcmk__graph_synapse_t *new_synapse = unpack_synapse(new_graph,
                                                             synapse_xml);
 
         if (new_synapse != NULL) {
             new_graph->synapses = g_list_append(new_graph->synapses,
                                                 new_synapse);
         }
     }
 
     crm_debug("Unpacked transition %d from %s: %d actions in %d synapses",
               new_graph->id, new_graph->source, new_graph->num_actions,
               new_graph->num_synapses);
 
     return new_graph;
 }
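 
 /* Illustrative sketch only (not part of the library): how the unpack,
  * execute, and free functions in this file fit together for one transition.
  * Obtaining the graph XML, the my_graph_fns struct, and the event loop that
  * feeds completed actions back via pcmk__update_graph() are assumed.
  *
  *   pcmk__graph_t *graph = pcmk__unpack_graph(graph_xml, "example-source");
  *
  *   if (graph != NULL) {
  *       pcmk__set_graph_functions(&my_graph_fns);
  *
  *       switch (pcmk__execute_graph(graph)) {
  *           case pcmk__graph_complete:
  *           case pcmk__graph_terminated:
  *               pcmk__free_graph(graph);
  *               break;
  *           default:
  *               // Still active or pending: wait for action results, apply
  *               // them with pcmk__update_graph(), then execute again
  *               break;
  *       }
  *   }
  */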
 
 
 /*
  * Other transition graph utilities
  */
 
 /*!
  * \internal
  * \brief Synthesize an executor event from a graph action
  *
  * \param[in] resource     If not NULL, use a call ID greater than any in
  *                         this XML
  * \param[in] action       Graph action
  * \param[in] status       What to use as event execution status
  * \param[in] rc           What to use as event exit status
  * \param[in] exit_reason  What to use as event exit reason
  *
  * \return Newly allocated executor event on success, or NULL otherwise
  */
 lrmd_event_data_t *
 pcmk__event_from_graph_action(const xmlNode *resource,
                               const pcmk__graph_action_t *action,
                               int status, int rc, const char *exit_reason)
 {
     lrmd_event_data_t *op = NULL;
     GHashTableIter iter;
     const char *name = NULL;
     const char *value = NULL;
     xmlNode *action_resource = NULL;
 
     CRM_CHECK(action != NULL, return NULL);
     CRM_CHECK(action->type == pcmk__rsc_graph_action, return NULL);
 
     action_resource = pcmk__xe_first_child(action->xml, PCMK_XE_PRIMITIVE, NULL,
                                            NULL);
     CRM_CHECK(action_resource != NULL, crm_log_xml_warn(action->xml, "invalid");
                                        return NULL);
 
     op = lrmd_new_event(pcmk__xe_id(action_resource),
                         crm_element_value(action->xml, PCMK_XA_OPERATION),
                         action->interval_ms);
     lrmd__set_result(op, rc, status, exit_reason);
     op->t_run = time(NULL);
-    op->t_rcchange = (unsigned int) op->t_run;
+    op->t_rcchange = op->t_run;
     op->params = pcmk__strkey_table(free, free);
 
     g_hash_table_iter_init(&iter, action->params);
     while (g_hash_table_iter_next(&iter, (void **)&name, (void **)&value)) {
         pcmk__insert_dup(op->params, name, value);
     }
 
     for (xmlNode *xop = pcmk__xe_first_child(resource, NULL, NULL, NULL);
          xop != NULL; xop = pcmk__xe_next(xop)) {
 
         int tmp = 0;
 
         crm_element_value_int(xop, PCMK__XA_CALL_ID, &tmp);
         crm_debug("Got call_id=%d for %s", tmp, pcmk__xe_id(resource));
         if (tmp > op->call_id) {
             op->call_id = tmp;
         }
     }
 
     op->call_id++;
     return op;
 }
diff --git a/lib/pacemaker/pcmk_injections.c b/lib/pacemaker/pcmk_injections.c
index a879e79819..8b2a613505 100644
--- a/lib/pacemaker/pcmk_injections.c
+++ b/lib/pacemaker/pcmk_injections.c
@@ -1,778 +1,778 @@
 /*
  * Copyright 2009-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdio.h>
 #include <unistd.h>
 #include <stdlib.h>
 
 #include <sys/stat.h>
 #include <sys/param.h>
 #include <sys/types.h>
 #include <dirent.h>
 
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/cib/internal.h>
 #include <crm/common/util.h>
 #include <crm/common/iso8601.h>
 #include <crm/common/xml_internal.h>
 #include <crm/lrmd_events.h>            // lrmd_event_data_t, etc.
 #include <crm/lrmd_internal.h>
 #include <crm/pengine/status.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 bool pcmk__simulate_node_config = false;
 
 #define XPATH_NODE_CONFIG   "//" PCMK_XE_NODE "[@" PCMK_XA_UNAME "='%s']"
 #define XPATH_NODE_STATE    "//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']"
 #define XPATH_NODE_STATE_BY_ID "//" PCMK__XE_NODE_STATE "[@" PCMK_XA_ID "='%s']"
 #define XPATH_RSC_HISTORY   XPATH_NODE_STATE \
                             "//" PCMK__XE_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']"
 
 
 /*!
  * \internal
  * \brief Inject a fictitious transient node attribute into scheduler input
  *
  * \param[in,out] out       Output object for displaying error messages
  * \param[in,out] cib_node  \c PCMK__XE_NODE_STATE XML to inject attribute into
  * \param[in]     name      Transient node attribute name to inject
  * \param[in]     value     Transient node attribute value to inject
  */
 static void
 inject_transient_attr(pcmk__output_t *out, xmlNode *cib_node,
                       const char *name, const char *value)
 {
     xmlNode *attrs = NULL;
     xmlNode *instance_attrs = NULL;
     const char *node_uuid = pcmk__xe_id(cib_node);
 
     out->message(out, "inject-attr", name, value, cib_node);
 
     attrs = pcmk__xe_first_child(cib_node, PCMK__XE_TRANSIENT_ATTRIBUTES, NULL,
                                  NULL);
     if (attrs == NULL) {
         attrs = pcmk__xe_create(cib_node, PCMK__XE_TRANSIENT_ATTRIBUTES);
         crm_xml_add(attrs, PCMK_XA_ID, node_uuid);
     }
 
     instance_attrs = pcmk__xe_first_child(attrs, PCMK_XE_INSTANCE_ATTRIBUTES,
                                           NULL, NULL);
     if (instance_attrs == NULL) {
         instance_attrs = pcmk__xe_create(attrs, PCMK_XE_INSTANCE_ATTRIBUTES);
         crm_xml_add(instance_attrs, PCMK_XA_ID, node_uuid);
     }
 
     crm_create_nvpair_xml(instance_attrs, NULL, name, value);
 }
 
 /*!
  * \internal
  * \brief Inject a fictitious fail count into a scheduler input
  *
  * \param[in,out] out          Output object for displaying error messages
  * \param[in,out] cib_conn     CIB connection
  * \param[in,out] cib_node     Node state XML to inject into
  * \param[in]     resource     ID of resource for fail count to inject
  * \param[in]     task         Action name for fail count to inject
  * \param[in]     interval_ms  Action interval (in milliseconds) for fail count
  * \param[in]     exit_status  Action result for fail count to inject (if
  *                             \c PCMK_OCF_OK, or \c PCMK_OCF_NOT_RUNNING when
  *                             \p interval_ms is 0, nothing is injected)
  */
 void
 pcmk__inject_failcount(pcmk__output_t *out, cib_t *cib_conn, xmlNode *cib_node,
                        const char *resource, const char *task,
                        guint interval_ms, int exit_status)
 {
     char *name = NULL;
     char *value = NULL;
 
     int failcount = 0;
     xmlNode *output = NULL;
 
     CRM_CHECK((out != NULL) && (cib_conn != NULL) && (cib_node != NULL)
               && (resource != NULL) && (task != NULL), return);
 
     if ((exit_status == PCMK_OCF_OK)
         || ((exit_status == PCMK_OCF_NOT_RUNNING) && (interval_ms == 0))) {
         return;
     }
 
     // Get current failcount and increment it
     name = pcmk__failcount_name(resource, task, interval_ms);
 
     if (cib__get_node_attrs(out, cib_conn, PCMK_XE_STATUS,
                             pcmk__xe_id(cib_node), NULL, NULL, NULL, name,
                             NULL, &output) == pcmk_rc_ok) {
 
         if (crm_element_value_int(output, PCMK_XA_VALUE, &failcount) != 0) {
             failcount = 0;
         }
     }
     value = pcmk__itoa(failcount + 1);
     inject_transient_attr(out, cib_node, name, value);
 
     free(name);
     free(value);
     pcmk__xml_free(output);
 
     name = pcmk__lastfailure_name(resource, task, interval_ms);
     value = pcmk__ttoa(time(NULL));
     inject_transient_attr(out, cib_node, name, value);
 
     free(name);
     free(value);
 }
 
 /*!
  * \internal
  * \brief Create a CIB configuration entry for a fictitious node
  *
  * \param[in,out] cib_conn  CIB object to use
  * \param[in]     node      Node name to use
  */
 static void
 create_node_entry(cib_t *cib_conn, const char *node)
 {
     int rc = pcmk_ok;
     char *xpath = crm_strdup_printf(XPATH_NODE_CONFIG, node);
 
     rc = cib_conn->cmds->query(cib_conn, xpath, NULL, cib_xpath|cib_sync_call);
 
     if (rc == -ENXIO) { // Only add if not already existing
         xmlNode *cib_object = pcmk__xe_create(NULL, PCMK_XE_NODE);
 
         crm_xml_add(cib_object, PCMK_XA_ID, node); // Use node name as ID
         crm_xml_add(cib_object, PCMK_XA_UNAME, node);
         cib_conn->cmds->create(cib_conn, PCMK_XE_NODES, cib_object,
                                cib_sync_call);
         /* Not bothering with subsequent query to see if it exists,
            we'll bomb out later in the call to query_node_uuid()... */
 
         pcmk__xml_free(cib_object);
     }
 
     free(xpath);
 }
 
 /*!
  * \internal
  * \brief Synthesize a fake executor event for an action
  *
  * \param[in] cib_resource  XML for any existing resource action history
  * \param[in] task          Name of action to synthesize
  * \param[in] interval_ms   Interval of action to synthesize
  * \param[in] outcome       Result of action to synthesize
  *
  * \return Newly allocated executor event
  * \note It is the caller's responsibility to free the result with
  *       lrmd_free_event().
  */
 static lrmd_event_data_t *
 create_op(const xmlNode *cib_resource, const char *task, guint interval_ms,
           int outcome)
 {
     lrmd_event_data_t *op = NULL;
     xmlNode *xop = NULL;
 
     op = lrmd_new_event(pcmk__xe_id(cib_resource), task, interval_ms);
     lrmd__set_result(op, outcome, PCMK_EXEC_DONE, "Simulated action result");
     op->params = NULL; // Not needed for simulation purposes
     op->t_run = time(NULL);
-    op->t_rcchange = (unsigned int) op->t_run;
+    op->t_rcchange = op->t_run;
 
     // Use a call ID higher than any existing history entries
     op->call_id = 0;
     for (xop = pcmk__xe_first_child(cib_resource, NULL, NULL, NULL);
          xop != NULL; xop = pcmk__xe_next(xop)) {
 
         int tmp = 0;
 
         crm_element_value_int(xop, PCMK__XA_CALL_ID, &tmp);
         if (tmp > op->call_id) {
             op->call_id = tmp;
         }
     }
     op->call_id++;
 
     return op;
 }
 
 /*!
  * \internal
  * \brief Inject a fictitious resource history entry into a scheduler input
  *
  * \param[in,out] cib_resource  Resource history XML to inject entry into
  * \param[in,out] op            Action result to inject
  * \param[in]     node          Name of node where the action occurred
  * \param[in]     target_rc     Expected result for action to inject
  *
  * \return XML of injected resource history entry
  */
 xmlNode *
 pcmk__inject_action_result(xmlNode *cib_resource, lrmd_event_data_t *op,
                            const char *node, int target_rc)
 {
     return pcmk__create_history_xml(cib_resource, op, CRM_FEATURE_SET,
                                     target_rc, node, crm_system_name);
 }
 
 /*!
  * \internal
  * \brief Inject a fictitious node into a scheduler input
  *
  * \param[in,out] cib_conn  Scheduler input CIB to inject node into
  * \param[in]     node      Name of node to inject
  * \param[in]     uuid      UUID of node to inject
  *
  * \return XML of \c PCMK__XE_NODE_STATE entry for new node
  * \note If the global pcmk__simulate_node_config has been set to true, a
  *       node entry in the configuration section will be added in addition to
  *       the node state entry in the status section.
  */
 xmlNode *
 pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid)
 {
     int rc = pcmk_ok;
     xmlNode *cib_object = NULL;
     char *xpath = crm_strdup_printf(XPATH_NODE_STATE, node);
     bool duplicate = false;
     char *found_uuid = NULL;
 
     if (pcmk__simulate_node_config) {
         create_node_entry(cib_conn, node);
     }
 
     rc = cib_conn->cmds->query(cib_conn, xpath, &cib_object,
                                cib_xpath|cib_sync_call);
 
     if ((cib_object != NULL) && (pcmk__xe_id(cib_object) == NULL)) {
         crm_err("Detected multiple " PCMK__XE_NODE_STATE " entries for "
                 "xpath=%s, bailing",
                 xpath);
         duplicate = true;
         goto done;
     }
 
     if (rc == -ENXIO) {
         if (uuid == NULL) {
             query_node_uuid(cib_conn, node, &found_uuid, NULL);
         } else {
             found_uuid = strdup(uuid);
         }
 
         if (found_uuid) {
             char *xpath_by_uuid = crm_strdup_printf(XPATH_NODE_STATE_BY_ID,
                                                     found_uuid);
 
             /* It's possible that a PCMK__XE_NODE_STATE entry doesn't have a
              * PCMK_XA_UNAME yet
              */
             rc = cib_conn->cmds->query(cib_conn, xpath_by_uuid, &cib_object,
                                        cib_xpath|cib_sync_call);
 
             if ((cib_object != NULL) && (pcmk__xe_id(cib_object) == NULL)) {
                 crm_err("Can't inject node state for %s because multiple "
                         "state entries found for ID %s", node, found_uuid);
                 duplicate = true;
                 free(xpath_by_uuid);
                 goto done;
 
             } else if (cib_object != NULL) {
                 crm_xml_add(cib_object, PCMK_XA_UNAME, node);
 
                 rc = cib_conn->cmds->modify(cib_conn, PCMK_XE_STATUS,
                                             cib_object, cib_sync_call);
             }
 
             free(xpath_by_uuid);
         }
     }
 
     if (rc == -ENXIO) {
         cib_object = pcmk__xe_create(NULL, PCMK__XE_NODE_STATE);
         crm_xml_add(cib_object, PCMK_XA_ID, found_uuid);
         crm_xml_add(cib_object, PCMK_XA_UNAME, node);
         cib_conn->cmds->create(cib_conn, PCMK_XE_STATUS, cib_object,
                                cib_sync_call);
         pcmk__xml_free(cib_object);
 
         rc = cib_conn->cmds->query(cib_conn, xpath, &cib_object,
                                    cib_xpath|cib_sync_call);
         crm_trace("Injecting node state for %s (rc=%d)", node, rc);
     }
 
 done:
     free(found_uuid);
     free(xpath);
 
     if (duplicate) {
         crm_log_xml_warn(cib_object, "Duplicates");
         crm_exit(CRM_EX_SOFTWARE);
         return NULL; // not reached, but makes static analysis happy
     }
 
     CRM_ASSERT(rc == pcmk_ok);
     return cib_object;
 }
 
 /*!
  * \internal
  * \brief Inject a fictitious node state change into a scheduler input
  *
  * \param[in,out] cib_conn  Scheduler input CIB to inject into
  * \param[in]     node      Name of node to inject change for
  * \param[in]     up        If true, change state to online, otherwise offline
  *
  * \return XML of changed (or added) node state entry
  */
 xmlNode *
 pcmk__inject_node_state_change(cib_t *cib_conn, const char *node, bool up)
 {
     xmlNode *cib_node = pcmk__inject_node(cib_conn, node, NULL);
 
     if (up) {
         pcmk__xe_set_props(cib_node,
                            PCMK__XA_IN_CCM, PCMK_VALUE_TRUE,
                            PCMK_XA_CRMD, PCMK_VALUE_ONLINE,
                            PCMK__XA_JOIN, CRMD_JOINSTATE_MEMBER,
                            PCMK_XA_EXPECTED, CRMD_JOINSTATE_MEMBER,
                            NULL);
     } else {
         pcmk__xe_set_props(cib_node,
                            PCMK__XA_IN_CCM, PCMK_VALUE_FALSE,
                            PCMK_XA_CRMD, PCMK_VALUE_OFFLINE,
                            PCMK__XA_JOIN, CRMD_JOINSTATE_DOWN,
                            PCMK_XA_EXPECTED, CRMD_JOINSTATE_DOWN,
                            NULL);
     }
     crm_xml_add(cib_node, PCMK_XA_CRM_DEBUG_ORIGIN, crm_system_name);
     return cib_node;
 }
 
 /*!
  * \internal
  * \brief Check whether a node has history for a given resource
  *
  * \param[in,out] cib_node  Node state XML to check
  * \param[in]     resource  Resource name to check for
  *
  * \return Resource's \c PCMK__XE_LRM_RESOURCE XML entry beneath \p cib_node if
  *         found, otherwise \c NULL
  */
 static xmlNode *
 find_resource_xml(xmlNode *cib_node, const char *resource)
 {
     const char *node = crm_element_value(cib_node, PCMK_XA_UNAME);
     char *xpath = crm_strdup_printf(XPATH_RSC_HISTORY, node, resource);
     xmlNode *match = get_xpath_object(xpath, cib_node, LOG_TRACE);
 
     free(xpath);
     return match;
 }
 
 /*!
  * \internal
  * \brief Inject a resource history element into a scheduler input
  *
  * \param[in,out] out       Output object for displaying error messages
  * \param[in,out] cib_node  Node state XML to inject resource history entry into
  * \param[in]     resource  ID (in configuration) of resource to inject
  * \param[in]     lrm_name  ID as used in history (could be clone instance)
  * \param[in]     rclass    Resource agent class of resource to inject
  * \param[in]     rtype     Resource agent type of resource to inject
  * \param[in]     rprovider Resource agent provider of resource to inject
  *
  * \return XML of injected resource history element
  * \note If a history element already exists under either \p resource or
  *       \p lrm_name, this will return it rather than injecting a new one.
  */
 xmlNode *
 pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node,
                               const char *resource, const char *lrm_name,
                               const char *rclass, const char *rtype,
                               const char *rprovider)
 {
     xmlNode *lrm = NULL;
     xmlNode *container = NULL;
     xmlNode *cib_resource = NULL;
 
     cib_resource = find_resource_xml(cib_node, resource);
     if (cib_resource != NULL) {
         /* If an existing LRM history entry uses the resource name,
          * continue using it, even if lrm_name is different.
          */
         return cib_resource;
     }
 
     // Check for history entry under preferred name
     if (strcmp(resource, lrm_name) != 0) {
         cib_resource = find_resource_xml(cib_node, lrm_name);
         if (cib_resource != NULL) {
             return cib_resource;
         }
     }
 
     if ((rclass == NULL) || (rtype == NULL)) {
         // @TODO query configuration for class, provider, type
         out->err(out,
                  "Resource %s not found in the status section of %s "
                  "(supply class and type to continue)",
                  resource, pcmk__xe_id(cib_node));
         return NULL;
 
     } else if (!pcmk__strcase_any_of(rclass,
                                      PCMK_RESOURCE_CLASS_OCF,
                                      PCMK_RESOURCE_CLASS_STONITH,
                                      PCMK_RESOURCE_CLASS_SERVICE,
                                      PCMK_RESOURCE_CLASS_UPSTART,
                                      PCMK_RESOURCE_CLASS_SYSTEMD,
                                      PCMK_RESOURCE_CLASS_LSB, NULL)) {
         out->err(out, "Invalid class for %s: %s", resource, rclass);
         return NULL;
 
     } else if (pcmk_is_set(pcmk_get_ra_caps(rclass), pcmk_ra_cap_provider)
                && (rprovider == NULL)) {
         // @TODO query configuration for provider
         out->err(out, "Please specify the provider for resource %s", resource);
         return NULL;
     }
 
     crm_info("Injecting new resource %s into node state '%s'",
              lrm_name, pcmk__xe_id(cib_node));
 
     lrm = pcmk__xe_first_child(cib_node, PCMK__XE_LRM, NULL, NULL);
     if (lrm == NULL) {
         const char *node_uuid = pcmk__xe_id(cib_node);
 
         lrm = pcmk__xe_create(cib_node, PCMK__XE_LRM);
         crm_xml_add(lrm, PCMK_XA_ID, node_uuid);
     }
 
     container = pcmk__xe_first_child(lrm, PCMK__XE_LRM_RESOURCES, NULL, NULL);
     if (container == NULL) {
         container = pcmk__xe_create(lrm, PCMK__XE_LRM_RESOURCES);
     }
 
     cib_resource = pcmk__xe_create(container, PCMK__XE_LRM_RESOURCE);
 
     // If we're creating a new entry, use the preferred name
     crm_xml_add(cib_resource, PCMK_XA_ID, lrm_name);
 
     crm_xml_add(cib_resource, PCMK_XA_CLASS, rclass);
     crm_xml_add(cib_resource, PCMK_XA_PROVIDER, rprovider);
     crm_xml_add(cib_resource, PCMK_XA_TYPE, rtype);
 
     return cib_resource;
 }
 
 /*!
  * \internal
  * \brief Inject a ticket attribute into ticket state
  *
  * \param[in,out] out          Output object for displaying error messages
  * \param[in]     ticket_id    Ticket whose state should be changed
  * \param[in]     attr_name    Ticket attribute name to inject
  * \param[in]     attr_value   Boolean value of ticket attribute to inject
  * \param[in,out] cib          CIB object to use
  *
  * \return Standard Pacemaker return code
  */
 static int
 set_ticket_state_attr(pcmk__output_t *out, const char *ticket_id,
                       const char *attr_name, bool attr_value, cib_t *cib)
 {
     int rc = pcmk_rc_ok;
     xmlNode *xml_top = NULL;
     xmlNode *ticket_state_xml = NULL;
 
     // Check for an existing ticket state entry
     rc = pcmk__get_ticket_state(cib, ticket_id, &ticket_state_xml);
 
     if (rc == pcmk_rc_duplicate_id) {
         out->err(out, "Multiple " PCMK__XE_TICKET_STATE "s match ticket_id=%s",
                  ticket_id);
         rc = pcmk_rc_ok;
     }
 
     if (rc == pcmk_rc_ok) { // Ticket state found, use it
         crm_debug("Injecting attribute into existing ticket state %s",
                   ticket_id);
         xml_top = ticket_state_xml;
 
     } else if (rc == ENXIO) { // No ticket state, create it
         xmlNode *xml_obj = NULL;
 
         xml_top = pcmk__xe_create(NULL, PCMK_XE_STATUS);
         xml_obj = pcmk__xe_create(xml_top, PCMK_XE_TICKETS);
         ticket_state_xml = pcmk__xe_create(xml_obj, PCMK__XE_TICKET_STATE);
         crm_xml_add(ticket_state_xml, PCMK_XA_ID, ticket_id);
 
     } else { // Error
         return rc;
     }
 
     // Add the attribute to the ticket state
     pcmk__xe_set_bool_attr(ticket_state_xml, attr_name, attr_value);
     crm_log_xml_debug(xml_top, "Update");
 
     // Commit the change to the CIB
     rc = cib->cmds->modify(cib, PCMK_XE_STATUS, xml_top, cib_sync_call);
     rc = pcmk_legacy2rc(rc);
 
     pcmk__xml_free(xml_top);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Inject a fictitious action into the cluster
  *
  * \param[in,out] out        Output object for displaying error messages
  * \param[in]     spec       Action specification to inject
  * \param[in,out] cib        CIB object for scheduler input
  * \param[in]     scheduler  Scheduler data
  */
 static void
 inject_action(pcmk__output_t *out, const char *spec, cib_t *cib,
               const pcmk_scheduler_t *scheduler)
 {
     int rc;
     int outcome = PCMK_OCF_OK;
     guint interval_ms = 0;
 
     char *key = NULL;
     char *node = NULL;
     char *task = NULL;
     char *resource = NULL;
 
     const char *rtype = NULL;
     const char *rclass = NULL;
     const char *rprovider = NULL;
 
     xmlNode *cib_op = NULL;
     xmlNode *cib_node = NULL;
     xmlNode *cib_resource = NULL;
     const pcmk_resource_t *rsc = NULL;
     lrmd_event_data_t *op = NULL;
 
     out->message(out, "inject-spec", spec);
 
     key = pcmk__assert_alloc(1, strlen(spec) + 1);
     node = pcmk__assert_alloc(1, strlen(spec) + 1);
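     /* A spec has the form <operation-key>@<node>=<exit-status>, for example
      * a hypothetical "myrsc_monitor_10000@node1=7"; parse_op_key() below
      * splits the key into resource, task, and interval
      */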
     rc = sscanf(spec, "%[^@]@%[^=]=%d", key, node, &outcome);
     if (rc != 3) {
         out->err(out, "Invalid operation spec: %s.  Only found %d fields",
                  spec, rc);
         goto done;
     }
 
     parse_op_key(key, &resource, &task, &interval_ms);
 
     rsc = pe_find_resource(scheduler->resources, resource);
     if (rsc == NULL) {
         out->err(out, "Invalid resource name: %s", resource);
         goto done;
     }
 
     rclass = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
     rtype = crm_element_value(rsc->private->xml, PCMK_XA_TYPE);
     rprovider = crm_element_value(rsc->private->xml, PCMK_XA_PROVIDER);
 
     cib_node = pcmk__inject_node(cib, node, NULL);
     CRM_ASSERT(cib_node != NULL);
 
     pcmk__inject_failcount(out, cib, cib_node, resource, task, interval_ms,
                            outcome);
 
     cib_resource = pcmk__inject_resource_history(out, cib_node,
                                                  resource, resource,
                                                  rclass, rtype, rprovider);
     CRM_ASSERT(cib_resource != NULL);
 
     op = create_op(cib_resource, task, interval_ms, outcome);
     CRM_ASSERT(op != NULL);
 
     cib_op = pcmk__inject_action_result(cib_resource, op, node, 0);
     CRM_ASSERT(cib_op != NULL);
     lrmd_free_event(op);
 
     rc = cib->cmds->modify(cib, PCMK_XE_STATUS, cib_node, cib_sync_call);
     CRM_ASSERT(rc == pcmk_ok);
 
 done:
     free(resource);
     free(task);
     free(node);
     free(key);
 }
 
 /*!
  * \internal
  * \brief Inject fictitious scheduler inputs
  *
  * \param[in,out] scheduler   Scheduler data
  * \param[in,out] cib         CIB object for scheduler input to modify
  * \param[in]     injections  Injections to apply
  */
 void
 pcmk__inject_scheduler_input(pcmk_scheduler_t *scheduler, cib_t *cib,
                              const pcmk_injections_t *injections)
 {
     int rc = pcmk_ok;
     const GList *iter = NULL;
     xmlNode *cib_node = NULL;
     pcmk__output_t *out = scheduler->priv;
 
     out->message(out, "inject-modify-config", injections->quorum,
                  injections->watchdog);
     if (injections->quorum != NULL) {
         xmlNode *top = pcmk__xe_create(NULL, PCMK_XE_CIB);
 
         /* crm_xml_add(top, PCMK_XA_DC_UUID, dc_uuid);      */
         crm_xml_add(top, PCMK_XA_HAVE_QUORUM, injections->quorum);
 
         rc = cib->cmds->modify(cib, NULL, top, cib_sync_call);
         CRM_ASSERT(rc == pcmk_ok);
     }
 
     if (injections->watchdog != NULL) {
         rc = cib__update_node_attr(out, cib, cib_sync_call, PCMK_XE_CRM_CONFIG,
                                    NULL, NULL, NULL, NULL,
                                    PCMK_OPT_HAVE_WATCHDOG, injections->watchdog,
                                    NULL, NULL);
         CRM_ASSERT(rc == pcmk_rc_ok);
     }
 
     for (iter = injections->node_up; iter != NULL; iter = iter->next) {
         const char *node = (const char *) iter->data;
 
         out->message(out, "inject-modify-node", "Online", node);
 
         cib_node = pcmk__inject_node_state_change(cib, node, true);
         CRM_ASSERT(cib_node != NULL);
 
         rc = cib->cmds->modify(cib, PCMK_XE_STATUS, cib_node, cib_sync_call);
         CRM_ASSERT(rc == pcmk_ok);
         pcmk__xml_free(cib_node);
     }
 
     for (iter = injections->node_down; iter != NULL; iter = iter->next) {
         const char *node = (const char *) iter->data;
         char *xpath = NULL;
 
         out->message(out, "inject-modify-node", "Offline", node);
 
         cib_node = pcmk__inject_node_state_change(cib, node, false);
         CRM_ASSERT(cib_node != NULL);
 
         rc = cib->cmds->modify(cib, PCMK_XE_STATUS, cib_node, cib_sync_call);
         CRM_ASSERT(rc == pcmk_ok);
         pcmk__xml_free(cib_node);
 
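         // Also wipe the node's resource history and transient attributes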
         xpath = crm_strdup_printf("//" PCMK__XE_NODE_STATE
                                   "[@" PCMK_XA_UNAME "='%s']"
                                   "/" PCMK__XE_LRM,
                                   node);
         cib->cmds->remove(cib, xpath, NULL, cib_xpath|cib_sync_call);
         free(xpath);
 
         xpath = crm_strdup_printf("//" PCMK__XE_NODE_STATE
                                   "[@" PCMK_XA_UNAME "='%s']"
                                   "/" PCMK__XE_TRANSIENT_ATTRIBUTES,
                                   node);
         cib->cmds->remove(cib, xpath, NULL, cib_xpath|cib_sync_call);
         free(xpath);
     }
 
     for (iter = injections->node_fail; iter != NULL; iter = iter->next) {
         const char *node = (const char *) iter->data;
 
         out->message(out, "inject-modify-node", "Failing", node);
 
         cib_node = pcmk__inject_node_state_change(cib, node, true);
         CRM_ASSERT(cib_node != NULL);
         crm_xml_add(cib_node, PCMK__XA_IN_CCM, PCMK_VALUE_FALSE);
 
         rc = cib->cmds->modify(cib, PCMK_XE_STATUS, cib_node, cib_sync_call);
         CRM_ASSERT(rc == pcmk_ok);
         pcmk__xml_free(cib_node);
     }
 
     for (iter = injections->ticket_grant; iter != NULL; iter = iter->next) {
         const char *ticket_id = (const char *) iter->data;
 
         out->message(out, "inject-modify-ticket", "Granting", ticket_id);
 
         rc = set_ticket_state_attr(out, ticket_id, PCMK__XA_GRANTED, true, cib);
         CRM_ASSERT(rc == pcmk_rc_ok);
     }
 
     for (iter = injections->ticket_revoke; iter != NULL; iter = iter->next) {
         const char *ticket_id = (const char *) iter->data;
 
         out->message(out, "inject-modify-ticket", "Revoking", ticket_id);
 
         rc = set_ticket_state_attr(out, ticket_id, PCMK__XA_GRANTED, false,
                                    cib);
         CRM_ASSERT(rc == pcmk_rc_ok);
     }
 
     for (iter = injections->ticket_standby; iter != NULL; iter = iter->next) {
         const char *ticket_id = (const char *) iter->data;
 
         out->message(out, "inject-modify-ticket", "Standby", ticket_id);
 
         rc = set_ticket_state_attr(out, ticket_id, PCMK_XA_STANDBY, true, cib);
         CRM_ASSERT(rc == pcmk_rc_ok);
     }
 
     for (iter = injections->ticket_activate; iter != NULL; iter = iter->next) {
         const char *ticket_id = (const char *) iter->data;
 
         out->message(out, "inject-modify-ticket", "Activating", ticket_id);
 
         rc = set_ticket_state_attr(out, ticket_id, PCMK_XA_STANDBY, false, cib);
         CRM_ASSERT(rc == pcmk_rc_ok);
     }
 
     for (iter = injections->op_inject; iter != NULL; iter = iter->next) {
         inject_action(out, (const char *) iter->data, cib, scheduler);
     }
 
     if (!out->is_quiet(out)) {
         out->end_list(out);
     }
 }
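 
 /* Illustrative caller-side sketch (not part of this file), assuming the
  * caller allocates the struct with calloc(), owns the injected strings, and
  * has already pointed scheduler->priv at a pcmk__output_t as this function
  * expects:
  *
  *     pcmk_injections_t *injections = calloc(1, sizeof(pcmk_injections_t));
  *
  *     injections->node_up = g_list_append(injections->node_up,
  *                                         g_strdup("node1"));
  *     injections->quorum = strdup("true");
  *
  *     pcmk__inject_scheduler_input(scheduler, cib, injections);
  *     pcmk_free_injections(injections);
  */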
 
 void
 pcmk_free_injections(pcmk_injections_t *injections)
 {
     if (injections == NULL) {
         return;
     }
 
     g_list_free_full(injections->node_up, g_free);
     g_list_free_full(injections->node_down, g_free);
     g_list_free_full(injections->node_fail, g_free);
     g_list_free_full(injections->op_fail, g_free);
     g_list_free_full(injections->op_inject, g_free);
     g_list_free_full(injections->ticket_grant, g_free);
     g_list_free_full(injections->ticket_revoke, g_free);
     g_list_free_full(injections->ticket_standby, g_free);
     g_list_free_full(injections->ticket_activate, g_free);
     free(injections->quorum);
     free(injections->watchdog);
 
     free(injections);
 }
diff --git a/lib/pacemaker/pcmk_sched_actions.c b/lib/pacemaker/pcmk_sched_actions.c
index 97da163022..6e31a6dbe9 100644
--- a/lib/pacemaker/pcmk_sched_actions.c
+++ b/lib/pacemaker/pcmk_sched_actions.c
@@ -1,1945 +1,1945 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdio.h>
 #include <sys/param.h>
 #include <glib.h>
 
 #include <crm/lrmd_internal.h>
 #include <crm/common/scheduler_internal.h>
 #include <pacemaker-internal.h>
 #include "libpacemaker_private.h"
 
 /*!
  * \internal
  * \brief Get the action flags relevant to ordering constraints
  *
  * \param[in,out] action  Action to check
  * \param[in]     node    Node that *other* action in the ordering is on
  *                        (used only for clone resource actions)
  *
  * \return Action flags that should be used for orderings
  */
 static uint32_t
 action_flags_for_ordering(pcmk_action_t *action, const pcmk_node_t *node)
 {
     bool runnable = false;
     uint32_t flags;
 
     // For non-resource actions, return the action flags
     if (action->rsc == NULL) {
         return action->flags;
     }
 
     /* For non-clone resources, or a clone action not assigned to a node,
      * return the flags as determined by the resource method without a node
      * specified.
      */
     flags = action->rsc->private->cmds->action_flags(action, NULL);
     if ((node == NULL) || !pcmk__is_clone(action->rsc)) {
         return flags;
     }
 
     /* Otherwise (i.e., for clone resource actions on a specific node), first
      * remember whether the non-node-specific action is runnable.
      */
     runnable = pcmk_is_set(flags, pcmk_action_runnable);
 
     // Then recheck the resource method with the node
     flags = action->rsc->private->cmds->action_flags(action, node);
 
     /* For clones in ordering constraints, the node-specific "runnable" doesn't
      * matter, just the non-node-specific setting (i.e., is the action runnable
      * anywhere).
      *
      * This applies only to runnable, and only for ordering constraints. This
      * function shouldn't be used for other types of constraints without
      * changes. Not very satisfying, but it's logical and appears to work well.
      */
     if (runnable && !pcmk_is_set(flags, pcmk_action_runnable)) {
         pcmk__set_raw_action_flags(flags, action->rsc->id,
                                    pcmk_action_runnable);
     }
     return flags;
 }
 
 /*!
  * \internal
  * \brief Get action UUID that should be used with a resource ordering
  *
  * When an action is ordered relative to an action for a collective resource
  * (clone, group, or bundle), it actually needs to be ordered after all
  * instances of the collective have completed the relevant action (for example,
  * given "start CLONE then start RSC", RSC must wait until all instances of
  * CLONE have started). Given the UUID and resource of the first action in an
  * ordering, this returns the UUID of the action that should actually be used
  * for ordering (for example, "CLONE_started_0" instead of "CLONE_start_0").
  *
  * \param[in] first_uuid    UUID of first action in ordering
  * \param[in] first_rsc     Resource of first action in ordering
  *
  * \return Newly allocated copy of UUID to use with ordering
  * \note It is the caller's responsibility to free the return value.
  */
 static char *
 action_uuid_for_ordering(const char *first_uuid,
                          const pcmk_resource_t *first_rsc)
 {
     guint interval_ms = 0;
     char *uuid = NULL;
     char *rid = NULL;
     char *first_task_str = NULL;
     enum action_tasks first_task = pcmk_action_unspecified;
     enum action_tasks remapped_task = pcmk_action_unspecified;
 
     // Only non-notify actions for collective resources need remapping
     if ((strstr(first_uuid, PCMK_ACTION_NOTIFY) != NULL)
         || (first_rsc->private->variant < pcmk__rsc_variant_group)) {
         goto done;
     }
 
     // Only non-recurring actions need remapping
     CRM_ASSERT(parse_op_key(first_uuid, &rid, &first_task_str, &interval_ms));
     if (interval_ms > 0) {
         goto done;
     }
 
     first_task = pcmk_parse_action(first_task_str);
     switch (first_task) {
         case pcmk_action_stop:
         case pcmk_action_start:
         case pcmk_action_notify:
         case pcmk_action_promote:
         case pcmk_action_demote:
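             /* This remapping relies on enum action_tasks listing each of
              * these actions immediately before its completed counterpart
              * (for example, start is followed by started)
              */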
             remapped_task = first_task + 1;
             break;
         case pcmk_action_stopped:
         case pcmk_action_started:
         case pcmk_action_notified:
         case pcmk_action_promoted:
         case pcmk_action_demoted:
             remapped_task = first_task;
             break;
         case pcmk_action_monitor:
         case pcmk_action_shutdown:
         case pcmk_action_fence:
             break;
         default:
             crm_err("Unknown action '%s' in ordering", first_task_str);
             break;
     }
 
     if (remapped_task != pcmk_action_unspecified) {
         /* If a clone or bundle has notifications enabled, the ordering will be
          * relative to when notifications have been sent for the remapped task.
          */
         if (pcmk_is_set(first_rsc->flags, pcmk__rsc_notify)
             && (pcmk__is_clone(first_rsc) || pcmk__is_bundled(first_rsc))) {
             uuid = pcmk__notify_key(rid, "confirmed-post",
                                     pcmk_action_text(remapped_task));
         } else {
             uuid = pcmk__op_key(rid, pcmk_action_text(remapped_task), 0);
         }
         pcmk__rsc_trace(first_rsc,
                         "Remapped action UUID %s to %s for ordering purposes",
                         first_uuid, uuid);
     }
 
 done:
     free(first_task_str);
     free(rid);
     return (uuid != NULL)? uuid : pcmk__str_copy(first_uuid);
 }
 
 /*!
  * \internal
  * \brief Get actual action that should be used with an ordering
  *
  * When an action is ordered relative to an action for a collective resource
  * (clone, group, or bundle), it actually needs to be ordered after all
  * instances of the collective have completed the relevant action (for example,
  * given "start CLONE then start RSC", RSC must wait until all instances of
  * CLONE have started). Given the first action in an ordering, this returns the
  * action that should actually be used for ordering (for example, the
  * started action instead of the start action).
  *
  * \param[in] action  First action in an ordering
  *
  * \return Actual action that should be used for the ordering
  */
 static pcmk_action_t *
 action_for_ordering(pcmk_action_t *action)
 {
     pcmk_action_t *result = action;
     pcmk_resource_t *rsc = action->rsc;
 
     if (rsc == NULL) {
         return result;
     }
 
     if ((rsc->private->variant >= pcmk__rsc_variant_group)
         && (action->uuid != NULL)) {
         char *uuid = action_uuid_for_ordering(action->uuid, rsc);
 
         result = find_first_action(rsc->private->actions, uuid, NULL, NULL);
         if (result == NULL) {
             crm_warn("Not remapping %s to %s because %s does not have "
                      "remapped action", action->uuid, uuid, rsc->id);
             result = action;
         }
         free(uuid);
     }
     return result;
 }
 
 /*!
  * \internal
  * \brief Wrapper for update_ordered_actions() method for readability
  *
  * \param[in,out] rsc        Resource to call method for
  * \param[in,out] first      'First' action in an ordering
  * \param[in,out] then       'Then' action in an ordering
  * \param[in]     node       If not NULL, limit scope of ordering to this
  *                           node (only used when interleaving instances)
  * \param[in]     flags      Action flags for \p first for ordering purposes
  * \param[in]     filter     Action flags to limit scope of certain updates
  *                           (may include pcmk_action_optional to affect only
  *                           mandatory actions, and pcmk_action_runnable to
  *                           affect only runnable actions)
  * \param[in]     type       Group of enum pcmk__action_relation_flags to apply
  * \param[in,out] scheduler  Scheduler data
  *
  * \return Group of enum pcmk__updated flags indicating what was updated
  */
 static inline uint32_t
 update(pcmk_resource_t *rsc, pcmk_action_t *first, pcmk_action_t *then,
        const pcmk_node_t *node, uint32_t flags, uint32_t filter, uint32_t type,
        pcmk_scheduler_t *scheduler)
 {
     return rsc->private->cmds->update_ordered_actions(first, then, node, flags,
                                                       filter, type, scheduler);
 }
 
 /*!
  * \internal
  * \brief Update flags for ordering's actions appropriately for ordering's flags
  *
  * \param[in,out] first        First action in an ordering
  * \param[in,out] then         Then action in an ordering
  * \param[in]     first_flags  Action flags for \p first for ordering purposes
  * \param[in]     then_flags   Action flags for \p then for ordering purposes
  * \param[in,out] order        Action wrapper for \p first in ordering
  * \param[in,out] scheduler    Scheduler data
  *
  * \return Group of enum pcmk__updated flags
  */
 static uint32_t
 update_action_for_ordering_flags(pcmk_action_t *first, pcmk_action_t *then,
                                  uint32_t first_flags, uint32_t then_flags,
                                  pcmk__related_action_t *order,
                                  pcmk_scheduler_t *scheduler)
 {
     uint32_t changed = pcmk__updated_none;
 
     /* The node will only be used for clones. If interleaved, node will be NULL,
      * otherwise the ordering scope will be limited to the node. Normally, the
      * whole 'then' clone should restart if 'first' is restarted, so then->node
      * is needed.
      */
     pcmk_node_t *node = then->node;
 
     if (pcmk_is_set(order->type, pcmk__ar_first_implies_same_node_then)) {
         /* For unfencing, only instances of 'then' on the same node as 'first'
          * (the unfencing operation) should restart, so reset node to
          * first->node, at which point this case is handled like a normal
          * pcmk__ar_first_implies_then.
          */
         pcmk__clear_relation_flags(order->type,
                                    pcmk__ar_first_implies_same_node_then);
         pcmk__set_relation_flags(order->type, pcmk__ar_first_implies_then);
         node = first->node;
         pcmk__rsc_trace(then->rsc,
                         "%s then %s: mapped "
                         "pcmk__ar_first_implies_same_node_then to "
                         "pcmk__ar_first_implies_then on %s",
                         first->uuid, then->uuid, pcmk__node_name(node));
     }
 
     if (pcmk_is_set(order->type, pcmk__ar_first_implies_then)) {
         if (then->rsc != NULL) {
             changed |= update(then->rsc, first, then, node,
                               first_flags & pcmk_action_optional,
                               pcmk_action_optional, pcmk__ar_first_implies_then,
                               scheduler);
         } else if (!pcmk_is_set(first_flags, pcmk_action_optional)
                    && pcmk_is_set(then->flags, pcmk_action_optional)) {
             pcmk__clear_action_flags(then, pcmk_action_optional);
             pcmk__set_updated_flags(changed, first, pcmk__updated_then);
         }
         pcmk__rsc_trace(then->rsc,
                         "%s then %s: %s after pcmk__ar_first_implies_then",
                         first->uuid, then->uuid,
                         (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pcmk__ar_intermediate_stop)
         && (then->rsc != NULL)) {
         enum pe_action_flags restart = pcmk_action_optional
                                        |pcmk_action_runnable;
 
         changed |= update(then->rsc, first, then, node, first_flags, restart,
                           pcmk__ar_intermediate_stop, scheduler);
         pcmk__rsc_trace(then->rsc,
                         "%s then %s: %s after pcmk__ar_intermediate_stop",
                         first->uuid, then->uuid,
                         (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pcmk__ar_then_implies_first)) {
         if (first->rsc != NULL) {
             changed |= update(first->rsc, first, then, node, first_flags,
                               pcmk_action_optional, pcmk__ar_then_implies_first,
                               scheduler);
         } else if (!pcmk_is_set(first_flags, pcmk_action_optional)
                    && pcmk_is_set(first->flags, pcmk_action_runnable)) {
             pcmk__clear_action_flags(first, pcmk_action_runnable);
             pcmk__set_updated_flags(changed, first, pcmk__updated_first);
         }
         pcmk__rsc_trace(then->rsc,
                         "%s then %s: %s after pcmk__ar_then_implies_first",
                         first->uuid, then->uuid,
                         (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pcmk__ar_promoted_then_implies_first)) {
         if (then->rsc != NULL) {
             changed |= update(then->rsc, first, then, node,
                               first_flags & pcmk_action_optional,
                               pcmk_action_optional,
                               pcmk__ar_promoted_then_implies_first, scheduler);
         }
         pcmk__rsc_trace(then->rsc,
                         "%s then %s: %s after "
                         "pcmk__ar_promoted_then_implies_first",
                         first->uuid, then->uuid,
                         (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pcmk__ar_min_runnable)) {
         if (then->rsc != NULL) {
             changed |= update(then->rsc, first, then, node, first_flags,
                               pcmk_action_runnable, pcmk__ar_min_runnable,
                               scheduler);
 
         } else if (pcmk_is_set(first_flags, pcmk_action_runnable)) {
             // We have another runnable instance of "first"
             then->runnable_before++;
 
             /* Mark "then" as runnable if it requires a certain number of
              * "before" instances to be runnable, and they now are.
              */
             if ((then->runnable_before >= then->required_runnable_before)
                 && !pcmk_is_set(then->flags, pcmk_action_runnable)) {
 
                 pcmk__set_action_flags(then, pcmk_action_runnable);
                 pcmk__set_updated_flags(changed, first, pcmk__updated_then);
             }
         }
         pcmk__rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_min_runnable",
                         first->uuid, then->uuid,
                         (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pcmk__ar_nested_remote_probe)
         && (then->rsc != NULL)) {
 
         if (!pcmk_is_set(first_flags, pcmk_action_runnable)
             && (first->rsc != NULL)
             && (first->rsc->private->active_nodes != NULL)) {
 
             pcmk__rsc_trace(then->rsc,
                             "%s then %s: ignoring because first is stopping",
                             first->uuid, then->uuid);
             order->type = (enum pe_ordering) pcmk__ar_none;
         } else {
             changed |= update(then->rsc, first, then, node, first_flags,
                               pcmk_action_runnable,
                               pcmk__ar_unrunnable_first_blocks, scheduler);
         }
         pcmk__rsc_trace(then->rsc,
                         "%s then %s: %s after pcmk__ar_nested_remote_probe",
                         first->uuid, then->uuid,
                         (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pcmk__ar_unrunnable_first_blocks)) {
         if (then->rsc != NULL) {
             changed |= update(then->rsc, first, then, node, first_flags,
                               pcmk_action_runnable,
                               pcmk__ar_unrunnable_first_blocks, scheduler);
 
         } else if (!pcmk_is_set(first_flags, pcmk_action_runnable)
                    && pcmk_is_set(then->flags, pcmk_action_runnable)) {
 
             pcmk__clear_action_flags(then, pcmk_action_runnable);
             pcmk__set_updated_flags(changed, first, pcmk__updated_then);
         }
         pcmk__rsc_trace(then->rsc,
                         "%s then %s: %s after pcmk__ar_unrunnable_first_blocks",
                         first->uuid, then->uuid,
                         (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pcmk__ar_unmigratable_then_blocks)) {
         if (then->rsc != NULL) {
             changed |= update(then->rsc, first, then, node, first_flags,
                               pcmk_action_optional,
                               pcmk__ar_unmigratable_then_blocks, scheduler);
         }
         pcmk__rsc_trace(then->rsc,
                         "%s then %s: %s after "
                         "pcmk__ar_unmigratable_then_blocks",
                         first->uuid, then->uuid,
                         (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pcmk__ar_first_else_then)) {
         if (then->rsc != NULL) {
             changed |= update(then->rsc, first, then, node, first_flags,
                               pcmk_action_optional, pcmk__ar_first_else_then,
                               scheduler);
         }
         pcmk__rsc_trace(then->rsc,
                         "%s then %s: %s after pcmk__ar_first_else_then",
                         first->uuid, then->uuid,
                         (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pcmk__ar_ordered)) {
         if (then->rsc != NULL) {
             changed |= update(then->rsc, first, then, node, first_flags,
                               pcmk_action_runnable, pcmk__ar_ordered,
                               scheduler);
         }
         pcmk__rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_ordered",
                         first->uuid, then->uuid,
                         (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pcmk__ar_asymmetric)) {
         if (then->rsc != NULL) {
             changed |= update(then->rsc, first, then, node, first_flags,
                               pcmk_action_runnable, pcmk__ar_asymmetric,
                               scheduler);
         }
         pcmk__rsc_trace(then->rsc, "%s then %s: %s after pcmk__ar_asymmetric",
                         first->uuid, then->uuid,
                         (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(first->flags, pcmk_action_runnable)
         && pcmk_is_set(order->type, pcmk__ar_first_implies_then_graphed)
         && !pcmk_is_set(first_flags, pcmk_action_optional)) {
 
         pcmk__rsc_trace(then->rsc, "%s will be in graph because %s is required",
                         then->uuid, first->uuid);
         pcmk__set_action_flags(then, pcmk_action_always_in_graph);
         // Don't bother marking 'then' as changed just for this
     }
 
     if (pcmk_is_set(order->type, pcmk__ar_then_implies_first_graphed)
         && !pcmk_is_set(then_flags, pcmk_action_optional)) {
 
         pcmk__rsc_trace(then->rsc, "%s will be in graph because %s is required",
                         first->uuid, then->uuid);
         pcmk__set_action_flags(first, pcmk_action_always_in_graph);
         // Don't bother marking 'first' as changed just for this
     }
 
     if (pcmk_any_flags_set(order->type, pcmk__ar_first_implies_then
                                         |pcmk__ar_then_implies_first
                                         |pcmk__ar_intermediate_stop)
         && (first->rsc != NULL)
         && !pcmk_is_set(first->rsc->flags, pcmk__rsc_managed)
         && pcmk_is_set(first->rsc->flags, pcmk__rsc_blocked)
         && !pcmk_is_set(first->flags, pcmk_action_runnable)
         && pcmk__str_eq(first->task, PCMK_ACTION_STOP, pcmk__str_none)) {
 
         if (pcmk_is_set(then->flags, pcmk_action_runnable)) {
             pcmk__clear_action_flags(then, pcmk_action_runnable);
             pcmk__set_updated_flags(changed, first, pcmk__updated_then);
         }
         pcmk__rsc_trace(then->rsc,
                         "%s then %s: %s after checking whether first "
                         "is blocked, unmanaged, unrunnable stop",
                         first->uuid, then->uuid,
                         (changed? "changed" : "unchanged"));
     }
 
     return changed;
 }
 
 // Convenience macros for logging action properties
 
 #define action_type_str(flags) \
     (pcmk_is_set((flags), pcmk_action_pseudo)? "pseudo-action" : "action")
 
 #define action_optional_str(flags) \
     (pcmk_is_set((flags), pcmk_action_optional)? "optional" : "required")
 
 #define action_runnable_str(flags) \
     (pcmk_is_set((flags), pcmk_action_runnable)? "runnable" : "unrunnable")
 
 #define action_node_str(a) \
     (((a)->node == NULL)? "no node" : (a)->node->private->name)
 
 /*!
  * \internal
  * \brief Update an action's flags for all orderings where it is "then"
  *
  * \param[in,out] then       Action to update
  * \param[in,out] scheduler  Scheduler data
  */
 void
 pcmk__update_action_for_orderings(pcmk_action_t *then,
                                   pcmk_scheduler_t *scheduler)
 {
     GList *lpc = NULL;
     uint32_t changed = pcmk__updated_none;
     int last_flags = then->flags;
 
     pcmk__rsc_trace(then->rsc, "Updating %s %s (%s %s) on %s",
                     action_type_str(then->flags), then->uuid,
                     action_optional_str(then->flags),
                     action_runnable_str(then->flags), action_node_str(then));
 
     if (pcmk_is_set(then->flags, pcmk_action_min_runnable)) {
         /* Initialize current known "runnable before" actions. As
          * update_action_for_ordering_flags() is called for each of then's
          * before actions, this number will increment as runnable 'first'
          * actions are encountered.
          */
         then->runnable_before = 0;
 
         if (then->required_runnable_before == 0) {
             /* @COMPAT This ordering constraint uses the deprecated
              * PCMK_XA_REQUIRE_ALL=PCMK_VALUE_FALSE attribute. Treat it like
              * PCMK_META_CLONE_MIN=1.
              */
             then->required_runnable_before = 1;
         }
 
         /* The pcmk__ar_min_runnable clause of
          * update_action_for_ordering_flags() (called below)
          * will reset runnable if appropriate.
          */
         pcmk__clear_action_flags(then, pcmk_action_runnable);
     }
 
     for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) {
         pcmk__related_action_t *other = lpc->data;
         pcmk_action_t *first = other->action;
 
         pcmk_node_t *then_node = then->node;
         pcmk_node_t *first_node = first->node;
 
         if ((first->rsc != NULL)
             && pcmk__is_group(first->rsc)
             && pcmk__str_eq(first->task, PCMK_ACTION_START, pcmk__str_none)) {
 
             first_node = first->rsc->private->fns->location(first->rsc, NULL,
                                                             FALSE);
             if (first_node != NULL) {
                 pcmk__rsc_trace(first->rsc, "Found %s for 'first' %s",
                                 pcmk__node_name(first_node), first->uuid);
             }
         }
 
         if (pcmk__is_group(then->rsc)
             && pcmk__str_eq(then->task, PCMK_ACTION_START, pcmk__str_none)) {
 
             then_node = then->rsc->private->fns->location(then->rsc, NULL,
                                                           FALSE);
             if (then_node != NULL) {
                 pcmk__rsc_trace(then->rsc, "Found %s for 'then' %s",
                                 pcmk__node_name(then_node), then->uuid);
             }
         }
 
         // Disable constraint if it only applies when on same node, but isn't
         if (pcmk_is_set(other->type, pcmk__ar_if_on_same_node)
             && (first_node != NULL) && (then_node != NULL)
             && !pcmk__same_node(first_node, then_node)) {
 
             pcmk__rsc_trace(then->rsc,
                             "Disabled ordering %s on %s then %s on %s: "
                             "not same node",
                             other->action->uuid, pcmk__node_name(first_node),
                             then->uuid, pcmk__node_name(then_node));
             other->type = (enum pe_ordering) pcmk__ar_none;
             continue;
         }
 
         pcmk__clear_updated_flags(changed, then, pcmk__updated_first);
 
         if ((first->rsc != NULL)
             && pcmk_is_set(other->type, pcmk__ar_then_cancels_first)
             && !pcmk_is_set(then->flags, pcmk_action_optional)) {
 
             /* 'then' is required, so we must abandon 'first'
              * (e.g. a required stop cancels any agent reload).
              */
             pcmk__set_action_flags(other->action, pcmk_action_optional);
             if (!strcmp(first->task, PCMK_ACTION_RELOAD_AGENT)) {
                 pcmk__clear_rsc_flags(first->rsc, pcmk__rsc_reload);
             }
         }
 
         if ((first->rsc != NULL) && (then->rsc != NULL)
             && (first->rsc != then->rsc) && !is_parent(then->rsc, first->rsc)) {
             first = action_for_ordering(first);
         }
         if (first != other->action) {
             pcmk__rsc_trace(then->rsc, "Ordering %s after %s instead of %s",
                             then->uuid, first->uuid, other->action->uuid);
         }
 
         pcmk__rsc_trace(then->rsc,
                         "%s (%#.6x) then %s (%#.6x): type=%#.6x node=%s",
                         first->uuid, first->flags, then->uuid, then->flags,
                         other->type, action_node_str(first));
 
         if (first == other->action) {
             /* 'first' was not remapped (e.g. from 'start' to 'running'), which
              * could mean it is a non-resource action, a primitive resource
              * action, or already expanded.
              */
             uint32_t first_flags, then_flags;
 
             first_flags = action_flags_for_ordering(first, then_node);
             then_flags = action_flags_for_ordering(then, first_node);
 
             changed |= update_action_for_ordering_flags(first, then,
                                                         first_flags, then_flags,
                                                         other, scheduler);
 
             /* 'first' was for a complex resource (clone, group, etc),
              * create a new dependency if necessary
              */
         } else if (order_actions(first, then, other->type)) {
             /* This was the first time 'first' and 'then' were associated,
              * start again to get the new actions_before list
              */
             pcmk__set_updated_flags(changed, then, pcmk__updated_then);
             pcmk__rsc_trace(then->rsc,
                             "Disabled ordering %s then %s in favor of %s "
                             "then %s",
                             other->action->uuid, then->uuid, first->uuid,
                             then->uuid);
             other->type = (enum pe_ordering) pcmk__ar_none;
         }
 
         if (pcmk_is_set(changed, pcmk__updated_first)) {
             crm_trace("Re-processing %s and its 'after' actions "
                       "because it changed", first->uuid);
             for (GList *lpc2 = first->actions_after; lpc2 != NULL;
                  lpc2 = lpc2->next) {
                 pcmk__related_action_t *other = lpc2->data;
 
                 pcmk__update_action_for_orderings(other->action, scheduler);
             }
             pcmk__update_action_for_orderings(first, scheduler);
         }
     }
 
     if (pcmk_is_set(then->flags, pcmk_action_min_runnable)) {
         if (last_flags == then->flags) {
             pcmk__clear_updated_flags(changed, then, pcmk__updated_then);
         } else {
             pcmk__set_updated_flags(changed, then, pcmk__updated_then);
         }
     }
 
     if (pcmk_is_set(changed, pcmk__updated_then)) {
         crm_trace("Re-processing %s and its 'after' actions because it changed",
                   then->uuid);
         if (pcmk_is_set(last_flags, pcmk_action_runnable)
             && !pcmk_is_set(then->flags, pcmk_action_runnable)) {
             pcmk__block_colocation_dependents(then);
         }
         pcmk__update_action_for_orderings(then, scheduler);
         for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) {
             pcmk__related_action_t *other = lpc->data;
 
             pcmk__update_action_for_orderings(other->action, scheduler);
         }
     }
 }
 
 static inline bool
 is_primitive_action(const pcmk_action_t *action)
 {
     return (action != NULL) && pcmk__is_primitive(action->rsc);
 }
 
 /*!
  * \internal
  * \brief Clear a single action flag and set reason text
  *
  * \param[in,out] action  Action whose flag should be cleared
  * \param[in]     flag    Action flag that should be cleared
  * \param[in]     reason  Action that is the reason why flag is being cleared
  */
 #define clear_action_flag_because(action, flag, reason) do {                \
         if (pcmk_is_set((action)->flags, (flag))) {                         \
             pcmk__clear_action_flags(action, flag);                         \
             if ((action)->rsc != (reason)->rsc) {                           \
                 char *reason_text = pe__action2reason((reason), (flag));    \
                 pe_action_set_reason((action), reason_text, false);         \
                 free(reason_text);                                          \
             }                                                               \
         }                                                                   \
     } while (0)
 
 /*!
  * \internal
  * \brief Update actions in an asymmetric ordering
  *
  * If the "first" action in an asymmetric ordering is unrunnable, make the
  * "second" action unrunnable as well, if appropriate.
  *
  * \param[in]     first  'First' action in an asymmetric ordering
  * \param[in,out] then   'Then' action in an asymmetric ordering
  */
 static void
 handle_asymmetric_ordering(const pcmk_action_t *first, pcmk_action_t *then)
 {
     /* Only resource actions after an unrunnable 'first' action need updates for
      * asymmetric ordering.
      */
     if ((then->rsc == NULL)
         || pcmk_is_set(first->flags, pcmk_action_runnable)) {
         return;
     }
 
     // Certain optional 'then' actions are unaffected by unrunnable 'first'
     if (pcmk_is_set(then->flags, pcmk_action_optional)) {
         enum rsc_role_e then_rsc_role;
 
         then_rsc_role = then->rsc->private->fns->state(then->rsc, TRUE);
 
         if ((then_rsc_role == pcmk_role_stopped)
             && pcmk__str_eq(then->task, PCMK_ACTION_STOP, pcmk__str_none)) {
             /* If 'then' should stop after 'first' but is already stopped, the
              * ordering is irrelevant.
              */
             return;
         } else if ((then_rsc_role >= pcmk_role_started)
             && pcmk__str_eq(then->task, PCMK_ACTION_START, pcmk__str_none)
             && pe__rsc_running_on_only(then->rsc, then->node)) {
             /* Similarly if 'then' should start after 'first' but is already
              * started on a single node.
              */
             return;
         }
     }
 
     // 'First' can't run, so 'then' can't either
     clear_action_flag_because(then, pcmk_action_optional, first);
     clear_action_flag_because(then, pcmk_action_runnable, first);
 }
 
 /*!
  * \internal
  * \brief Set action bits appropriately when pcmk__ar_intermediate_stop is used
  *
  * \param[in,out] first   'First' action in ordering
  * \param[in,out] then    'Then' action in ordering
  * \param[in]     filter  What action flags to care about
  *
  * \note pcmk__ar_intermediate_stop is set for "stop resource before starting
  *       it" and "stop later group member before stopping earlier group member"
  */
 static void
 handle_restart_ordering(pcmk_action_t *first, pcmk_action_t *then,
                         uint32_t filter)
 {
     const char *reason = NULL;
 
     CRM_ASSERT(is_primitive_action(first));
     CRM_ASSERT(is_primitive_action(then));
 
     // We need to update the action in two cases:
 
     // ... if 'then' is required
     if (pcmk_is_set(filter, pcmk_action_optional)
         && !pcmk_is_set(then->flags, pcmk_action_optional)) {
         reason = "restart";
     }
 
     /* ... if 'then' is unrunnable action on same resource (if a resource
      * should restart but can't start, we still want to stop)
      */
     if (pcmk_is_set(filter, pcmk_action_runnable)
         && !pcmk_is_set(then->flags, pcmk_action_runnable)
         && pcmk_is_set(then->rsc->flags, pcmk__rsc_managed)
         && (first->rsc == then->rsc)) {
         reason = "stop";
     }
 
     if (reason == NULL) {
         return;
     }
 
     pcmk__rsc_trace(first->rsc, "Handling %s -> %s for %s",
                     first->uuid, then->uuid, reason);
 
     // Make 'first' required if it is runnable
     if (pcmk_is_set(first->flags, pcmk_action_runnable)) {
         clear_action_flag_because(first, pcmk_action_optional, then);
     }
 
     // Make 'first' required if 'then' is required
     if (!pcmk_is_set(then->flags, pcmk_action_optional)) {
         clear_action_flag_because(first, pcmk_action_optional, then);
     }
 
     // Make 'first' unmigratable if 'then' is unmigratable
     if (!pcmk_is_set(then->flags, pcmk_action_migratable)) {
         clear_action_flag_because(first, pcmk_action_migratable, then);
     }
 
     // Make 'then' unrunnable if 'first' is required but unrunnable
     if (!pcmk_is_set(first->flags, pcmk_action_optional)
         && !pcmk_is_set(first->flags, pcmk_action_runnable)) {
         clear_action_flag_because(then, pcmk_action_runnable, first);
     }
 }
 
 /*!
  * \internal
  * \brief Update two actions according to an ordering between them
  *
  * Given information about an ordering of two actions, update the actions' flags
  * (and runnable_before members where relevant) as appropriate for the ordering.
  * Effects may cascade to other orderings involving the actions as well.
  *
  * \param[in,out] first      'First' action in an ordering
  * \param[in,out] then       'Then' action in an ordering
  * \param[in]     node       If not NULL, limit scope of ordering to this node
  *                           (ignored)
  * \param[in]     flags      Action flags for \p first for ordering purposes
  * \param[in]     filter     Action flags to limit scope of certain updates (may
  *                           include pcmk_action_optional to affect only
  *                           mandatory actions, and pcmk_action_runnable to
  *                           affect only runnable actions)
  * \param[in]     type       Group of enum pcmk__action_relation_flags to apply
  * \param[in,out] scheduler  Scheduler data
  *
  * \return Group of enum pcmk__updated flags indicating what was updated
  */
 uint32_t
 pcmk__update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
                              const pcmk_node_t *node, uint32_t flags,
                              uint32_t filter, uint32_t type,
                              pcmk_scheduler_t *scheduler)
 {
     uint32_t changed = pcmk__updated_none;
     uint32_t then_flags = 0U;
     uint32_t first_flags = 0U;
 
     CRM_ASSERT((first != NULL) && (then != NULL) && (scheduler != NULL));
 
     then_flags = then->flags;
     first_flags = first->flags;
     if (pcmk_is_set(type, pcmk__ar_asymmetric)) {
         handle_asymmetric_ordering(first, then);
     }
 
     if (pcmk_is_set(type, pcmk__ar_then_implies_first)
         && !pcmk_is_set(then_flags, pcmk_action_optional)) {
         // Then is required, and implies first should be, too
 
         if (pcmk_is_set(filter, pcmk_action_optional)
             && !pcmk_is_set(flags, pcmk_action_optional)
             && pcmk_is_set(first_flags, pcmk_action_optional)) {
             clear_action_flag_because(first, pcmk_action_optional, then);
         }
 
         if (pcmk_is_set(flags, pcmk_action_migratable)
             && !pcmk_is_set(then->flags, pcmk_action_migratable)) {
             clear_action_flag_because(first, pcmk_action_migratable, then);
         }
     }
 
     if (pcmk_is_set(type, pcmk__ar_promoted_then_implies_first)
         && (then->rsc != NULL)
         && (then->rsc->private->orig_role == pcmk_role_promoted)
         && pcmk_is_set(filter, pcmk_action_optional)
         && !pcmk_is_set(then->flags, pcmk_action_optional)) {
 
         clear_action_flag_because(first, pcmk_action_optional, then);
 
         if (pcmk_is_set(first->flags, pcmk_action_migratable)
             && !pcmk_is_set(then->flags, pcmk_action_migratable)) {
             clear_action_flag_because(first, pcmk_action_migratable, then);
         }
     }
 
     if (pcmk_is_set(type, pcmk__ar_unmigratable_then_blocks)
         && pcmk_is_set(filter, pcmk_action_optional)) {
 
         if (!pcmk_all_flags_set(then->flags, pcmk_action_migratable
                                              |pcmk_action_runnable)) {
             clear_action_flag_because(first, pcmk_action_runnable, then);
         }
 
         if (!pcmk_is_set(then->flags, pcmk_action_optional)) {
             clear_action_flag_because(first, pcmk_action_optional, then);
         }
     }
 
     if (pcmk_is_set(type, pcmk__ar_first_else_then)
         && pcmk_is_set(filter, pcmk_action_optional)
         && !pcmk_is_set(first->flags, pcmk_action_runnable)) {
 
         clear_action_flag_because(then, pcmk_action_migratable, first);
         pcmk__clear_action_flags(then, pcmk_action_pseudo);
     }
 
     if (pcmk_is_set(type, pcmk__ar_unrunnable_first_blocks)
         && pcmk_is_set(filter, pcmk_action_runnable)
         && pcmk_is_set(then->flags, pcmk_action_runnable)
         && !pcmk_is_set(flags, pcmk_action_runnable)) {
 
         clear_action_flag_because(then, pcmk_action_runnable, first);
         clear_action_flag_because(then, pcmk_action_migratable, first);
     }
 
     if (pcmk_is_set(type, pcmk__ar_first_implies_then)
         && pcmk_is_set(filter, pcmk_action_optional)
         && pcmk_is_set(then->flags, pcmk_action_optional)
         && !pcmk_is_set(flags, pcmk_action_optional)
         && !pcmk_is_set(first->flags, pcmk_action_migratable)) {
 
         clear_action_flag_because(then, pcmk_action_optional, first);
     }
 
     if (pcmk_is_set(type, pcmk__ar_intermediate_stop)) {
         handle_restart_ordering(first, then, filter);
     }
 
     if (then_flags != then->flags) {
         pcmk__set_updated_flags(changed, first, pcmk__updated_then);
         pcmk__rsc_trace(then->rsc,
                         "%s on %s: flags are now %#.6x (was %#.6x) "
                         "because of 'first' %s (%#.6x)",
                         then->uuid, pcmk__node_name(then->node),
                         then->flags, then_flags, first->uuid, first->flags);
 
         if ((then->rsc != NULL) && (then->rsc->private->parent != NULL)) {
             // Required to handle "X_stop then X_start" for cloned groups
             pcmk__update_action_for_orderings(then, scheduler);
         }
     }
 
     if (first_flags != first->flags) {
         pcmk__set_updated_flags(changed, first, pcmk__updated_first);
         pcmk__rsc_trace(first->rsc,
                         "%s on %s: flags are now %#.6x (was %#.6x) "
                         "because of 'then' %s (%#.6x)",
                         first->uuid, pcmk__node_name(first->node),
                         first->flags, first_flags, then->uuid, then->flags);
     }
 
     return changed;
 }
 
 /*!
  * \internal
  * \brief Trace-log an action (optionally with its dependent actions)
  *
  * \param[in] pre_text  If not NULL, prefix the log with this plus ": "
  * \param[in] action    Action to log
  * \param[in] details   If true, recursively log dependent actions
  */
 void
 pcmk__log_action(const char *pre_text, const pcmk_action_t *action,
                  bool details)
 {
     const char *node_uname = NULL;
     const char *node_uuid = NULL;
     const char *desc = NULL;
 
     CRM_CHECK(action != NULL, return);
 
     if (!pcmk_is_set(action->flags, pcmk_action_pseudo)) {
         if (action->node != NULL) {
             node_uname = action->node->private->name;
             node_uuid = action->node->private->id;
         } else {
             node_uname = "<none>";
         }
     }
 
     switch (pcmk_parse_action(action->task)) {
         case pcmk_action_fence:
         case pcmk_action_shutdown:
             if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
                 desc = "Pseudo ";
             } else if (pcmk_is_set(action->flags, pcmk_action_optional)) {
                 desc = "Optional ";
             } else if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
                 desc = "!!Non-Startable!! ";
             } else {
                desc = "(Provisional) ";
             }
             crm_trace("%s%s%sAction %d: %s%s%s%s%s%s",
                       ((pre_text == NULL)? "" : pre_text),
                       ((pre_text == NULL)? "" : ": "),
                       desc, action->id, action->uuid,
                       (node_uname? "\ton " : ""), (node_uname? node_uname : ""),
                       (node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""),
                       (node_uuid? ")" : ""));
             break;
         default:
             if (pcmk_is_set(action->flags, pcmk_action_optional)) {
                 desc = "Optional ";
             } else if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
                 desc = "Pseudo ";
             } else if (!pcmk_is_set(action->flags, pcmk_action_runnable)) {
                 desc = "!!Non-Startable!! ";
             } else {
                desc = "(Provisional) ";
             }
             crm_trace("%s%s%sAction %d: %s %s%s%s%s%s%s",
                       ((pre_text == NULL)? "" : pre_text),
                       ((pre_text == NULL)? "" : ": "),
                       desc, action->id, action->uuid,
                       (action->rsc? action->rsc->id : "<none>"),
                       (node_uname? "\ton " : ""), (node_uname? node_uname : ""),
                       (node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""),
                       (node_uuid? ")" : ""));
             break;
     }
 
     if (details) {
         const GList *iter = NULL;
         const pcmk__related_action_t *other = NULL;
 
         crm_trace("\t\t====== Preceding Actions");
         for (iter = action->actions_before; iter != NULL; iter = iter->next) {
             other = (const pcmk__related_action_t *) iter->data;
             pcmk__log_action("\t\t", other->action, false);
         }
         crm_trace("\t\t====== Subsequent Actions");
         for (iter = action->actions_after; iter != NULL; iter = iter->next) {
             other = (const pcmk__related_action_t *) iter->data;
             pcmk__log_action("\t\t", other->action, false);
         }
         crm_trace("\t\t====== End");
 
     } else {
         crm_trace("\t\t(before=%d, after=%d)",
                   g_list_length(action->actions_before),
                   g_list_length(action->actions_after));
     }
 }
 
 /*!
  * \internal
  * \brief Create a new shutdown action for a node
  *
  * \param[in,out] node  Node being shut down
  *
  * \return Newly created shutdown action for \p node
  */
 pcmk_action_t *
 pcmk__new_shutdown_action(pcmk_node_t *node)
 {
     char *shutdown_id = NULL;
     pcmk_action_t *shutdown_op = NULL;
 
     CRM_ASSERT(node != NULL);
 
     shutdown_id = crm_strdup_printf("%s-%s", PCMK_ACTION_DO_SHUTDOWN,
                                     node->private->name);
 
     shutdown_op = custom_action(NULL, shutdown_id, PCMK_ACTION_DO_SHUTDOWN,
                                 node, FALSE, node->private->scheduler);
 
     pcmk__order_stops_before_shutdown(node, shutdown_op);
     pcmk__insert_meta(shutdown_op, PCMK__META_OP_NO_WAIT, PCMK_VALUE_TRUE);
     return shutdown_op;
 }
 
 /*!
  * \internal
  * \brief Calculate and add an operation digest to XML
  *
  * Calculate an operation digest, which enables us to later determine when a
  * restart is needed due to the resource's parameters being changed, and add it
  * to the given XML.
  *
  * \param[in]     op      Operation result from executor
  * \param[in,out] update  XML to add digest to
  */
 static void
 add_op_digest_to_xml(const lrmd_event_data_t *op, xmlNode *update)
 {
     char *digest = NULL;
     xmlNode *args_xml = NULL;
 
     if (op->params == NULL) {
         return;
     }
     args_xml = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS);
     g_hash_table_foreach(op->params, hash2field, args_xml);
     pcmk__filter_op_for_digest(args_xml);
     digest = pcmk__digest_operation(args_xml);
     crm_xml_add(update, PCMK__XA_OP_DIGEST, digest);
     pcmk__xml_free(args_xml);
     free(digest);
 }
 
 #define FAKE_TE_ID     "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
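 // Used to fabricate a transition key when an operation has no user data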
 
 /*!
  * \internal
  * \brief Create XML for resource operation history update
  *
  * \param[in,out] parent          Parent XML node to add to
  * \param[in,out] op              Operation event data
  * \param[in]     caller_version  DC feature set
  * \param[in]     target_rc       Expected result of operation
  * \param[in]     node            Name of node on which operation was performed
  * \param[in]     origin          Arbitrary description of update source
  *
  * \return Newly created XML node for history update
  */
 xmlNode *
 pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
                          const char *caller_version, int target_rc,
                          const char *node, const char *origin)
 {
     char *key = NULL;
     char *magic = NULL;
     char *op_id = NULL;
     char *op_id_additional = NULL;
     char *local_user_data = NULL;
     const char *exit_reason = NULL;
 
     xmlNode *xml_op = NULL;
     const char *task = NULL;
 
     CRM_CHECK(op != NULL, return NULL);
     crm_trace("Creating history XML for %s-interval %s action for %s on %s "
               "(DC version: %s, origin: %s)",
               pcmk__readable_interval(op->interval_ms), op->op_type, op->rsc_id,
               ((node == NULL)? "no node" : node), caller_version, origin);
 
     task = op->op_type;
 
     /* Record a successful agent reload as a start, and a failed one as a
      * monitor, to make life easier for the scheduler when determining the
      * current state.
      *
      * @COMPAT We should check "reload" here only if the operation was for a
      * pre-OCF-1.1 resource agent, but we don't know that here, and we should
      * only ever get results for actions scheduled by us, so we can reasonably
      * assume any "reload" is actually a pre-1.1 agent reload.
      */
     if (pcmk__str_any_of(task, PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT,
                          NULL)) {
         if (op->op_status == PCMK_EXEC_DONE) {
             task = PCMK_ACTION_START;
         } else {
             task = PCMK_ACTION_MONITOR;
         }
     }
 
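     /* The operation key has the form "<rsc-id>_<task>_<interval-ms>", for
      * example "rsc1_monitor_10000" (hypothetical resource ID)
      */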
     key = pcmk__op_key(op->rsc_id, task, op->interval_ms);
     if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
         const char *n_type = crm_meta_value(op->params, "notify_type");
         const char *n_task = crm_meta_value(op->params, "notify_operation");
 
         CRM_LOG_ASSERT(n_type != NULL);
         CRM_LOG_ASSERT(n_task != NULL);
         op_id = pcmk__notify_key(op->rsc_id, n_type, n_task);
 
         if (op->op_status != PCMK_EXEC_PENDING) {
             /* Ignore notify errors.
              *
              * @TODO It might be better to keep the correct result here, and
              * ignore it in process_graph_event().
              */
             lrmd__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
         }
 
     /* Migration history is preserved separately, which usually matters for
      * multiple nodes and is important for future cluster transitions.
      */
     } else if (pcmk__str_any_of(op->op_type, PCMK_ACTION_MIGRATE_TO,
                                 PCMK_ACTION_MIGRATE_FROM, NULL)) {
         op_id = strdup(key);
 
     } else if (did_rsc_op_fail(op, target_rc)) {
         op_id = pcmk__op_key(op->rsc_id, "last_failure", 0);
         if (op->interval_ms == 0) {
             /* Ensure 'last' gets updated, in case PCMK_META_RECORD_PENDING is
              * true
              */
             op_id_additional = pcmk__op_key(op->rsc_id, "last", 0);
         }
         exit_reason = op->exit_reason;
 
     } else if (op->interval_ms > 0) {
         op_id = strdup(key);
 
     } else {
         op_id = pcmk__op_key(op->rsc_id, "last", 0);
     }
 
   again:
     xml_op = pcmk__xe_first_child(parent, PCMK__XE_LRM_RSC_OP, PCMK_XA_ID,
                                   op_id);
     if (xml_op == NULL) {
         xml_op = pcmk__xe_create(parent, PCMK__XE_LRM_RSC_OP);
     }
 
     if (op->user_data == NULL) {
         crm_debug("Generating fake transition key for: " PCMK__OP_FMT
                   " %d from %s", op->rsc_id, op->op_type, op->interval_ms,
                   op->call_id, origin);
         local_user_data = pcmk__transition_key(-1, op->call_id, target_rc,
                                                FAKE_TE_ID);
         op->user_data = local_user_data;
     }
 
     if (magic == NULL) {
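         // Transition "magic" format: "<op-status>:<rc>;<transition-key>"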
         magic = crm_strdup_printf("%d:%d;%s", op->op_status, op->rc,
                                   (const char *) op->user_data);
     }
 
     crm_xml_add(xml_op, PCMK_XA_ID, op_id);
     crm_xml_add(xml_op, PCMK__XA_OPERATION_KEY, key);
     crm_xml_add(xml_op, PCMK_XA_OPERATION, task);
     crm_xml_add(xml_op, PCMK_XA_CRM_DEBUG_ORIGIN, origin);
     crm_xml_add(xml_op, PCMK_XA_CRM_FEATURE_SET, caller_version);
     crm_xml_add(xml_op, PCMK__XA_TRANSITION_KEY, op->user_data);
     crm_xml_add(xml_op, PCMK__XA_TRANSITION_MAGIC, magic);
     crm_xml_add(xml_op, PCMK_XA_EXIT_REASON, pcmk__s(exit_reason, ""));
     crm_xml_add(xml_op, PCMK__META_ON_NODE, node); // For context during triage
 
     crm_xml_add_int(xml_op, PCMK__XA_CALL_ID, op->call_id);
     crm_xml_add_int(xml_op, PCMK__XA_RC_CODE, op->rc);
     crm_xml_add_int(xml_op, PCMK__XA_OP_STATUS, op->op_status);
     crm_xml_add_ms(xml_op, PCMK_META_INTERVAL, op->interval_ms);
 
     if ((op->t_run > 0) || (op->t_rcchange > 0) || (op->exec_time > 0)
         || (op->queue_time > 0)) {
 
         crm_trace("Timing data (" PCMK__OP_FMT "): "
-                  "last=%lld change=%u exec=%u queue=%u",
+                  "last=%lld change=%lld exec=%u queue=%u",
                   op->rsc_id, op->op_type, op->interval_ms,
-                  (long long) op->t_run, op->t_rcchange, op->exec_time,
-                  op->queue_time);
+                  (long long) op->t_run, (long long) op->t_rcchange,
+                  op->exec_time, op->queue_time);
 
         if ((op->interval_ms > 0) && (op->t_rcchange > 0)) {
             // Recurring ops may have changed rc after initial run
             crm_xml_add_ll(xml_op, PCMK_XA_LAST_RC_CHANGE,
                            (long long) op->t_rcchange);
         } else {
             crm_xml_add_ll(xml_op, PCMK_XA_LAST_RC_CHANGE,
                            (long long) op->t_run);
         }
 
         crm_xml_add_int(xml_op, PCMK_XA_EXEC_TIME, op->exec_time);
         crm_xml_add_int(xml_op, PCMK_XA_QUEUE_TIME, op->queue_time);
     }
 
     if (pcmk__str_any_of(op->op_type, PCMK_ACTION_MIGRATE_TO,
                          PCMK_ACTION_MIGRATE_FROM, NULL)) {
         /* Always record PCMK__META_MIGRATE_SOURCE and
          * PCMK__META_MIGRATE_TARGET for migration operations
          */
         const char *name = PCMK__META_MIGRATE_SOURCE;
 
         crm_xml_add(xml_op, name, crm_meta_value(op->params, name));
 
         name = PCMK__META_MIGRATE_TARGET;
         crm_xml_add(xml_op, name, crm_meta_value(op->params, name));
     }
 
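     // Record parameter digests so configuration changes can be detected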
     add_op_digest_to_xml(op, xml_op);
 
     if (op_id_additional) {
         free(op_id);
         op_id = op_id_additional;
         op_id_additional = NULL;
         goto again;
     }
 
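     // Undo the fake transition key if we generated one above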
     if (local_user_data) {
         free(local_user_data);
         op->user_data = NULL;
     }
     free(magic);
     free(op_id);
     free(key);
     return xml_op;
 }
 
 /*!
  * \internal
  * \brief Check whether an action shutdown-locks a resource to a node
  *
  * If the PCMK_OPT_SHUTDOWN_LOCK cluster property is set, resources will not be
  * recovered on a different node if cleanly stopped, and may start only on that
  * same node. This function checks whether that applies to a given action, so
  * that the transition graph can be marked appropriately.
  *
  * \param[in] action  Action to check
  *
  * \return true if \p action locks its resource to the action's node,
  *         otherwise false
  */
 bool
 pcmk__action_locks_rsc_to_node(const pcmk_action_t *action)
 {
     // Only resource actions on the resource's lock node are locked
     if ((action == NULL) || (action->rsc == NULL)
         || !pcmk__same_node(action->node, action->rsc->private->lock_node)) {
         return false;
     }
 
     /* During shutdown, only stops are locked (otherwise, another action such as
      * a demote would cause the controller to clear the lock)
      */
     if (action->node->details->shutdown && (action->task != NULL)
         && (strcmp(action->task, PCMK_ACTION_STOP) != 0)) {
         return false;
     }
 
     return true;
 }
 
 /* Sort by action ID, lowest to highest (NULLs sort last) */
 static gint
 sort_action_id(gconstpointer a, gconstpointer b)
 {
     const pcmk__related_action_t *action_wrapper2 = a;
     const pcmk__related_action_t *action_wrapper1 = b;
 
     if (a == NULL) {
         return 1;
     }
     if (b == NULL) {
         return -1;
     }
     if (action_wrapper1->action->id < action_wrapper2->action->id) {
         return 1;
     }
     if (action_wrapper1->action->id > action_wrapper2->action->id) {
         return -1;
     }
     return 0;
 }
 
 /*!
  * \internal
  * \brief Remove any duplicate action inputs, merging action flags
  *
  * \param[in,out] action  Action whose inputs should be checked
  */
 void
 pcmk__deduplicate_action_inputs(pcmk_action_t *action)
 {
     GList *item = NULL;
     GList *next = NULL;
     pcmk__related_action_t *last_input = NULL;
 
     action->actions_before = g_list_sort(action->actions_before,
                                          sort_action_id);
     for (item = action->actions_before; item != NULL; item = next) {
         pcmk__related_action_t *input = item->data;
 
         next = item->next;
         if ((last_input != NULL)
             && (input->action->id == last_input->action->id)) {
             crm_trace("Input %s (%d) duplicate skipped for action %s (%d)",
                       input->action->uuid, input->action->id,
                       action->uuid, action->id);
 
             /* For the purposes of scheduling, the ordering flags no longer
              * matter, but crm_simulate looks at certain ones when creating a
              * dot graph. Combining the flags is sufficient for that purpose.
              */
             last_input->type |= input->type;
             if (input->state == pe_link_dumped) {
                 last_input->state = pe_link_dumped;
             }
 
             free(item->data);
             action->actions_before = g_list_delete_link(action->actions_before,
                                                         item);
         } else {
             last_input = input;
             input->state = pe_link_not_dumped;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Output all scheduled actions
  *
  * \param[in,out] scheduler  Scheduler data
  */
 void
 pcmk__output_actions(pcmk_scheduler_t *scheduler)
 {
     pcmk__output_t *out = scheduler->priv;
 
     // Output node (non-resource) actions
     for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
         char *node_name = NULL;
         char *task = NULL;
         pcmk_action_t *action = (pcmk_action_t *) iter->data;
 
         if (action->rsc != NULL) {
             continue; // Resource actions will be output later
 
         } else if (pcmk_is_set(action->flags, pcmk_action_optional)) {
             continue; // This action was not scheduled
         }
 
         if (pcmk__str_eq(action->task, PCMK_ACTION_DO_SHUTDOWN,
                          pcmk__str_none)) {
             task = strdup("Shutdown");
 
         } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
                                 pcmk__str_none)) {
             const char *op = g_hash_table_lookup(action->meta,
                                                  PCMK__META_STONITH_ACTION);
 
             task = crm_strdup_printf("Fence (%s)", op);
 
         } else {
             continue; // Don't display other node action types
         }
 
         if (pcmk__is_guest_or_bundle_node(action->node)) {
             const pcmk_resource_t *remote = action->node->private->remote;
 
             node_name = crm_strdup_printf("%s (resource: %s)",
                                           pcmk__node_name(action->node),
                                           remote->private->launcher->id);
         } else if (action->node != NULL) {
             node_name = crm_strdup_printf("%s", pcmk__node_name(action->node));
         }
 
         out->message(out, "node-action", task, node_name, action->reason);
 
         free(node_name);
         free(task);
     }
 
     // Output resource actions
     for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
         pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
 
         rsc->private->cmds->output_actions(rsc);
     }
 }
 
 /*!
  * \internal
  * \brief Get action name needed to compare digest for configuration changes
  *
  * \param[in] task         Action name from history
  * \param[in] interval_ms  Action interval (in milliseconds)
  *
  * \return Action name whose digest should be compared
  */
 static const char *
 task_for_digest(const char *task, guint interval_ms)
 {
     /* Certain actions need to be compared against the parameters used to start
      * the resource.
      */
     if ((interval_ms == 0)
         && pcmk__str_any_of(task, PCMK_ACTION_MONITOR, PCMK_ACTION_MIGRATE_FROM,
                             PCMK_ACTION_PROMOTE, NULL)) {
         task = PCMK_ACTION_START;
     }
     return task;
 }
 
 /*!
  * \internal
  * \brief Check whether only sanitized parameters to an action changed
  *
  * When collecting CIB files for troubleshooting, crm_report will mask
  * sensitive resource parameters. If simulations were run using those masked
  * files, affected resources would appear to need a restart, which would
  * complicate troubleshooting. To avoid that, we save a "secure digest" of
  * non-sensitive parameters. This function uses that digest to check whether
  * only masked parameters are different.
  *
  * \param[in] xml_op       Resource history entry with secure digest
  * \param[in] digest_data  Operation digest information being compared
  * \param[in] scheduler    Scheduler data
  *
  * \return true if only sanitized parameters changed, otherwise false
  */
 static bool
 only_sanitized_changed(const xmlNode *xml_op,
                        const pcmk__op_digest_t *digest_data,
                        const pcmk_scheduler_t *scheduler)
 {
     const char *digest_secure = NULL;
 
     if (!pcmk_is_set(scheduler->flags, pcmk_sched_sanitized)) {
         // The scheduler is not being run as a simulation
         return false;
     }
 
     digest_secure = crm_element_value(xml_op, PCMK__XA_OP_SECURE_DIGEST);
 
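     /* Something changed, but the digests of non-sensitive parameters still
      * match, so only masked parameters differ
      */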
     return (digest_data->rc != pcmk__digest_match) && (digest_secure != NULL)
            && (digest_data->digest_secure_calc != NULL)
            && (strcmp(digest_data->digest_secure_calc, digest_secure) == 0);
 }
 
 /*!
  * \internal
  * \brief Force a restart due to a configuration change
  *
  * \param[in,out] rsc          Resource that action is for
  * \param[in]     task         Name of action whose configuration changed
  * \param[in]     interval_ms  Action interval (in milliseconds)
  * \param[in,out] node         Node where resource should be restarted
  */
 static void
 force_restart(pcmk_resource_t *rsc, const char *task, guint interval_ms,
               pcmk_node_t *node)
 {
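     /* Find or create the action as required (not optional) so the scheduler
      * must schedule it
      */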
     char *key = pcmk__op_key(rsc->id, task, interval_ms);
     pcmk_action_t *required = custom_action(rsc, key, task, NULL, FALSE,
                                             rsc->private->scheduler);
 
     pe_action_set_reason(required, "resource definition change", true);
     trigger_unfencing(rsc, node, "Device parameters changed", NULL,
                       rsc->private->scheduler);
 }
 
 /*!
  * \internal
  * \brief Schedule a reload of a resource on a node
  *
  * \param[in,out] data       Resource to reload
  * \param[in]     user_data  Where resource should be reloaded
  */
 static void
 schedule_reload(gpointer data, gpointer user_data)
 {
     pcmk_resource_t *rsc = data;
     const pcmk_node_t *node = user_data;
 
     pcmk_action_t *reload = NULL;
 
     // For collective resources, just recurse into the children
     if (rsc->private->variant > pcmk__rsc_variant_primitive) {
         g_list_foreach(rsc->private->children, schedule_reload, user_data);
         return;
     }
 
     // Skip the reload in certain situations
     if ((node == NULL)
         || !pcmk_is_set(rsc->flags, pcmk__rsc_managed)
         || pcmk_is_set(rsc->flags, pcmk__rsc_failed)) {
         pcmk__rsc_trace(rsc, "Skip reload of %s:%s%s %s",
                         rsc->id,
                         pcmk_is_set(rsc->flags, pcmk__rsc_managed)? "" : " unmanaged",
                         pcmk_is_set(rsc->flags, pcmk__rsc_failed)? " failed" : "",
                         (node == NULL)? "inactive" : node->private->name);
         return;
     }
 
     /* If a resource's configuration changed while a start was pending,
      * force a full restart instead of a reload.
      */
     if (pcmk_is_set(rsc->flags, pcmk__rsc_start_pending)) {
         pcmk__rsc_trace(rsc,
                         "%s: preventing agent reload because start pending",
                         rsc->id);
         custom_action(rsc, stop_key(rsc), PCMK_ACTION_STOP, node, FALSE,
                       rsc->private->scheduler);
         return;
     }
 
     // Schedule the reload
     pcmk__set_rsc_flags(rsc, pcmk__rsc_reload);
     reload = custom_action(rsc, reload_key(rsc), PCMK_ACTION_RELOAD_AGENT, node,
                            FALSE, rsc->private->scheduler);
     pe_action_set_reason(reload, "resource definition change", FALSE);
 
     // Set orderings so that a required stop or demote cancels the reload
     pcmk__new_ordering(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
                        pcmk__ar_ordered|pcmk__ar_then_cancels_first,
                        rsc->private->scheduler);
     pcmk__new_ordering(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
                        pcmk__ar_ordered|pcmk__ar_then_cancels_first,
                        rsc->private->scheduler);
 }
 
 /*!
  * \internal
  * \brief Handle any configuration change for an action
  *
  * Given an action from resource history, if the resource's configuration
  * changed since the action was done, schedule any actions needed (restart,
  * reload, unfencing, rescheduling recurring actions, etc.).
  *
  * \param[in,out] rsc     Resource that action is for
  * \param[in,out] node    Node that action was on
  * \param[in]     xml_op  Action XML from resource history
  *
  * \return true if action configuration changed, otherwise false
  */
 bool
 pcmk__check_action_config(pcmk_resource_t *rsc, pcmk_node_t *node,
                           const xmlNode *xml_op)
 {
     guint interval_ms = 0;
     const char *task = NULL;
     const pcmk__op_digest_t *digest_data = NULL;
 
     CRM_CHECK((rsc != NULL) && (node != NULL) && (xml_op != NULL),
               return false);
 
     task = crm_element_value(xml_op, PCMK_XA_OPERATION);
     CRM_CHECK(task != NULL, return false);
 
     crm_element_value_ms(xml_op, PCMK_META_INTERVAL, &interval_ms);
 
     // If this is a recurring action, check whether it has been orphaned
     if (interval_ms > 0) {
         if (pcmk__find_action_config(rsc, task, interval_ms, false) != NULL) {
             pcmk__rsc_trace(rsc,
                             "%s-interval %s for %s on %s is in configuration",
                             pcmk__readable_interval(interval_ms), task, rsc->id,
                             pcmk__node_name(node));
         } else if (pcmk_is_set(rsc->private->scheduler->flags,
                                pcmk_sched_cancel_removed_actions)) {
             pcmk__schedule_cancel(rsc,
                                   crm_element_value(xml_op, PCMK__XA_CALL_ID),
                                   task, interval_ms, node, "orphan");
             return true;
         } else {
             pcmk__rsc_debug(rsc, "%s-interval %s for %s on %s is orphaned",
                             pcmk__readable_interval(interval_ms), task, rsc->id,
                             pcmk__node_name(node));
             return true;
         }
     }
 
     crm_trace("Checking %s-interval %s for %s on %s for configuration changes",
               pcmk__readable_interval(interval_ms), task, rsc->id,
               pcmk__node_name(node));
     task = task_for_digest(task, interval_ms);
     digest_data = rsc_action_digest_cmp(rsc, xml_op, node,
                                         rsc->private->scheduler);
 
     if (only_sanitized_changed(xml_op, digest_data, rsc->private->scheduler)) {
         if (!pcmk__is_daemon && (rsc->private->scheduler->priv != NULL)) {
             pcmk__output_t *out = rsc->private->scheduler->priv;
 
             out->info(out,
                       "Only 'private' parameters to %s-interval %s for %s "
                       "on %s changed: %s",
                       pcmk__readable_interval(interval_ms), task, rsc->id,
                       pcmk__node_name(node),
                       crm_element_value(xml_op, PCMK__XA_TRANSITION_MAGIC));
         }
         return false;
     }
 
     switch (digest_data->rc) {
         case pcmk__digest_restart:
             crm_log_xml_debug(digest_data->params_restart, "params:restart");
             force_restart(rsc, task, interval_ms, node);
             return true;
 
         case pcmk__digest_unknown:
         case pcmk__digest_mismatch:
             // Changes that can potentially be handled by an agent reload
 
             if (interval_ms > 0) {
                 /* Recurring actions aren't reloaded per se; they are just
                  * rescheduled so that the next run uses the new parameters.
                  * The old instance will be cancelled automatically.
                  */
                 crm_log_xml_debug(digest_data->params_all, "params:reschedule");
                 pcmk__reschedule_recurring(rsc, task, interval_ms, node);
 
             } else if (crm_element_value(xml_op,
                                          PCMK__XA_OP_RESTART_DIGEST) != NULL) {
                 // Agent supports reload, so use it
                 trigger_unfencing(rsc, node,
                                   "Device parameters changed (reload)", NULL,
                                   rsc->private->scheduler);
                 crm_log_xml_debug(digest_data->params_all, "params:reload");
                 schedule_reload((gpointer) rsc, (gpointer) node);
 
             } else {
                 pcmk__rsc_trace(rsc,
                                 "Restarting %s "
                                 "because agent doesn't support reload",
                                 rsc->id);
                 crm_log_xml_debug(digest_data->params_restart,
                                   "params:restart");
                 force_restart(rsc, task, interval_ms, node);
             }
             return true;
 
         default:
             break;
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Create a list of a resource's action history entries, sorted by call ID
  *
  * \param[in]  rsc_entry    Resource's \c PCMK__XE_LRM_RESOURCE status XML
  * \param[out] start_index  Where to store index of start-like action, if any
  * \param[out] stop_index   Where to store index of stop action, if any
  *
  * \return Newly created list of the resource's \c PCMK__XE_LRM_RSC_OP
  *         entries, sorted by call ID
  */
 static GList *
 rsc_history_as_list(const xmlNode *rsc_entry, int *start_index, int *stop_index)
 {
     GList *ops = NULL;
 
     for (xmlNode *rsc_op = pcmk__xe_first_child(rsc_entry, PCMK__XE_LRM_RSC_OP,
                                                 NULL, NULL);
          rsc_op != NULL; rsc_op = pcmk__xe_next_same(rsc_op)) {
 
         ops = g_list_prepend(ops, rsc_op);
     }
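     // Sort by call ID and locate the most recent start-like and stop actions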
     ops = g_list_sort(ops, sort_op_by_callid);
     calculate_active_ops(ops, start_index, stop_index);
     return ops;
 }
 
 /*!
  * \internal
  * \brief Process a resource's action history from the CIB status
  *
  * Given a resource's action history, if the resource's configuration
  * changed since the actions were done, schedule any actions needed (restart,
  * reload, unfencing, rescheduling recurring actions, clean-up, etc.).
  * (This also cancels recurring actions for maintenance mode, which is not
  * entirely related but convenient to do here.)
  *
  * \param[in]     rsc_entry  Resource's \c PCMK__XE_LRM_RESOURCE status XML
  * \param[in,out] rsc        Resource whose history is being processed
  * \param[in,out] node       Node whose history is being processed
  */
 static void
 process_rsc_history(const xmlNode *rsc_entry, pcmk_resource_t *rsc,
                     pcmk_node_t *node)
 {
     int offset = -1;
     int stop_index = 0;
     int start_index = 0;
     GList *sorted_op_list = NULL;
 
     if (pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
         if (pcmk__is_anonymous_clone(pe__const_top_resource(rsc, false))) {
             pcmk__rsc_trace(rsc,
                             "Skipping configuration check "
                             "for orphaned clone instance %s",
                             rsc->id);
         } else {
             pcmk__rsc_trace(rsc,
                             "Skipping configuration check and scheduling "
                             "clean-up for orphaned resource %s", rsc->id);
             pcmk__schedule_cleanup(rsc, node, false);
         }
         return;
     }
 
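     /* If the resource is no longer active on this node, just check whether
      * the agent changed (scheduling clean-up if so), and skip the rest
      */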
     if (pe_find_node_id(rsc->private->active_nodes,
                         node->private->id) == NULL) {
         if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, false)) {
             pcmk__schedule_cleanup(rsc, node, false);
         }
         pcmk__rsc_trace(rsc,
                         "Skipping configuration check for %s "
                         "because no longer active on %s",
                         rsc->id, pcmk__node_name(node));
         return;
     }
 
     pcmk__rsc_trace(rsc, "Checking for configuration changes for %s on %s",
                     rsc->id, pcmk__node_name(node));
 
     if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, true)) {
         pcmk__schedule_cleanup(rsc, node, false);
     }
 
     sorted_op_list = rsc_history_as_list(rsc_entry, &start_index, &stop_index);
     if (start_index < stop_index) {
         return; // Resource is stopped
     }
 
     for (GList *iter = sorted_op_list; iter != NULL; iter = iter->next) {
         xmlNode *rsc_op = (xmlNode *) iter->data;
         const char *task = NULL;
         guint interval_ms = 0;
 
         if (++offset < start_index) {
             // Skip actions that happened before a start
             continue;
         }
 
         task = crm_element_value(rsc_op, PCMK_XA_OPERATION);
         crm_element_value_ms(rsc_op, PCMK_META_INTERVAL, &interval_ms);
 
         if ((interval_ms > 0)
             && (pcmk_is_set(rsc->flags, pcmk__rsc_maintenance)
                 || node->details->maintenance)) {
             // Maintenance mode cancels recurring operations
             pcmk__schedule_cancel(rsc,
                                   crm_element_value(rsc_op, PCMK__XA_CALL_ID),
                                   task, interval_ms, node, "maintenance mode");
 
         } else if ((interval_ms > 0)
                    || pcmk__strcase_any_of(task, PCMK_ACTION_MONITOR,
                                            PCMK_ACTION_START,
                                            PCMK_ACTION_PROMOTE,
                                            PCMK_ACTION_MIGRATE_FROM, NULL)) {
             /* If a resource operation failed, and the operation's definition
              * has changed, clear any fail count so the operation can be
              * retried fresh.
              */
 
             if (pe__bundle_needs_remote_name(rsc)) {
                 /* We haven't assigned resources to nodes yet, so if the
                  * REMOTE_CONTAINER_HACK is used, we may calculate the digest
                  * based on the literal "#uname" value rather than the properly
                  * substituted value. That would mistakenly make the action
                  * definition appear to have been changed. Defer the check until
                  * later in this case.
                  */
                 pe__add_param_check(rsc_op, rsc, node, pcmk__check_active,
                                     rsc->private->scheduler);
 
             } else if (pcmk__check_action_config(rsc, node, rsc_op)
                        && (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
                                             NULL) != 0)) {
                 pe__clear_failcount(rsc, node, "action definition changed",
                                     rsc->private->scheduler);
             }
         }
     }
     g_list_free(sorted_op_list);
 }
 
 /*!
  * \internal
  * \brief Process a node's action history from the CIB status
  *
  * Given a node's resource history, if a resource's configuration changed
  * since its actions were done, schedule any actions needed (restart,
  * reload, unfencing, rescheduling recurring actions, clean-up, etc.).
  * (This also cancels recurring actions for maintenance mode, which is not
  * entirely related but convenient to do here.)
  *
  * \param[in,out] node      Node whose history is being processed
  * \param[in]     lrm_rscs  Node's \c PCMK__XE_LRM_RESOURCES from CIB status XML
  */
 static void
 process_node_history(pcmk_node_t *node, const xmlNode *lrm_rscs)
 {
     crm_trace("Processing node history for %s", pcmk__node_name(node));
     for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rscs,
                                                          PCMK__XE_LRM_RESOURCE,
                                                          NULL, NULL);
          rsc_entry != NULL; rsc_entry = pcmk__xe_next_same(rsc_entry)) {
 
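         // Only history entries with recorded operations need to be checked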
         if (rsc_entry->children != NULL) {
             GList *result = pcmk__rscs_matching_id(pcmk__xe_id(rsc_entry),
                                                    node->private->scheduler);
 
             for (GList *iter = result; iter != NULL; iter = iter->next) {
                 pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
 
                 if (pcmk__is_primitive(rsc)) {
                     process_rsc_history(rsc_entry, rsc, node);
                 }
             }
             g_list_free(result);
         }
     }
 }
 
 // XPath to find a node's resource history
 #define XPATH_NODE_HISTORY "/" PCMK_XE_CIB "/" PCMK_XE_STATUS   \
                            "/" PCMK__XE_NODE_STATE              \
                            "[@" PCMK_XA_UNAME "='%s']"          \
                            "/" PCMK__XE_LRM "/" PCMK__XE_LRM_RESOURCES
 
 /*!
  * \internal
  * \brief Process any resource configuration changes in the CIB status
  *
  * Go through all nodes' resource history, and if a resource's configuration
  * changed since its actions were done, schedule any actions needed (restart,
  * reload, unfencing, rescheduling recurring actions, clean-up, etc.).
  * (This also cancels recurring actions for maintenance mode, which is not
  * entirely related but convenient to do here.)
  *
  * \param[in,out] scheduler  Scheduler data
  */
 void
 pcmk__handle_rsc_config_changes(pcmk_scheduler_t *scheduler)
 {
     crm_trace("Check resource and action configuration for changes");
 
     /* Rather than iterate through the status section, iterate through the nodes
      * and search for the appropriate status subsection for each. This skips
      * orphaned nodes and lets us eliminate some cases before searching the XML.
      */
     for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
         pcmk_node_t *node = (pcmk_node_t *) iter->data;
 
         /* Don't bother checking actions for a node that can't run actions ...
          * unless it's in maintenance mode, in which case we still need to
          * cancel any existing recurring monitors.
          */
         if (node->details->maintenance
             || pcmk__node_available(node, false, false)) {
 
             char *xpath = NULL;
             xmlNode *history = NULL;
 
             xpath = crm_strdup_printf(XPATH_NODE_HISTORY, node->private->name);
             history = get_xpath_object(xpath, scheduler->input, LOG_NEVER);
             free(xpath);
 
             process_node_history(node, history);
         }
     }
 }