diff --git a/daemons/controld/controld_cib.c b/daemons/controld/controld_cib.c
index fd75a3794b..47fb5c6e69 100644
--- a/daemons/controld/controld_cib.c
+++ b/daemons/controld/controld_cib.c
@@ -1,1069 +1,1066 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <unistd.h>  /* sleep */
 
 #include <crm/common/alerts_internal.h>
 #include <crm/common/xml.h>
 #include <crm/crm.h>
 #include <crm/lrmd_internal.h>
 
 #include <pacemaker-controld.h>
 
 // Call ID of the most recent in-progress CIB resource update (or 0 if none)
 static int pending_rsc_update = 0;
 
 /*!
  * \internal
  * \brief Respond to a dropped CIB connection
  *
  * \param[in] user_data  CIB connection that dropped
  */
 static void
 handle_cib_disconnect(gpointer user_data)
 {
     CRM_LOG_ASSERT(user_data == controld_globals.cib_conn);
 
     controld_trigger_fsa();
     controld_globals.cib_conn->state = cib_disconnected;
 
     if (pcmk_is_set(controld_globals.fsa_input_register, R_CIB_CONNECTED)) {
         // @TODO This should trigger a reconnect, not a shutdown
         crm_crit("Lost connection to the CIB manager, shutting down");
         register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL);
         controld_clear_fsa_input_flags(R_CIB_CONNECTED);
 
     } else { // Expected
         crm_info("Disconnected from the CIB manager");
     }
 }
 
 static void
 do_cib_updated(const char *event, xmlNode * msg)
 {
     const xmlNode *patchset = NULL;
     const char *client_name = NULL;
 
     crm_debug("Received CIB diff notification: DC=%s", pcmk__btoa(AM_I_DC));
 
     if (cib__get_notify_patchset(msg, &patchset) != pcmk_rc_ok) {
         return;
     }
 
     if (cib__element_in_patchset(patchset, PCMK_XE_ALERTS)
         || cib__element_in_patchset(patchset, PCMK_XE_CRM_CONFIG)) {
 
         controld_trigger_config();
     }
 
     if (!AM_I_DC) {
         // We're not in control of the join sequence
         return;
     }
 
     client_name = crm_element_value(msg, PCMK__XA_CIB_CLIENTNAME);
     if (!cib__client_triggers_refresh(client_name)) {
         // The CIB is still accurate
         return;
     }
 
     if (cib__element_in_patchset(patchset, PCMK_XE_NODES)
         || cib__element_in_patchset(patchset, PCMK_XE_STATUS)) {
 
         /* An unsafe client modified the PCMK_XE_NODES or PCMK_XE_STATUS
          * section. Ensure the node list is up-to-date, and start the join
          * process again so we get everyone's current resource history.
          */
         if (client_name == NULL) {
             client_name = crm_element_value(msg, PCMK__XA_CIB_CLIENTID);
         }
         crm_notice("Populating nodes and starting an election after %s event "
                    "triggered by %s",
                    event, pcmk__s(client_name, "(unidentified client)"));
 
         populate_cib_nodes(node_update_quick|node_update_all, __func__);
         register_fsa_input(C_FSA_INTERNAL, I_ELECTION, NULL);
     }
 }
 
 void
 controld_disconnect_cib_manager(void)
 {
     cib_t *cib_conn = controld_globals.cib_conn;
 
     CRM_ASSERT(cib_conn != NULL);
 
     crm_debug("Disconnecting from the CIB manager");
 
     controld_clear_fsa_input_flags(R_CIB_CONNECTED);
 
     cib_conn->cmds->del_notify_callback(cib_conn, PCMK__VALUE_CIB_DIFF_NOTIFY,
                                         do_cib_updated);
     cib_free_callbacks(cib_conn);
 
     if (cib_conn->state != cib_disconnected) {
         cib_conn->cmds->set_secondary(cib_conn,
                                       cib_scope_local|cib_discard_reply);
         cib_conn->cmds->signoff(cib_conn);
     }
 }
 
 /* A_CIB_STOP, A_CIB_START, O_CIB_RESTART */
 void
 do_cib_control(long long action,
                enum crmd_fsa_cause cause,
                enum crmd_fsa_state cur_state,
                enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     static int cib_retries = 0;
 
     cib_t *cib_conn = controld_globals.cib_conn;
 
     void (*dnotify_fn) (gpointer user_data) = handle_cib_disconnect;
     void (*update_cb) (const char *event, xmlNodePtr msg) = do_cib_updated;
 
     int rc = pcmk_ok;
 
     CRM_ASSERT(cib_conn != NULL);
 
     if (pcmk_is_set(action, A_CIB_STOP)) {
         if ((cib_conn->state != cib_disconnected)
             && (pending_rsc_update != 0)) {
 
             crm_info("Waiting for resource update %d to complete",
                      pending_rsc_update);
             crmd_fsa_stall(FALSE);
             return;
         }
         controld_disconnect_cib_manager();
     }
 
     if (!pcmk_is_set(action, A_CIB_START)) {
         return;
     }
 
     if (cur_state == S_STOPPING) {
         crm_err("Ignoring request to connect to the CIB manager after "
                 "shutdown");
         return;
     }
 
     rc = cib_conn->cmds->signon(cib_conn, CRM_SYSTEM_CRMD,
                                 cib_command_nonblocking);
 
     if (rc != pcmk_ok) {
         // A short wait that usually avoids stalling the FSA
         sleep(1);
         rc = cib_conn->cmds->signon(cib_conn, CRM_SYSTEM_CRMD,
                                     cib_command_nonblocking);
     }
 
     if (rc != pcmk_ok) {
         crm_info("Could not connect to the CIB manager: %s", pcmk_strerror(rc));
 
     } else if (cib_conn->cmds->set_connection_dnotify(cib_conn,
                                                       dnotify_fn) != pcmk_ok) {
         crm_err("Could not set dnotify callback");
 
     } else if (cib_conn->cmds->add_notify_callback(cib_conn,
                                                    PCMK__VALUE_CIB_DIFF_NOTIFY,
                                                    update_cb) != pcmk_ok) {
         crm_err("Could not set CIB notification callback (update)");
 
     } else {
         controld_set_fsa_input_flags(R_CIB_CONNECTED);
         cib_retries = 0;
     }
 
     if (!pcmk_is_set(controld_globals.fsa_input_register, R_CIB_CONNECTED)) {
         cib_retries++;
 
         if (cib_retries < 30) {
             crm_warn("Couldn't complete CIB registration %d times... "
                      "pause and retry", cib_retries);
             controld_start_wait_timer();
             crmd_fsa_stall(FALSE);
 
         } else {
             crm_err("Could not complete CIB registration %d times... "
                     "hard error", cib_retries);
             register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
         }
     }
 }
 
 #define MIN_CIB_OP_TIMEOUT (30)
 
 /*!
  * \internal
  * \brief Get the timeout (in seconds) that should be used with CIB operations
  *
  * \return The maximum of 30 seconds, the value of the PCMK_cib_timeout
  *         environment variable, or 10 seconds times one more than the number
  *         of active cluster nodes and remote nodes.
  */
 unsigned int
 cib_op_timeout(void)
 {
     // @COMPAT: Drop env_timeout at 3.0.0
     static int env_timeout = -1;
     unsigned int calculated_timeout = 0;
 
     if (env_timeout == -1) {
         const char *env = pcmk__env_option(PCMK__ENV_CIB_TIMEOUT);
 
         pcmk__scan_min_int(env, &env_timeout, MIN_CIB_OP_TIMEOUT);
         crm_trace("Minimum CIB op timeout: %ds (environment: %s)",
                   env_timeout, (env? env : "none"));
     }
 
-    calculated_timeout = 1 + crm_active_peers();
-    if (crm_remote_peer_cache) {
-        calculated_timeout += g_hash_table_size(crm_remote_peer_cache);
-    }
-    calculated_timeout *= 10;
-
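+    // Allow 10s per active cluster node and remote node, plus a 10s base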
+    calculated_timeout = 10U * (1U
+                                + crm_active_peers()
+                                + pcmk__cluster_num_remote_nodes());
     calculated_timeout = QB_MAX(calculated_timeout, env_timeout);
     crm_trace("Calculated timeout: %us", calculated_timeout);
 
     if (controld_globals.cib_conn) {
         controld_globals.cib_conn->call_timeout = calculated_timeout;
     }
     return calculated_timeout;
 }
 
 /*!
  * \internal
  * \brief Get CIB call options to use local scope if primary is unavailable
  *
  * \return CIB call options
  */
 int
 crmd_cib_smart_opt(void)
 {
     int call_opt = cib_none;
 
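     /* While there is no DC (an election in progress, or this node still
      * pending), the primary CIB instance may be unavailable, so apply the
      * update to the local CIB only
      */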
     if ((controld_globals.fsa_state == S_ELECTION)
         || (controld_globals.fsa_state == S_PENDING)) {
         crm_info("Sending update to local CIB in state: %s",
                  fsa_state2string(controld_globals.fsa_state));
         cib__set_call_options(call_opt, "update", cib_scope_local);
     }
     return call_opt;
 }
 
 static void
 cib_delete_callback(xmlNode *msg, int call_id, int rc, xmlNode *output,
                     void *user_data)
 {
     char *desc = user_data;
 
     if (rc == 0) {
         crm_debug("Deletion of %s (via CIB call %d) succeeded", desc, call_id);
     } else {
         crm_warn("Deletion of %s (via CIB call %d) failed: %s " CRM_XS " rc=%d",
                  desc, call_id, pcmk_strerror(rc), rc);
     }
 }
 
 // Searches for various portions of PCMK__XE_NODE_STATE to delete
 
 // Match a particular node's PCMK__XE_NODE_STATE (takes node name 1x)
 #define XPATH_NODE_STATE "//" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']"
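 // (e.g., expands to //node_state[@uname='node1'])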
 
 // Node's lrm section (name 1x)
 #define XPATH_NODE_LRM XPATH_NODE_STATE "/" PCMK__XE_LRM
 
 /* Node's PCMK__XE_LRM_RSC_OP entries and PCMK__XE_LRM_RESOURCE entries without
  * unexpired lock
  * (name 2x, (seconds_since_epoch - PCMK_OPT_SHUTDOWN_LOCK_LIMIT) 1x)
  */
 #define XPATH_NODE_LRM_UNLOCKED XPATH_NODE_STATE "//" PCMK__XE_LRM_RSC_OP   \
                                 "|" XPATH_NODE_STATE                        \
                                 "//" PCMK__XE_LRM_RESOURCE                  \
                                 "[not(@" PCMK_OPT_SHUTDOWN_LOCK ") "        \
                                     "or " PCMK_OPT_SHUTDOWN_LOCK "<%lld]"
 
 // Node's PCMK__XE_TRANSIENT_ATTRIBUTES section (name 1x)
 #define XPATH_NODE_ATTRS XPATH_NODE_STATE "/" PCMK__XE_TRANSIENT_ATTRIBUTES
 
 // Everything under PCMK__XE_NODE_STATE (name 1x)
 #define XPATH_NODE_ALL          XPATH_NODE_STATE "/*"
 
 /* Unlocked history + transient attributes
  * (name 2x, (seconds_since_epoch - PCMK_OPT_SHUTDOWN_LOCK_LIMIT) 1x, name 1x)
  */
 #define XPATH_NODE_ALL_UNLOCKED XPATH_NODE_LRM_UNLOCKED "|" XPATH_NODE_ATTRS
 
 /*!
  * \internal
  * \brief Get the XPath and description of a node state section to be deleted
  *
  * \param[in]  uname    Desired node
  * \param[in]  section  Subsection of \c PCMK__XE_NODE_STATE to be deleted
  * \param[out] xpath    Where to store XPath of \p section
  * \param[out] desc     If not \c NULL, where to store description of \p section
  */
 void
 controld_node_state_deletion_strings(const char *uname,
                                      enum controld_section_e section,
                                      char **xpath, char **desc)
 {
     const char *desc_pre = NULL;
 
     // Shutdown locks that started before this time are expired
     long long expire = (long long) time(NULL)
                        - controld_globals.shutdown_lock_limit;
 
     switch (section) {
         case controld_section_lrm:
             *xpath = crm_strdup_printf(XPATH_NODE_LRM, uname);
             desc_pre = "resource history";
             break;
         case controld_section_lrm_unlocked:
             *xpath = crm_strdup_printf(XPATH_NODE_LRM_UNLOCKED,
                                        uname, uname, expire);
             desc_pre = "resource history (other than shutdown locks)";
             break;
         case controld_section_attrs:
             *xpath = crm_strdup_printf(XPATH_NODE_ATTRS, uname);
             desc_pre = "transient attributes";
             break;
         case controld_section_all:
             *xpath = crm_strdup_printf(XPATH_NODE_ALL, uname);
             desc_pre = "all state";
             break;
         case controld_section_all_unlocked:
             *xpath = crm_strdup_printf(XPATH_NODE_ALL_UNLOCKED,
                                        uname, uname, expire, uname);
             desc_pre = "all state (other than shutdown locks)";
             break;
         default:
             // We called this function incorrectly
             CRM_ASSERT(false);
             break;
     }
 
     if (desc != NULL) {
         *desc = crm_strdup_printf("%s for node %s", desc_pre, uname);
     }
 }
 
 /*!
  * \internal
  * \brief Delete subsection of a node's CIB \c PCMK__XE_NODE_STATE
  *
  * \param[in] uname    Desired node
  * \param[in] section  Subsection of \c PCMK__XE_NODE_STATE to delete
  * \param[in] options  CIB call options to use
  */
 void
 controld_delete_node_state(const char *uname, enum controld_section_e section,
                            int options)
 {
     cib_t *cib = controld_globals.cib_conn;
     char *xpath = NULL;
     char *desc = NULL;
     int cib_rc = pcmk_ok;
 
     CRM_ASSERT((uname != NULL) && (cib != NULL));
 
     controld_node_state_deletion_strings(uname, section, &xpath, &desc);
 
     cib__set_call_options(options, "node state deletion",
                           cib_xpath|cib_multiple);
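     /* cib_xpath treats the removal target as an XPath query, and cib_multiple
      * allows one call to remove every matching section
      */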
     cib_rc = cib->cmds->remove(cib, xpath, NULL, options);
     fsa_register_cib_callback(cib_rc, desc, cib_delete_callback);
     crm_info("Deleting %s (via CIB call %d) " CRM_XS " xpath=%s",
              desc, cib_rc, xpath);
 
     // CIB library handles freeing desc
     free(xpath);
 }
 
 // Takes node name and resource ID
 #define XPATH_RESOURCE_HISTORY "//" PCMK__XE_NODE_STATE                 \
                                "[@" PCMK_XA_UNAME "='%s']/"             \
                                PCMK__XE_LRM "/" PCMK__XE_LRM_RESOURCES  \
                                "/" PCMK__XE_LRM_RESOURCE                \
                                "[@" PCMK_XA_ID "='%s']"
 // @TODO could add "and @PCMK_OPT_SHUTDOWN_LOCK" to limit to locks
 
 /*!
  * \internal
  * \brief Clear resource history from CIB for a given resource and node
  *
  * \param[in]  rsc_id        ID of resource to be cleared
  * \param[in]  node          Node whose resource history should be cleared
  * \param[in]  user_name     ACL user name to use
  * \param[in]  call_options  CIB call options
  *
  * \return Standard Pacemaker return code
  */
 int
 controld_delete_resource_history(const char *rsc_id, const char *node,
                                  const char *user_name, int call_options)
 {
     char *desc = NULL;
     char *xpath = NULL;
     int rc = pcmk_rc_ok;
     cib_t *cib = controld_globals.cib_conn;
 
     CRM_CHECK((rsc_id != NULL) && (node != NULL), return EINVAL);
 
     desc = crm_strdup_printf("resource history for %s on %s", rsc_id, node);
     if (cib == NULL) {
         crm_err("Unable to clear %s: no CIB connection", desc);
         free(desc);
         return ENOTCONN;
     }
 
     // Ask CIB to delete the entry
     xpath = crm_strdup_printf(XPATH_RESOURCE_HISTORY, node, rsc_id);
 
     cib->cmds->set_user(cib, user_name);
     rc = cib->cmds->remove(cib, xpath, NULL, call_options|cib_xpath);
     cib->cmds->set_user(cib, NULL);
 
     if (rc < 0) {
         rc = pcmk_legacy2rc(rc);
         crm_err("Could not delete resource status of %s on %s%s%s: %s "
                 CRM_XS " rc=%d", rsc_id, node,
                 (user_name? " for user " : ""), (user_name? user_name : ""),
                 pcmk_rc_str(rc), rc);
         free(desc);
         free(xpath);
         return rc;
     }
 
     if (pcmk_is_set(call_options, cib_sync_call)) {
         if (pcmk_is_set(call_options, cib_dryrun)) {
             crm_debug("Deletion of %s would succeed", desc);
         } else {
             crm_debug("Deletion of %s succeeded", desc);
         }
         free(desc);
 
     } else {
         crm_info("Clearing %s (via CIB call %d) " CRM_XS " xpath=%s",
                  desc, rc, xpath);
         fsa_register_cib_callback(rc, desc, cib_delete_callback);
         // CIB library handles freeing desc
     }
 
     free(xpath);
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Build XML and string of parameters meeting some criteria, for digest
  *
  * \param[in]  op          Executor event with parameter table to use
  * \param[in]  metadata    Parsed meta-data for executed resource agent
  * \param[in]  param_type  Flag used for selection criteria
  * \param[out] result      Will be set to newly created XML with selected
  *                         parameters as attributes
  *
  * \return Newly allocated space-separated string of parameter names
  * \note Selection criteria vary by param_type: for the restart digest, we
  *       want parameters that are *not* marked reloadable (OCF 1.1) or that
  *       *are* marked unique (pre-1.1), for both string and XML results; for the
  *       secure digest, we want parameters that *are* marked private for the
  *       string, but parameters that are *not* marked private for the XML.
  * \note It is the caller's responsibility to free the string return value with
  *       \p g_string_free() and the XML result with \p free_xml().
  */
 static GString *
 build_parameter_list(const lrmd_event_data_t *op,
                      const struct ra_metadata_s *metadata,
                      enum ra_param_flags_e param_type, xmlNode **result)
 {
     GString *list = NULL;
 
     *result = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS);
 
     /* Consider all parameters except private ones, to be consistent with
      * what the scheduler does with calculate_secure_digest().
      */
     if (param_type == ra_param_private
         && compare_version(controld_globals.dc_version, "3.16.0") >= 0) {
         g_hash_table_foreach(op->params, hash2field, *result);
         pcmk__filter_op_for_digest(*result);
     }
 
     for (GList *iter = metadata->ra_params; iter != NULL; iter = iter->next) {
         struct ra_param_s *param = (struct ra_param_s *) iter->data;
 
         bool accept_for_list = false;
         bool accept_for_xml = false;
 
         switch (param_type) {
             case ra_param_reloadable:
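                 // Restart digest: params *not* marked reloadable force restart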
                 accept_for_list = !pcmk_is_set(param->rap_flags, param_type);
                 accept_for_xml = accept_for_list;
                 break;
 
             case ra_param_unique:
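                 // Pre-OCF-1.1 restart digest: params marked unique force restart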
                 accept_for_list = pcmk_is_set(param->rap_flags, param_type);
                 accept_for_xml = accept_for_list;
                 break;
 
             case ra_param_private:
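                 // Secure digest: list private params; digest the non-private ones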
                 accept_for_list = pcmk_is_set(param->rap_flags, param_type);
                 accept_for_xml = !accept_for_list;
                 break;
         }
 
         if (accept_for_list) {
             crm_trace("Attr %s is %s", param->rap_name, ra_param_flag2text(param_type));
 
             if (list == NULL) {
                 // We will later search for " WORD ", so start list with a space
                 pcmk__add_word(&list, 256, " ");
             }
             pcmk__add_word(&list, 0, param->rap_name);
 
         } else {
             crm_trace("Rejecting %s for %s", param->rap_name, ra_param_flag2text(param_type));
         }
 
         if (accept_for_xml) {
             const char *v = g_hash_table_lookup(op->params, param->rap_name);
 
             if (v != NULL) {
                 crm_trace("Adding attr %s=%s to the xml result", param->rap_name, v);
                 crm_xml_add(*result, param->rap_name, v);
             }
 
         } else {
             crm_trace("Removing attr %s from the xml result", param->rap_name);
             pcmk__xe_remove_attr(*result, param->rap_name);
         }
     }
 
     if (list != NULL) {
         // We will later search for " WORD ", so end list with a space
         pcmk__add_word(&list, 0, " ");
     }
     return list;
 }
 
 static void
 append_restart_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata,
                     xmlNode *update, const char *version)
 {
     GString *list = NULL;
     char *digest = NULL;
     xmlNode *restart = NULL;
 
     CRM_LOG_ASSERT(op->params != NULL);
 
     if (op->interval_ms > 0) {
         /* monitors are not reloadable */
         return;
     }
 
     if (pcmk_is_set(metadata->ra_flags, ra_supports_reload_agent)) {
         /* Add parameters not marked reloadable to the PCMK__XA_OP_FORCE_RESTART
          * list
          */
         list = build_parameter_list(op, metadata, ra_param_reloadable,
                                     &restart);
 
     } else if (pcmk_is_set(metadata->ra_flags, ra_supports_legacy_reload)) {
         /* @COMPAT pre-OCF-1.1 resource agents
          *
          * Before OCF 1.1, Pacemaker abused "unique=0" to indicate
          * reloadability. Add any parameters with unique="1" to the
          * PCMK__XA_OP_FORCE_RESTART list.
          */
         list = build_parameter_list(op, metadata, ra_param_unique, &restart);
 
     } else {
         // Resource does not support agent reloads
         return;
     }
 
     digest = calculate_operation_digest(restart, version);
     /* Add PCMK__XA_OP_FORCE_RESTART and PCMK__XA_OP_RESTART_DIGEST to indicate
      * that the resource supports reload, regardless of whether it actually has
      * any reloadable parameters
      */
     crm_xml_add(update, PCMK__XA_OP_FORCE_RESTART,
                 (list == NULL)? "" : (const char *) list->str);
     crm_xml_add(update, PCMK__XA_OP_RESTART_DIGEST, digest);
 
     if ((list != NULL) && (list->len > 0)) {
         crm_trace("%s: %s, %s", op->rsc_id, digest, (const char *) list->str);
     } else {
         crm_trace("%s: %s", op->rsc_id, digest);
     }
 
     if (list != NULL) {
         g_string_free(list, TRUE);
     }
     free_xml(restart);
     free(digest);
 }
 
 static void
 append_secure_list(lrmd_event_data_t *op, struct ra_metadata_s *metadata,
                    xmlNode *update, const char *version)
 {
     GString *list = NULL;
     char *digest = NULL;
     xmlNode *secure = NULL;
 
     CRM_LOG_ASSERT(op->params != NULL);
 
     /* To keep PCMK__XA_OP_SECURE_PARAMS short, we want it to contain the secure
      * parameters but PCMK__XA_OP_SECURE_DIGEST to be based on the insecure ones
      */
     list = build_parameter_list(op, metadata, ra_param_private, &secure);
 
     if (list != NULL) {
         digest = calculate_operation_digest(secure, version);
         crm_xml_add(update, PCMK__XA_OP_SECURE_PARAMS,
                     (const char *) list->str);
         crm_xml_add(update, PCMK__XA_OP_SECURE_DIGEST, digest);
 
         crm_trace("%s: %s, %s", op->rsc_id, digest, (const char *) list->str);
         g_string_free(list, TRUE);
     } else {
         crm_trace("%s: no secure parameters", op->rsc_id);
     }
 
     free_xml(secure);
     free(digest);
 }
 
 /*!
  * \internal
  * \brief Create XML for a resource history entry
  *
  * \param[in]     func       Function name of caller
  * \param[in,out] parent     XML to add entry to
  * \param[in]     rsc        Affected resource
  * \param[in,out] op         Action to add an entry for (or NULL to do nothing)
  * \param[in]     node_name  Node where action occurred
  */
 void
 controld_add_resource_history_xml_as(const char *func, xmlNode *parent,
                                      const lrmd_rsc_info_t *rsc,
                                      lrmd_event_data_t *op,
                                      const char *node_name)
 {
     int target_rc = 0;
     xmlNode *xml_op = NULL;
     struct ra_metadata_s *metadata = NULL;
     const char *caller_version = NULL;
     lrm_state_t *lrm_state = NULL;
 
     if (op == NULL) {
         return;
     }
 
     target_rc = rsc_op_expected_rc(op);
 
     caller_version = g_hash_table_lookup(op->params, PCMK_XA_CRM_FEATURE_SET);
     CRM_CHECK(caller_version != NULL, caller_version = CRM_FEATURE_SET);
 
     xml_op = pcmk__create_history_xml(parent, op, caller_version, target_rc,
                                       controld_globals.our_nodename, func);
     if (xml_op == NULL) {
         return;
     }
 
     if ((rsc == NULL) || (op->params == NULL)
         || !crm_op_needs_metadata(rsc->standard, op->op_type)) {
 
         crm_trace("No digests needed for %s action on %s (params=%p rsc=%p)",
                   op->op_type, op->rsc_id, op->params, rsc);
         return;
     }
 
     lrm_state = lrm_state_find(node_name);
     if (lrm_state == NULL) {
         crm_warn("Cannot calculate digests for operation " PCMK__OP_FMT
                  " because we have no connection to executor for %s",
                  op->rsc_id, op->op_type, op->interval_ms, node_name);
         return;
     }
 
     /* Ideally the metadata is cached, and the agent is just a fallback.
      *
      * @TODO Go through all callers and ensure they get metadata asynchronously
      * first.
      */
     metadata = controld_get_rsc_metadata(lrm_state, rsc,
                                          controld_metadata_from_agent
                                          |controld_metadata_from_cache);
     if (metadata == NULL) {
         return;
     }
 
     crm_trace("Including additional digests for %s:%s:%s",
               rsc->standard, rsc->provider, rsc->type);
     append_restart_list(op, metadata, xml_op, caller_version);
     append_secure_list(op, metadata, xml_op, caller_version);
 
     return;
 }
 
 /*!
  * \internal
  * \brief Record an action as pending in the CIB, if appropriate
  *
  * \param[in]     node_name  Node where the action is pending
  * \param[in]     rsc        Resource that action is for
  * \param[in,out] op         Pending action
  *
  * \return true if action was recorded in CIB, otherwise false
  */
 bool
 controld_record_pending_op(const char *node_name, const lrmd_rsc_info_t *rsc,
                            lrmd_event_data_t *op)
 {
     const char *record_pending = NULL;
 
     CRM_CHECK((node_name != NULL) && (rsc != NULL) && (op != NULL),
               return false);
 
     // Never record certain operation types as pending
     if ((op->op_type == NULL) || (op->params == NULL)
         || !controld_action_is_recordable(op->op_type)) {
         return false;
     }
 
     // Check action's PCMK_META_RECORD_PENDING meta-attribute (defaults to true)
     record_pending = crm_meta_value(op->params, PCMK_META_RECORD_PENDING);
     if ((record_pending != NULL) && !crm_is_true(record_pending)) {
         return false;
     }
 
     op->call_id = -1;
     op->t_run = time(NULL);
     op->t_rcchange = op->t_run;
 
     lrmd__set_result(op, PCMK_OCF_UNKNOWN, PCMK_EXEC_PENDING, NULL);
 
     crm_debug("Recording pending %s-interval %s for %s on %s in the CIB",
               pcmk__readable_interval(op->interval_ms), op->op_type, op->rsc_id,
               node_name);
     controld_update_resource_history(node_name, rsc, op, 0);
     return true;
 }
 
 static void
 cib_rsc_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     switch (rc) {
         case pcmk_ok:
         case -pcmk_err_diff_failed:
         case -pcmk_err_diff_resync:
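             /* Diff problems are not fatal here; the CIB manager
              * resynchronizes the CIB in these cases
              */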
             crm_trace("Resource history update completed (call=%d rc=%d)",
                       call_id, rc);
             break;
         default:
             if (call_id > 0) {
                 crm_warn("Resource history update %d failed: %s "
                          CRM_XS " rc=%d", call_id, pcmk_strerror(rc), rc);
             } else {
                 crm_warn("Resource history update failed: %s " CRM_XS " rc=%d",
                          pcmk_strerror(rc), rc);
             }
     }
 
     if (call_id == pending_rsc_update) {
         pending_rsc_update = 0;
         controld_trigger_fsa();
     }
 }
 
 /* Only successful stops, and probes that found the resource inactive, get locks
  * recorded in the history. This ensures the resource stays locked to the node
  * until it is active there again after the node comes back up.
  */
 static bool
 should_preserve_lock(lrmd_event_data_t *op)
 {
     if (!pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) {
         return false;
     }
     if (!strcmp(op->op_type, PCMK_ACTION_STOP) && (op->rc == PCMK_OCF_OK)) {
         return true;
     }
     if (!strcmp(op->op_type, PCMK_ACTION_MONITOR)
         && (op->rc == PCMK_OCF_NOT_RUNNING)) {
         return true;
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Request a CIB update
  *
  * \param[in]     section    Section of CIB to update
  * \param[in]     data       New XML of CIB section to update
  * \param[in]     options    CIB call options
  * \param[in]     callback   If not \c NULL, set this as the operation callback
  *
  * \return Standard Pacemaker return code
  *
  * \note If \p callback is \p cib_rsc_callback(), the CIB update's call ID is
  *       stored in \p pending_rsc_update on success.
  */
 int
 controld_update_cib(const char *section, xmlNode *data, int options,
                     void (*callback)(xmlNode *, int, int, xmlNode *, void *))
 {
     cib_t *cib = controld_globals.cib_conn;
     int cib_rc = -ENOTCONN;
 
     CRM_ASSERT(data != NULL);
 
     if (cib != NULL) {
         cib_rc = cib->cmds->modify(cib, section, data, options);
         if (cib_rc >= 0) {
             crm_debug("Submitted CIB update %d for %s section",
                       cib_rc, section);
         }
     }
 
     if (callback == NULL) {
         if (cib_rc < 0) {
             crm_err("Failed to update CIB %s section: %s",
                     section, pcmk_rc_str(pcmk_legacy2rc(cib_rc)));
         }
 
     } else {
         if ((cib_rc >= 0) && (callback == cib_rsc_callback)) {
             /* Checking for a particular callback is a little hacky, but it
              * didn't seem worth adding an output argument for cib_rc for just
              * one use case.
              */
             pending_rsc_update = cib_rc;
         }
         fsa_register_cib_callback(cib_rc, NULL, callback);
     }
 
     return (cib_rc >= 0)? pcmk_rc_ok : pcmk_legacy2rc(cib_rc);
 }
 
 /*!
  * \internal
  * \brief Update resource history entry in CIB
  *
  * \param[in]     node_name  Node where action occurred
  * \param[in]     rsc        Resource that action is for
  * \param[in,out] op         Action to record
  * \param[in]     lock_time  If nonzero, when resource was locked to node
  *
  * \note On success, the CIB update's call ID will be stored in
  *       pending_rsc_update.
  */
 void
 controld_update_resource_history(const char *node_name,
                                  const lrmd_rsc_info_t *rsc,
                                  lrmd_event_data_t *op, time_t lock_time)
 {
     xmlNode *update = NULL;
     xmlNode *xml = NULL;
     int call_opt = crmd_cib_smart_opt();
     const char *node_id = NULL;
     const char *container = NULL;
 
     CRM_CHECK((node_name != NULL) && (op != NULL), return);
 
     if (rsc == NULL) {
         crm_warn("Resource %s no longer exists in the executor", op->rsc_id);
         controld_ack_event_directly(NULL, NULL, rsc, op, op->rsc_id);
         return;
     }
 
     // <status>
     update = pcmk__xe_create(NULL, PCMK_XE_STATUS);
 
     //   <node_state ...>
     xml = pcmk__xe_create(update, PCMK__XE_NODE_STATE);
     if (pcmk__str_eq(node_name, controld_globals.our_nodename,
                      pcmk__str_casei)) {
         node_id = controld_globals.our_uuid;
     } else {
         node_id = node_name;
         pcmk__xe_set_bool_attr(xml, PCMK_XA_REMOTE_NODE, true);
     }
     crm_xml_add(xml, PCMK_XA_ID, node_id);
     crm_xml_add(xml, PCMK_XA_UNAME, node_name);
     crm_xml_add(xml, PCMK_XA_CRM_DEBUG_ORIGIN, __func__);
 
     //     <lrm ...>
     xml = pcmk__xe_create(xml, PCMK__XE_LRM);
     crm_xml_add(xml, PCMK_XA_ID, node_id);
 
     //       <lrm_resources>
     xml = pcmk__xe_create(xml, PCMK__XE_LRM_RESOURCES);
 
     //         <lrm_resource ...>
     xml = pcmk__xe_create(xml, PCMK__XE_LRM_RESOURCE);
     crm_xml_add(xml, PCMK_XA_ID, op->rsc_id);
     crm_xml_add(xml, PCMK_XA_CLASS, rsc->standard);
     crm_xml_add(xml, PCMK_XA_PROVIDER, rsc->provider);
     crm_xml_add(xml, PCMK_XA_TYPE, rsc->type);
     if (lock_time != 0) {
         /* Actions on a locked resource should either preserve the lock by
          * recording it with the action result, or clear it.
          */
         if (!should_preserve_lock(op)) {
             lock_time = 0;
         }
         crm_xml_add_ll(xml, PCMK_OPT_SHUTDOWN_LOCK, (long long) lock_time);
     }
     if (op->params != NULL) {
         container = g_hash_table_lookup(op->params,
                                         CRM_META "_" PCMK__META_CONTAINER);
         if (container != NULL) {
             crm_trace("Resource %s is a part of container resource %s",
                       op->rsc_id, container);
             crm_xml_add(xml, PCMK__META_CONTAINER, container);
         }
     }
 
     //           <lrm_resource_op ...> (possibly more than one)
     controld_add_resource_history_xml(xml, rsc, op, node_name);
 
     /* Update the CIB asynchronously. Even if the update fails, the resource
      * state should be discovered during the next election. Worst case, the
      * node is wrongly fenced for a resource that it is not actually running.
      */
     crm_log_xml_trace(update, __func__);
     controld_update_cib(PCMK_XE_STATUS, update, call_opt, cib_rsc_callback);
     free_xml(update);
 }
 
 /*!
  * \internal
  * \brief Erase an LRM history entry from the CIB, given the operation data
  *
  * \param[in] op         Operation whose history should be deleted
  */
 void
 controld_delete_action_history(const lrmd_event_data_t *op)
 {
     xmlNode *xml_top = NULL;
 
     CRM_CHECK(op != NULL, return);
 
     xml_top = pcmk__xe_create(NULL, PCMK__XE_LRM_RSC_OP);
     crm_xml_add_int(xml_top, PCMK__XA_CALL_ID, op->call_id);
     crm_xml_add(xml_top, PCMK__XA_TRANSITION_KEY, op->user_data);
 
     if (op->interval_ms > 0) {
         char *op_id = pcmk__op_key(op->rsc_id, op->op_type, op->interval_ms);
 
         /* Avoid deleting last_failure too (if it was a result of this recurring op failing) */
         crm_xml_add(xml_top, PCMK_XA_ID, op_id);
         free(op_id);
     }
 
     crm_debug("Erasing resource operation history for " PCMK__OP_FMT " (call=%d)",
               op->rsc_id, op->op_type, op->interval_ms, op->call_id);
 
     controld_globals.cib_conn->cmds->remove(controld_globals.cib_conn,
                                             PCMK_XE_STATUS, xml_top, cib_none);
     crm_log_xml_trace(xml_top, "op:cancel");
     free_xml(xml_top);
 }
 
 /* Define xpath to find LRM resource history entry by node and resource */
 #define XPATH_HISTORY                                   \
     "/" PCMK_XE_CIB "/" PCMK_XE_STATUS                  \
     "/" PCMK__XE_NODE_STATE "[@" PCMK_XA_UNAME "='%s']" \
     "/" PCMK__XE_LRM "/" PCMK__XE_LRM_RESOURCES         \
     "/" PCMK__XE_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']"  \
     "/" PCMK__XE_LRM_RSC_OP
 
 /* ... and also by operation key */
 #define XPATH_HISTORY_ID XPATH_HISTORY "[@" PCMK_XA_ID "='%s']"
 
 /* ... and also by operation key and operation call ID */
 #define XPATH_HISTORY_CALL XPATH_HISTORY \
     "[@" PCMK_XA_ID "='%s' and @" PCMK__XA_CALL_ID "='%d']"
 
 /* ... and also by operation key and original operation key */
 #define XPATH_HISTORY_ORIG XPATH_HISTORY \
     "[@" PCMK_XA_ID "='%s' and @" PCMK__XA_OPERATION_KEY "='%s']"
 
 /*!
  * \internal
  * \brief Delete a last_failure resource history entry from the CIB
  *
  * \param[in] rsc_id       Name of resource to clear history for
  * \param[in] node         Name of node to clear history for
  * \param[in] action       If specified, delete only if this was the failed action
  * \param[in] interval_ms  If \p action is specified, it has this interval
  */
 void
 controld_cib_delete_last_failure(const char *rsc_id, const char *node,
                                  const char *action, guint interval_ms)
 {
     char *xpath = NULL;
     char *last_failure_key = NULL;
     CRM_CHECK((rsc_id != NULL) && (node != NULL), return);
 
     // Generate XPath to match desired entry
     last_failure_key = pcmk__op_key(rsc_id, "last_failure", 0);
     if (action == NULL) {
         xpath = crm_strdup_printf(XPATH_HISTORY_ID, node, rsc_id,
                                   last_failure_key);
     } else {
         char *action_key = pcmk__op_key(rsc_id, action, interval_ms);
 
         xpath = crm_strdup_printf(XPATH_HISTORY_ORIG, node, rsc_id,
                                   last_failure_key, action_key);
         free(action_key);
     }
     free(last_failure_key);
 
     controld_globals.cib_conn->cmds->remove(controld_globals.cib_conn, xpath,
                                             NULL, cib_xpath);
     free(xpath);
 }
 
 /*!
  * \internal
  * \brief Delete resource history entry from the CIB, given operation key
  *
  * \param[in] rsc_id     Name of resource to clear history for
  * \param[in] node       Name of node to clear history for
  * \param[in] key        Operation key of operation to clear history for
  * \param[in] call_id    If specified, delete entry only if it has this call ID
  */
 void
 controld_delete_action_history_by_key(const char *rsc_id, const char *node,
                                       const char *key, int call_id)
 {
     char *xpath = NULL;
 
     CRM_CHECK((rsc_id != NULL) && (node != NULL) && (key != NULL), return);
 
     if (call_id > 0) {
         xpath = crm_strdup_printf(XPATH_HISTORY_CALL, node, rsc_id, key,
                                   call_id);
     } else {
         xpath = crm_strdup_printf(XPATH_HISTORY_ID, node, rsc_id, key);
     }
     controld_globals.cib_conn->cmds->remove(controld_globals.cib_conn, xpath,
                                             NULL, cib_xpath);
     free(xpath);
 }
diff --git a/daemons/controld/controld_join_dc.c b/daemons/controld/controld_join_dc.c
index b3d9526db6..c928b65e84 100644
--- a/daemons/controld/controld_join_dc.c
+++ b/daemons/controld/controld_join_dc.c
@@ -1,1045 +1,1045 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/crm.h>
 
 #include <crm/common/xml.h>
 #include <crm/cluster.h>
 
 #include <pacemaker-controld.h>
 
 static char *max_generation_from = NULL;
 static xmlNodePtr max_generation_xml = NULL;
 
 /*!
  * \internal
  * \brief Nodes from which a CIB sync has failed since the peer joined
  *
  * This table is of the form (<tt>node_name -> join_id</tt>). \p node_name is
  * the name of a client node from which a CIB \p sync_from() call has failed in
  * \p do_dc_join_finalize() since the client joined the cluster as a peer.
  * \p join_id is the ID of the join round in which the \p sync_from() failed,
  * and is intended for use in nack log messages.
  */
 static GHashTable *failed_sync_nodes = NULL;
 
 void finalize_join_for(gpointer key, gpointer value, gpointer user_data);
 void finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data);
 gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source);
 
 /* Numeric counter used to identify join rounds (an unsigned int would be
  * appropriate, except we get and set it in XML as int)
  */
 static int current_join_id = 0;
 
 /*!
  * \internal
  * \brief Destroy the hash table containing failed sync nodes
  */
 void
 controld_destroy_failed_sync_table(void)
 {
     if (failed_sync_nodes != NULL) {
         g_hash_table_destroy(failed_sync_nodes);
         failed_sync_nodes = NULL;
     }
 }
 
 /*!
  * \internal
  * \brief Remove a node from the failed sync nodes table if present
  *
  * \param[in] node_name  Node name to remove
  */
 void
 controld_remove_failed_sync_node(const char *node_name)
 {
     if (failed_sync_nodes != NULL) {
         g_hash_table_remove(failed_sync_nodes, (gchar *) node_name);
     }
 }
 
 /*!
  * \internal
  * \brief Add to a hash table a node whose CIB failed to sync
  *
  * \param[in] node_name  Name of node whose CIB failed to sync
  * \param[in] join_id    Join round when the failure occurred
  */
 static void
 record_failed_sync_node(const char *node_name, gint join_id)
 {
     if (failed_sync_nodes == NULL) {
         failed_sync_nodes = pcmk__strikey_table(g_free, NULL);
     }
 
     /* If the node is already in the table then we failed to nack it during the
      * filter offer step
      */
     CRM_LOG_ASSERT(g_hash_table_insert(failed_sync_nodes, g_strdup(node_name),
                                        GINT_TO_POINTER(join_id)));
 }
 
 /*!
  * \internal
  * \brief Look up a node name in the failed sync table
  *
  * \param[in]  node_name  Name of node to look up
  * \param[out] join_id    Where to store the join ID of when the sync failed
  *
  * \return Standard Pacemaker return code. Specifically, \p pcmk_rc_ok if the
  *         node name was found, or \p pcmk_rc_node_unknown otherwise.
  * \note \p *join_id is set to -1 if the node is not found.
  */
 static int
 lookup_failed_sync_node(const char *node_name, gint *join_id)
 {
     *join_id = -1;
 
     if (failed_sync_nodes != NULL) {
         gpointer result = g_hash_table_lookup(failed_sync_nodes,
                                               (gchar *) node_name);
         if (result != NULL) {
             *join_id = GPOINTER_TO_INT(result);
             return pcmk_rc_ok;
         }
     }
     return pcmk_rc_node_unknown;
 }
 
 void
 crm_update_peer_join(const char *source, crm_node_t * node, enum crm_join_phase phase)
 {
     enum crm_join_phase last = 0;
 
     CRM_CHECK(node != NULL, return);
 
     /* Remote nodes do not participate in joins */
     if (pcmk_is_set(node->flags, crm_remote_node)) {
         return;
     }
 
     last = node->join;
 
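     /* A join phase may be reset (to crm_join_none or lower) or advanced by
      * exactly one step at a time; anything else is rejected below
      */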
     if(phase == last) {
         crm_trace("Node %s join-%d phase is still %s "
                   CRM_XS " nodeid=%u source=%s",
                   node->uname, current_join_id, crm_join_phase_str(last),
                   node->id, source);
 
     } else if ((phase <= crm_join_none) || (phase == (last + 1))) {
         node->join = phase;
         crm_trace("Node %s join-%d phase is now %s (was %s) "
                   CRM_XS " nodeid=%u source=%s",
                    node->uname, current_join_id, crm_join_phase_str(phase),
                    crm_join_phase_str(last), node->id, source);
 
     } else {
         crm_warn("Rejecting join-%d phase update for node %s because "
                  "can't go from %s to %s " CRM_XS " nodeid=%u source=%s",
                  current_join_id, node->uname, crm_join_phase_str(last),
                  crm_join_phase_str(phase), node->id, source);
     }
 }
 
 static void
 start_join_round(void)
 {
     GHashTableIter iter;
     crm_node_t *peer = NULL;
 
     crm_debug("Starting new join round join-%d", current_join_id);
 
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &peer)) {
         crm_update_peer_join(__func__, peer, crm_join_none);
     }
     if (max_generation_from != NULL) {
         free(max_generation_from);
         max_generation_from = NULL;
     }
     if (max_generation_xml != NULL) {
         free_xml(max_generation_xml);
         max_generation_xml = NULL;
     }
     controld_clear_fsa_input_flags(R_HAVE_CIB);
 }
 
 /*!
  * \internal
  * \brief Create a join message from the DC
  *
  * \param[in] join_op  Join operation name
  * \param[in] host_to  Recipient of message
  */
 static xmlNode *
 create_dc_message(const char *join_op, const char *host_to)
 {
     xmlNode *msg = create_request(join_op, NULL, host_to, CRM_SYSTEM_CRMD,
                                   CRM_SYSTEM_DC, NULL);
 
     /* Identify which election this is a part of */
     crm_xml_add_int(msg, PCMK__XA_JOIN_ID, current_join_id);
 
     /* Add a field specifying whether the DC is shutting down. This keeps the
      * joining node from fencing the old DC if it becomes the new DC.
      */
     pcmk__xe_set_bool_attr(msg, PCMK__XA_DC_LEAVING,
                            pcmk_is_set(controld_globals.fsa_input_register,
                                        R_SHUTDOWN));
     return msg;
 }
 
 static void
 join_make_offer(gpointer key, gpointer value, gpointer user_data)
 {
     xmlNode *offer = NULL;
     crm_node_t *member = (crm_node_t *)value;
 
     CRM_ASSERT(member != NULL);
     if (crm_is_peer_active(member) == FALSE) {
         crm_info("Not making join-%d offer to inactive node %s",
                  current_join_id,
                  (member->uname? member->uname : "with unknown name"));
         if(member->expected == NULL && pcmk__str_eq(member->state, CRM_NODE_LOST, pcmk__str_casei)) {
             /* You would think this unsafe, but in fact this plus an
              * active resource is what causes it to be fenced.
              *
              * Yes, this does mean that any node that dies at the same
              * time as the old DC and is not (still) running any resources
              * won't be fenced.
              *
              * I'm not happy about this either.
              */
             pcmk__update_peer_expected(__func__, member, CRMD_JOINSTATE_DOWN);
         }
         return;
     }
 
     if (member->uname == NULL) {
         crm_info("Not making join-%d offer to node uuid %s with unknown name",
                  current_join_id, member->uuid);
         return;
     }
 
     if (controld_globals.membership_id != crm_peer_seq) {
         controld_globals.membership_id = crm_peer_seq;
         crm_info("Making join-%d offers based on membership event %llu",
                  current_join_id, crm_peer_seq);
     }
 
     if(user_data && member->join > crm_join_none) {
         crm_info("Not making join-%d offer to already known node %s (%s)",
                  current_join_id, member->uname,
                  crm_join_phase_str(member->join));
         return;
     }
 
     crm_update_peer_join(__func__, (crm_node_t*)member, crm_join_none);
 
     offer = create_dc_message(CRM_OP_JOIN_OFFER, member->uname);
 
     // Advertise our feature set so the joining node can bail if not compatible
     crm_xml_add(offer, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET);
 
     crm_info("Sending join-%d offer to %s", current_join_id, member->uname);
     send_cluster_message(member, crm_msg_crmd, offer, TRUE);
     free_xml(offer);
 
     crm_update_peer_join(__func__, member, crm_join_welcomed);
 }
 
 /*	 A_DC_JOIN_OFFER_ALL	*/
 void
 do_dc_join_offer_all(long long action,
                      enum crmd_fsa_cause cause,
                      enum crmd_fsa_state cur_state,
                      enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     int count;
 
     /* Reset everyone's status back to down or in_ccm in the CIB.
      * Any nodes that are active in the CIB but not in the cluster membership
      * will be seen as offline by the scheduler anyway.
      */
     current_join_id++;
     start_join_round();
 
     update_dc(NULL);
     if (cause == C_HA_MESSAGE && current_input == I_NODE_JOIN) {
         crm_info("A new node joined the cluster");
     }
     g_hash_table_foreach(crm_peer_cache, join_make_offer, NULL);
 
     count = crmd_join_phase_count(crm_join_welcomed);
     crm_info("Waiting on join-%d requests from %d outstanding node%s",
              current_join_id, count, pcmk__plural_s(count));
 
     // Don't waste time by invoking the scheduler yet
 }
 
 /*	 A_DC_JOIN_OFFER_ONE	*/
 void
 do_dc_join_offer_one(long long action,
                      enum crmd_fsa_cause cause,
                      enum crmd_fsa_state cur_state,
                      enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     crm_node_t *member;
     ha_msg_input_t *welcome = NULL;
     int count;
     const char *join_to = NULL;
 
     if (msg_data->data == NULL) {
         crm_info("Making join-%d offers to any unconfirmed nodes "
                  "because an unknown node joined", current_join_id);
         g_hash_table_foreach(crm_peer_cache, join_make_offer, &member);
         check_join_state(cur_state, __func__);
         return;
     }
 
     welcome = fsa_typed_data(fsa_dt_ha_msg);
     if (welcome == NULL) {
         // fsa_typed_data() already logged an error
         return;
     }
 
     join_to = crm_element_value(welcome->msg, PCMK__XA_SRC);
     if (join_to == NULL) {
         crm_err("Can't make join-%d offer to unknown node", current_join_id);
         return;
     }
     member = pcmk__get_node(0, join_to, NULL, pcmk__node_search_cluster);
 
     /* It is possible that a node will have been sick or starting up when the
      * original offer was made. However, it will either re-announce itself in
      * due course, or we can re-send the original offer to the client.
      */
 
     crm_update_peer_join(__func__, member, crm_join_none);
     join_make_offer(NULL, member, NULL);
 
     /* If the offer isn't to the local node, make an offer to the local node as
      * well, to ensure the correct value for max_generation_from.
      */
     if (strcasecmp(join_to, controld_globals.our_nodename) != 0) {
         member = pcmk__get_node(0, controld_globals.our_nodename, NULL,
                                 pcmk__node_search_cluster);
         join_make_offer(NULL, member, NULL);
     }
 
     /* This was a genuine join request; cancel any existing transition and
      * invoke the scheduler.
      */
     abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Node join",
                      NULL);
 
     count = crmd_join_phase_count(crm_join_welcomed);
     crm_info("Waiting on join-%d requests from %d outstanding node%s",
              current_join_id, count, pcmk__plural_s(count));
 
     // Don't waste time by invoking the scheduler yet
 }
 
 static int
 compare_int_fields(xmlNode * left, xmlNode * right, const char *field)
 {
     const char *elem_l = crm_element_value(left, field);
     const char *elem_r = crm_element_value(right, field);
 
     long long int_elem_l;
     long long int_elem_r;
 
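     // Missing or unparsable values default to -1, sorting below any valid value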
     pcmk__scan_ll(elem_l, &int_elem_l, -1LL);
     pcmk__scan_ll(elem_r, &int_elem_r, -1LL);
 
     if (int_elem_l < int_elem_r) {
         return -1;
 
     } else if (int_elem_l > int_elem_r) {
         return 1;
     }
 
     return 0;
 }
 
 /*	 A_DC_JOIN_PROCESS_REQ	*/
 void
 do_dc_join_filter_offer(long long action,
                         enum crmd_fsa_cause cause,
                         enum crmd_fsa_state cur_state,
                         enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     xmlNode *generation = NULL;
 
     int cmp = 0;
     int join_id = -1;
     int count = 0;
     gint value = 0;
     gboolean ack_nack_bool = TRUE;
     ha_msg_input_t *join_ack = fsa_typed_data(fsa_dt_ha_msg);
 
     const char *join_from = crm_element_value(join_ack->msg, PCMK__XA_SRC);
     const char *ref = crm_element_value(join_ack->msg, PCMK_XA_REFERENCE);
     const char *join_version = crm_element_value(join_ack->msg,
                                                  PCMK_XA_CRM_FEATURE_SET);
     crm_node_t *join_node = NULL;
 
     if (join_from == NULL) {
         crm_err("Ignoring invalid join request without node name");
         return;
     }
     join_node = pcmk__get_node(0, join_from, NULL, pcmk__node_search_cluster);
 
     crm_element_value_int(join_ack->msg, PCMK__XA_JOIN_ID, &join_id);
     if (join_id != current_join_id) {
         crm_debug("Ignoring join-%d request from %s because we are on join-%d",
                   join_id, join_from, current_join_id);
         check_join_state(cur_state, __func__);
         return;
     }
 
     generation = join_ack->xml;
     if (max_generation_xml != NULL && generation != NULL) {
         int lpc = 0;
 
         const char *attributes[] = {
             PCMK_XA_ADMIN_EPOCH,
             PCMK_XA_EPOCH,
             PCMK_XA_NUM_UPDATES,
         };
 
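         /* The attributes are listed in precedence order; the first field that
          * differs determines which CIB generation is newer
          */
 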
         /* It's not obvious that join_ack->xml is the PCMK__XE_GENERATION_TUPLE
          * element from the join client. The "if" guard is for clarity.
          */
         if (pcmk__xe_is(generation, PCMK__XE_GENERATION_TUPLE)) {
             for (lpc = 0; cmp == 0 && lpc < PCMK__NELEM(attributes); lpc++) {
                 cmp = compare_int_fields(max_generation_xml, generation,
                                          attributes[lpc]);
             }
 
         } else {    // Should always be PCMK__XE_GENERATION_TUPLE
             CRM_LOG_ASSERT(false);
         }
     }
 
     if (ref == NULL) {
         ref = "none"; // for logging only
     }
 
     if (lookup_failed_sync_node(join_from, &value) == pcmk_rc_ok) {
         crm_err("Rejecting join-%d request from node %s because we failed to "
                 "sync its CIB in join-%d " CRM_XS " ref=%s",
                 join_id, join_from, value, ref);
         ack_nack_bool = FALSE;
 
     } else if (!crm_is_peer_active(join_node)) {
         if (match_down_event(join_from) != NULL) {
             /* The join request was received after the node was fenced or
              * otherwise shutdown in a way that we're aware of. No need to log
              * an error in this rare occurrence; we know the client was recently
              * shut down, and receiving a lingering in-flight request is not
              * cause for alarm.
              */
             crm_debug("Rejecting join-%d request from inactive node %s "
                       CRM_XS " ref=%s", join_id, join_from, ref);
         } else {
             crm_err("Rejecting join-%d request from inactive node %s "
                     CRM_XS " ref=%s", join_id, join_from, ref);
         }
         ack_nack_bool = FALSE;
 
     } else if (generation == NULL) {
         crm_err("Rejecting invalid join-%d request from node %s "
                 "missing CIB generation " CRM_XS " ref=%s",
                 join_id, join_from, ref);
         ack_nack_bool = FALSE;
 
     } else if ((join_version == NULL)
                || !feature_set_compatible(CRM_FEATURE_SET, join_version)) {
         crm_err("Rejecting join-%d request from node %s because feature set %s"
                 " is incompatible with ours (%s) " CRM_XS " ref=%s",
                 join_id, join_from, (join_version? join_version : "pre-3.1.0"),
                 CRM_FEATURE_SET, ref);
         ack_nack_bool = FALSE;
 
     } else if (max_generation_xml == NULL) {
         const char *validation = crm_element_value(generation,
                                                    PCMK_XA_VALIDATE_WITH);
 
         if (pcmk__get_schema(validation) == NULL) {
             crm_err("Rejecting join-%d request from %s (with first CIB "
                     "generation) due to unknown schema version %s "
                     CRM_XS " ref=%s",
                     join_id, join_from, pcmk__s(validation, "(missing)"), ref);
             ack_nack_bool = FALSE;
 
         } else {
             crm_debug("Accepting join-%d request from %s (with first CIB "
                       "generation) " CRM_XS " ref=%s",
                       join_id, join_from, ref);
             max_generation_xml = pcmk__xml_copy(NULL, generation);
             pcmk__str_update(&max_generation_from, join_from);
         }
 
     } else if ((cmp < 0)
                || ((cmp == 0)
                    && pcmk__str_eq(join_from, controld_globals.our_nodename,
                                    pcmk__str_casei))) {
         const char *validation = crm_element_value(generation,
                                                    PCMK_XA_VALIDATE_WITH);
 
         if (pcmk__get_schema(validation) == NULL) {
             crm_err("Rejecting join-%d request from %s (with better CIB "
                     "generation than current best from %s) due to unknown "
                     "schema version %s " CRM_XS " ref=%s",
                     join_id, join_from, max_generation_from,
                     pcmk__s(validation, "(missing)"), ref);
             ack_nack_bool = FALSE;
 
         } else {
             crm_debug("Accepting join-%d request from %s (with better CIB "
                       "generation than current best from %s) " CRM_XS " ref=%s",
                       join_id, join_from, max_generation_from, ref);
             crm_log_xml_debug(max_generation_xml, "Old max generation");
             crm_log_xml_debug(generation, "New max generation");
 
             free_xml(max_generation_xml);
             max_generation_xml = pcmk__xml_copy(NULL, join_ack->xml);
             pcmk__str_update(&max_generation_from, join_from);
         }
 
     } else {
         crm_debug("Accepting join-%d request from %s " CRM_XS " ref=%s",
                   join_id, join_from, ref);
     }
 
     if (!ack_nack_bool) {
         if (compare_version(join_version, "3.17.0") < 0) {
             /* Clients with CRM_FEATURE_SET < 3.17.0 may respawn infinitely
              * after a nack message, so don't send one
              */
             crm_update_peer_join(__func__, join_node, crm_join_nack_quiet);
         } else {
             crm_update_peer_join(__func__, join_node, crm_join_nack);
         }
         pcmk__update_peer_expected(__func__, join_node, CRMD_JOINSTATE_NACK);
 
     } else {
         crm_update_peer_join(__func__, join_node, crm_join_integrated);
         pcmk__update_peer_expected(__func__, join_node, CRMD_JOINSTATE_MEMBER);
     }
 
     count = crmd_join_phase_count(crm_join_integrated);
     crm_debug("%d node%s currently integrated in join-%d",
               count, pcmk__plural_s(count), join_id);
 
     if (check_join_state(cur_state, __func__) == FALSE) {
         // Don't waste time by invoking the scheduler yet
         count = crmd_join_phase_count(crm_join_welcomed);
         crm_debug("Waiting on join-%d requests from %d outstanding node%s",
                   join_id, count, pcmk__plural_s(count));
     }
 }
 
 /*	A_DC_JOIN_FINALIZE	*/
 void
 do_dc_join_finalize(long long action,
                     enum crmd_fsa_cause cause,
                     enum crmd_fsa_state cur_state,
                     enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     char *sync_from = NULL;
     int rc = pcmk_ok;
     int count_welcomed = crmd_join_phase_count(crm_join_welcomed);
     int count_finalizable = crmd_join_phase_count(crm_join_integrated)
                             + crmd_join_phase_count(crm_join_nack)
                             + crmd_join_phase_count(crm_join_nack_quiet);
 
     /* We can check this straight away, to avoid clients timing us out
      * while we compute the latest CIB
      */
     if (count_welcomed != 0) {
         crm_debug("Waiting on join-%d requests from %d outstanding node%s "
                   "before finalizing join", current_join_id, count_welcomed,
                   pcmk__plural_s(count_welcomed));
         crmd_join_phase_log(LOG_DEBUG);
         /* @TODO Is crmd_fsa_stall(FALSE) needed here? */
         return;
 
     } else if (count_finalizable == 0) {
         crm_debug("Finalization not needed for join-%d at the current time",
                   current_join_id);
         crmd_join_phase_log(LOG_DEBUG);
         check_join_state(controld_globals.fsa_state, __func__);
         return;
     }
 
     controld_clear_fsa_input_flags(R_HAVE_CIB);
     if (pcmk__str_eq(max_generation_from, controld_globals.our_nodename,
                      pcmk__str_null_matches|pcmk__str_casei)) {
         controld_set_fsa_input_flags(R_HAVE_CIB);
     }
 
     if (!controld_globals.transition_graph->complete) {
         crm_warn("Delaying join-%d finalization while transition in progress",
                  current_join_id);
         crmd_join_phase_log(LOG_DEBUG);
         crmd_fsa_stall(FALSE);
         return;
     }
 
     if (pcmk_is_set(controld_globals.fsa_input_register, R_HAVE_CIB)) {
         // Send our CIB out to everyone
         sync_from = pcmk__str_copy(controld_globals.our_nodename);
         crm_debug("Finalizing join-%d for %d node%s (sync'ing from local CIB)",
                   current_join_id, count_finalizable,
                   pcmk__plural_s(count_finalizable));
         crm_log_xml_debug(max_generation_xml, "Requested CIB version");
 
     } else {
         // Ask for the agreed best CIB
         sync_from = pcmk__str_copy(max_generation_from);
         crm_notice("Finalizing join-%d for %d node%s (sync'ing CIB from %s)",
                    current_join_id, count_finalizable,
                    pcmk__plural_s(count_finalizable), sync_from);
         crm_log_xml_notice(max_generation_xml, "Requested CIB version");
     }
     crmd_join_phase_log(LOG_DEBUG);
 
     rc = controld_globals.cib_conn->cmds->sync_from(controld_globals.cib_conn,
                                                     sync_from, NULL, cib_none);
     fsa_register_cib_callback(rc, sync_from, finalize_sync_callback);
 }
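+
+/* Note: sync_from is handed to fsa_register_cib_callback() as user_data;
+ * finalize_sync_callback() (below) reads it back as the name of the node we
+ * tried to sync from, and it is freed after the callback runs (compare the
+ * join_from handling in do_dc_join_ack()).
+ */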
 
 void
 free_max_generation(void)
 {
     free(max_generation_from);
     max_generation_from = NULL;
 
     free_xml(max_generation_xml);
     max_generation_xml = NULL;
 }
 
 void
 finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     CRM_LOG_ASSERT(-EPERM != rc);
 
     if (rc != pcmk_ok) {
         const char *sync_from = (const char *) user_data;
 
         do_crm_log(((rc == -pcmk_err_old_data)? LOG_WARNING : LOG_ERR),
                    "Could not sync CIB from %s in join-%d: %s",
                    sync_from, current_join_id, pcmk_strerror(rc));
 
         if (rc != -pcmk_err_old_data) {
             record_failed_sync_node(sync_from, current_join_id);
         }
 
         /* restart the whole join process */
         register_fsa_error_adv(C_FSA_INTERNAL, I_ELECTION_DC, NULL, NULL,
                                __func__);
 
     } else if (!AM_I_DC) {
         crm_debug("Sync'ed CIB for join-%d but no longer DC", current_join_id);
 
     } else if (controld_globals.fsa_state != S_FINALIZE_JOIN) {
         crm_debug("Sync'ed CIB for join-%d but no longer in S_FINALIZE_JOIN "
                   "(%s)", current_join_id,
                   fsa_state2string(controld_globals.fsa_state));
 
     } else {
         controld_set_fsa_input_flags(R_HAVE_CIB);
 
         /* make sure dc_uuid is re-set to us */
         if (!check_join_state(controld_globals.fsa_state, __func__)) {
             int count_finalizable = 0;
 
             count_finalizable = crmd_join_phase_count(crm_join_integrated)
                                 + crmd_join_phase_count(crm_join_nack)
                                 + crmd_join_phase_count(crm_join_nack_quiet);
 
             crm_debug("Notifying %d node%s of join-%d results",
                       count_finalizable, pcmk__plural_s(count_finalizable),
                       current_join_id);
             g_hash_table_foreach(crm_peer_cache, finalize_join_for, NULL);
         }
     }
 }
 
 static void
 join_node_state_commit_callback(xmlNode *msg, int call_id, int rc,
                                 xmlNode *output, void *user_data)
 {
     const char *node = user_data;
 
     if (rc != pcmk_ok) {
         fsa_data_t *msg_data = NULL;    // for register_fsa_error() macro
 
         crm_crit("join-%d node history update (via CIB call %d) for node %s "
                  "failed: %s",
                  current_join_id, call_id, node, pcmk_strerror(rc));
         crm_log_xml_debug(msg, "failed");
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
     }
 
     crm_debug("join-%d node history update (via CIB call %d) for node %s "
               "complete",
               current_join_id, call_id, node);
     check_join_state(controld_globals.fsa_state, __func__);
 }
 
 /*	A_DC_JOIN_PROCESS_ACK	*/
 void
 do_dc_join_ack(long long action,
                enum crmd_fsa_cause cause,
                enum crmd_fsa_state cur_state,
                enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     int join_id = -1;
     ha_msg_input_t *join_ack = fsa_typed_data(fsa_dt_ha_msg);
 
     const char *op = crm_element_value(join_ack->msg, PCMK__XA_CRM_TASK);
     char *join_from = crm_element_value_copy(join_ack->msg, PCMK__XA_SRC);
     crm_node_t *peer = NULL;
 
     enum controld_section_e section = controld_section_lrm;
     char *xpath = NULL;
     xmlNode *state = join_ack->xml;
     xmlNode *execd_state = NULL;
 
     cib_t *cib = controld_globals.cib_conn;
     int rc = pcmk_ok;
 
     // Sanity checks
     if (join_from == NULL) {
         crm_warn("Ignoring message received without node identification");
         goto done;
     }
     if (op == NULL) {
         crm_warn("Ignoring message received from %s without task", join_from);
         goto done;
     }
 
     if (strcmp(op, CRM_OP_JOIN_CONFIRM)) {
         crm_debug("Ignoring '%s' message from %s while waiting for '%s'",
                   op, join_from, CRM_OP_JOIN_CONFIRM);
         goto done;
     }
 
     if (crm_element_value_int(join_ack->msg, PCMK__XA_JOIN_ID, &join_id) != 0) {
         crm_warn("Ignoring join confirmation from %s without valid join ID",
                  join_from);
         goto done;
     }
 
     peer = pcmk__get_node(0, join_from, NULL, pcmk__node_search_cluster);
     if (peer->join != crm_join_finalized) {
         crm_info("Ignoring out-of-sequence join-%d confirmation from %s "
                  "(currently %s not %s)",
                  join_id, join_from, crm_join_phase_str(peer->join),
                  crm_join_phase_str(crm_join_finalized));
         goto done;
     }
 
     if (join_id != current_join_id) {
         crm_err("Rejecting join-%d confirmation from %s "
                 "because currently on join-%d",
                 join_id, join_from, current_join_id);
         crm_update_peer_join(__func__, peer, crm_join_nack);
         goto done;
     }
 
     crm_update_peer_join(__func__, peer, crm_join_confirmed);
 
     /* Update CIB with node's current executor state. A new transition will be
      * triggered later, when the CIB manager notifies us of the change.
      *
      * The delete and modify requests are part of an atomic transaction.
      */
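+    /* Sketch of the pattern used below (all of these calls appear in this
+     * function):
+     *
+     *   rc = cib->cmds->init_transaction(cib);
+     *   // queue remove()/modify() requests with the cib_transaction flag
+     *   rc = cib->cmds->end_transaction(cib, true, cib_scope_local);
+     *   fsa_register_cib_callback(rc, join_from,
+     *                             join_node_state_commit_callback);
+     */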
     rc = cib->cmds->init_transaction(cib);
     if (rc != pcmk_ok) {
         goto done;
     }
 
     // Delete relevant parts of node's current executor state from CIB
     if (pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) {
         section = controld_section_lrm_unlocked;
     }
     controld_node_state_deletion_strings(join_from, section, &xpath, NULL);
 
     rc = cib->cmds->remove(cib, xpath, NULL,
                            cib_scope_local
                            |cib_xpath
                            |cib_multiple
                            |cib_transaction);
     if (rc != pcmk_ok) {
         goto done;
     }
 
     // Update CIB with node's latest known executor state
     if (pcmk__str_eq(join_from, controld_globals.our_nodename,
                      pcmk__str_casei)) {
 
         // Use the latest possible state if processing our own join ack
         execd_state = controld_query_executor_state();
 
         if (execd_state != NULL) {
             crm_debug("Updating local node history for join-%d from query "
                       "result",
                       current_join_id);
             state = execd_state;
 
         } else {
             crm_warn("Updating local node history from join-%d confirmation "
                      "because query failed",
                      current_join_id);
         }
 
     } else {
         crm_debug("Updating node history for %s from join-%d confirmation",
                   join_from, current_join_id);
     }
 
     rc = cib->cmds->modify(cib, PCMK_XE_STATUS, state,
                            cib_scope_local|cib_can_create|cib_transaction);
     free_xml(execd_state);
     if (rc != pcmk_ok) {
         goto done;
     }
 
     // Commit the transaction
     rc = cib->cmds->end_transaction(cib, true, cib_scope_local);
     fsa_register_cib_callback(rc, join_from, join_node_state_commit_callback);
 
     if (rc > 0) {
         // join_from will be freed after callback
         join_from = NULL;
         rc = pcmk_ok;
     }
 
 done:
     if (rc != pcmk_ok) {
         crm_crit("join-%d node history update for node %s failed: %s",
                  current_join_id, join_from, pcmk_strerror(rc));
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
     }
     free(join_from);
     free(xpath);
 }
 
 void
 finalize_join_for(gpointer key, gpointer value, gpointer user_data)
 {
     xmlNode *acknak = NULL;
     xmlNode *tmp1 = NULL;
     crm_node_t *join_node = value;
     const char *join_to = join_node->uname;
     bool integrated = false;
 
     switch (join_node->join) {
         case crm_join_integrated:
             integrated = true;
             break;
         case crm_join_nack:
         case crm_join_nack_quiet:
             break;
         default:
             crm_trace("Not updating non-integrated and non-nacked node %s (%s) "
                       "for join-%d", join_to,
                       crm_join_phase_str(join_node->join), current_join_id);
             return;
     }
 
     /* Update the <node> element with the node's name and UUID, in case they
      * weren't known before
      */
     crm_trace("Updating node name and UUID in CIB for %s", join_to);
     tmp1 = pcmk__xe_create(NULL, PCMK_XE_NODE);
     crm_xml_add(tmp1, PCMK_XA_ID, crm_peer_uuid(join_node));
     crm_xml_add(tmp1, PCMK_XA_UNAME, join_to);
     fsa_cib_anon_update(PCMK_XE_NODES, tmp1);
     free_xml(tmp1);
 
     if (join_node->join == crm_join_nack_quiet) {
         crm_trace("Not sending nack message to node %s with feature set older "
                   "than 3.17.0", join_to);
         return;
     }
 
     join_node = pcmk__get_node(0, join_to, NULL, pcmk__node_search_cluster);
     if (!crm_is_peer_active(join_node)) {
         /*
          * NACK'ing nodes that the membership layer doesn't know about yet
          * simply creates more churn
          *
          * Better to leave them waiting and let the join restart when
          * the new membership event comes in
          *
          * All other NACKs (due to versions etc) should still be processed
          */
         pcmk__update_peer_expected(__func__, join_node, CRMD_JOINSTATE_PENDING);
         return;
     }
 
     // Acknowledge or nack node's join request
     crm_debug("%sing join-%d request from %s",
               integrated? "Acknowledg" : "Nack", current_join_id, join_to);
     acknak = create_dc_message(CRM_OP_JOIN_ACKNAK, join_to);
     pcmk__xe_set_bool_attr(acknak, CRM_OP_JOIN_ACKNAK, integrated);
 
     if (integrated) {
         // No change needed for a nacked node
         crm_update_peer_join(__func__, join_node, crm_join_finalized);
         pcmk__update_peer_expected(__func__, join_node, CRMD_JOINSTATE_MEMBER);
 
         /* Iterate through the remote peer cache and add information on which
          * node hosts each to the ACK message.  This keeps new controllers in
          * sync with what has already happened.
          */
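+        /* The resulting ACK fragment is roughly (element and attribute names
+         * per the constants used below, values illustrative):
+         *   <nodes>
+         *     <node id="remote1" node_state="member" connection_host="cluster1"/>
+         *   </nodes>
+         */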
-        if (crm_remote_peer_cache_size() != 0) {
+        if (pcmk__cluster_num_remote_nodes() > 0) {
             GHashTableIter iter;
             crm_node_t *node = NULL;
             xmlNode *remotes = pcmk__xe_create(acknak, PCMK_XE_NODES);
 
             g_hash_table_iter_init(&iter, crm_remote_peer_cache);
             while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
                 xmlNode *remote = NULL;
 
                 if (!node->conn_host) {
                     continue;
                 }
 
                 remote = pcmk__xe_create(remotes, PCMK_XE_NODE);
                 pcmk__xe_set_props(remote,
                                    PCMK_XA_ID, node->uname,
                                    PCMK__XA_NODE_STATE, node->state,
                                    PCMK__XA_CONNECTION_HOST, node->conn_host,
                                    NULL);
             }
         }
     }
     send_cluster_message(join_node, crm_msg_crmd, acknak, TRUE);
     free_xml(acknak);
     return;
 }
 
 gboolean
 check_join_state(enum crmd_fsa_state cur_state, const char *source)
 {
     static unsigned long long highest_seq = 0;
 
     if (controld_globals.membership_id != crm_peer_seq) {
         crm_debug("join-%d: Membership changed from %llu to %llu "
                   CRM_XS " highest=%llu state=%s for=%s",
                   current_join_id, controld_globals.membership_id, crm_peer_seq,
                   highest_seq, fsa_state2string(cur_state), source);
         if(highest_seq < crm_peer_seq) {
             /* Don't spam the FSA with duplicates */
             highest_seq = crm_peer_seq;
             register_fsa_input_before(C_FSA_INTERNAL, I_NODE_JOIN, NULL);
         }
 
     } else if (cur_state == S_INTEGRATION) {
         if (crmd_join_phase_count(crm_join_welcomed) == 0) {
             int count = crmd_join_phase_count(crm_join_integrated);
 
             crm_debug("join-%d: Integration of %d peer%s complete "
                       CRM_XS " state=%s for=%s",
                       current_join_id, count, pcmk__plural_s(count),
                       fsa_state2string(cur_state), source);
             register_fsa_input_before(C_FSA_INTERNAL, I_INTEGRATED, NULL);
             return TRUE;
         }
 
     } else if (cur_state == S_FINALIZE_JOIN) {
         if (!pcmk_is_set(controld_globals.fsa_input_register, R_HAVE_CIB)) {
             crm_debug("join-%d: Delaying finalization until we have CIB "
                       CRM_XS " state=%s for=%s",
                       current_join_id, fsa_state2string(cur_state), source);
             return TRUE;
 
         } else if (crmd_join_phase_count(crm_join_welcomed) != 0) {
             int count = crmd_join_phase_count(crm_join_welcomed);
 
             crm_debug("join-%d: Still waiting on %d welcomed node%s "
                       CRM_XS " state=%s for=%s",
                       current_join_id, count, pcmk__plural_s(count),
                       fsa_state2string(cur_state), source);
             crmd_join_phase_log(LOG_DEBUG);
 
         } else if (crmd_join_phase_count(crm_join_integrated) != 0) {
             int count = crmd_join_phase_count(crm_join_integrated);
 
             crm_debug("join-%d: Still waiting on %d integrated node%s "
                       CRM_XS " state=%s for=%s",
                       current_join_id, count, pcmk__plural_s(count),
                       fsa_state2string(cur_state), source);
             crmd_join_phase_log(LOG_DEBUG);
 
         } else if (crmd_join_phase_count(crm_join_finalized) != 0) {
             int count = crmd_join_phase_count(crm_join_finalized);
 
             crm_debug("join-%d: Still waiting on %d finalized node%s "
                       CRM_XS " state=%s for=%s",
                       current_join_id, count, pcmk__plural_s(count),
                       fsa_state2string(cur_state), source);
             crmd_join_phase_log(LOG_DEBUG);
 
         } else {
             crm_debug("join-%d: Complete " CRM_XS " state=%s for=%s",
                       current_join_id, fsa_state2string(cur_state), source);
             register_fsa_input_later(C_FSA_INTERNAL, I_FINALIZED, NULL);
             return TRUE;
         }
     }
 
     return FALSE;
 }
 
 void
 do_dc_join_final(long long action,
                  enum crmd_fsa_cause cause,
                  enum crmd_fsa_state cur_state,
                  enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     crm_debug("Ensuring DC, quorum and node attributes are up-to-date");
     crm_update_quorum(crm_have_quorum, TRUE);
 }
 
 int crmd_join_phase_count(enum crm_join_phase phase)
 {
     int count = 0;
     crm_node_t *peer;
     GHashTableIter iter;
 
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &peer)) {
         if(peer->join == phase) {
             count++;
         }
     }
     return count;
 }
 
 void crmd_join_phase_log(int level)
 {
     crm_node_t *peer;
     GHashTableIter iter;
 
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &peer)) {
         do_crm_log(level, "join-%d: %s=%s", current_join_id, peer->uname,
                    crm_join_phase_str(peer->join));
     }
 }
diff --git a/include/crm/cluster/internal.h b/include/crm/cluster/internal.h
index b75784c256..a96a063943 100644
--- a/include/crm/cluster/internal.h
+++ b/include/crm/cluster/internal.h
@@ -1,151 +1,157 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__CRM_CLUSTER_INTERNAL__H
 #  define PCMK__CRM_CLUSTER_INTERNAL__H
 
 #  include <stdint.h>       // uint32_t, uint64_t
+
+#  include <glib.h>         // gboolean
+
 #  include <crm/cluster.h>
 
 /* *INDENT-OFF* */
 enum crm_proc_flag {
     crm_proc_none       = 0x00000001,
 
     // Cluster layers
     crm_proc_cpg        = 0x04000000,
 
     // Daemons
     crm_proc_execd      = 0x00000010,
     crm_proc_based      = 0x00000100,
     crm_proc_controld   = 0x00000200,
     crm_proc_attrd      = 0x00001000,
     crm_proc_schedulerd = 0x00010000,
     crm_proc_fenced     = 0x00100000,
 };
 /* *INDENT-ON* */
 
 // Used with node cache search functions
 enum pcmk__node_search_flags {
     pcmk__node_search_none      = 0,
     pcmk__node_search_cluster   = (1 << 0), // Search for cluster nodes
     pcmk__node_search_remote    = (1 << 1), // Search for remote nodes
     pcmk__node_search_any       = pcmk__node_search_cluster
                                   |pcmk__node_search_remote,
 
     /* @COMPAT The values before this must stay the same until we can drop
      * support for enum crm_get_peer_flags
      */
 
     pcmk__node_search_known     = (1 << 2), // Search previously known nodes
 };
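+
+/* Illustrative use: search both caches by node name only, e.g.
+ *   node = pcmk__search_node_caches(0, node_name, pcmk__node_search_any);
+ */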
 
 /*!
  * \internal
  * \brief Return the process bit corresponding to the current cluster stack
  *
  * \return Process flag if detectable, otherwise 0
  */
 static inline uint32_t
 crm_get_cluster_proc(void)
 {
     switch (get_cluster_type()) {
         case pcmk_cluster_corosync:
             return crm_proc_cpg;
 
         default:
             break;
     }
     return crm_proc_none;
 }
 
 /*!
  * \internal
  * \brief Get log-friendly string description of a Corosync return code
  *
  * \param[in] error  Corosync return code
  *
  * \return Log-friendly string description corresponding to \p error
  */
 static inline const char *
 pcmk__cs_err_str(int error)
 {
 #  if SUPPORT_COROSYNC
     switch (error) {
         case CS_OK:                         return "OK";
         case CS_ERR_LIBRARY:                return "Library error";
         case CS_ERR_VERSION:                return "Version error";
         case CS_ERR_INIT:                   return "Initialization error";
         case CS_ERR_TIMEOUT:                return "Timeout";
         case CS_ERR_TRY_AGAIN:              return "Try again";
         case CS_ERR_INVALID_PARAM:          return "Invalid parameter";
         case CS_ERR_NO_MEMORY:              return "No memory";
         case CS_ERR_BAD_HANDLE:             return "Bad handle";
         case CS_ERR_BUSY:                   return "Busy";
         case CS_ERR_ACCESS:                 return "Access error";
         case CS_ERR_NOT_EXIST:              return "Doesn't exist";
         case CS_ERR_NAME_TOO_LONG:          return "Name too long";
         case CS_ERR_EXIST:                  return "Exists";
         case CS_ERR_NO_SPACE:               return "No space";
         case CS_ERR_INTERRUPT:              return "Interrupt";
         case CS_ERR_NAME_NOT_FOUND:         return "Name not found";
         case CS_ERR_NO_RESOURCES:           return "No resources";
         case CS_ERR_NOT_SUPPORTED:          return "Not supported";
         case CS_ERR_BAD_OPERATION:          return "Bad operation";
         case CS_ERR_FAILED_OPERATION:       return "Failed operation";
         case CS_ERR_MESSAGE_ERROR:          return "Message error";
         case CS_ERR_QUEUE_FULL:             return "Queue full";
         case CS_ERR_QUEUE_NOT_AVAILABLE:    return "Queue not available";
         case CS_ERR_BAD_FLAGS:              return "Bad flags";
         case CS_ERR_TOO_BIG:                return "Too big";
         case CS_ERR_NO_SECTIONS:            return "No sections";
     }
 #  endif
     return "Corosync error";
 }
 
 #  if SUPPORT_COROSYNC
 
 #if 0
 /* This is the new way to do it, but we still support all Corosync 2 versions,
  * and this isn't always available. A better alternative here would be to check
  * for support in the configure script and enable this conditionally.
  */
 #define pcmk__init_cmap(handle) cmap_initialize_map((handle), CMAP_MAP_ICMAP)
 #else
 #define pcmk__init_cmap(handle) cmap_initialize(handle)
 #endif
 
 char *pcmk__corosync_cluster_name(void);
 bool pcmk__corosync_add_nodes(xmlNode *xml_parent);
 #  endif
 
 crm_node_t *crm_update_peer_proc(const char *source, crm_node_t * peer,
                                  uint32_t flag, const char *status);
 crm_node_t *pcmk__update_peer_state(const char *source, crm_node_t *node,
                                     const char *state, uint64_t membership);
 
 void pcmk__update_peer_expected(const char *source, crm_node_t *node,
                                 const char *expected);
 void pcmk__reap_unseen_nodes(uint64_t ring_id);
 
 void pcmk__corosync_quorum_connect(gboolean (*dispatch)(unsigned long long,
                                                         gboolean),
                                    void (*destroy) (gpointer));
+
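+// Implemented in membership.c; returns the size of the remote node cache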
+unsigned int pcmk__cluster_num_remote_nodes(void);
+
 crm_node_t *pcmk__search_node_caches(unsigned int id, const char *uname,
                                      uint32_t flags);
 crm_node_t *pcmk__search_cluster_node_cache(unsigned int id, const char *uname,
                                             const char *uuid);
 void pcmk__purge_node_from_cache(const char *node_name, uint32_t node_id);
 
 void pcmk__refresh_node_caches_from_cib(xmlNode *cib);
 
 crm_node_t *pcmk__get_node(unsigned int id, const char *uname,
                            const char *uuid, uint32_t flags);
 
 #endif // PCMK__CRM_CLUSTER_INTERNAL__H
diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c
index d116370f8f..11d0b09607 100644
--- a/lib/cluster/membership.c
+++ b/lib/cluster/membership.c
@@ -1,1384 +1,1399 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #ifndef _GNU_SOURCE
 #  define _GNU_SOURCE
 #endif
 
 #include <sys/param.h>
 #include <sys/types.h>
+#include <limits.h>         // INT_MAX
+#include <qb/qbdefs.h>      // QB_MIN
 #include <stdio.h>
 #include <unistd.h>
 #include <string.h>
 #include <glib.h>
 #include <crm/common/ipc.h>
 #include <crm/common/xml_internal.h>
 #include <crm/cluster/internal.h>
 #include <crm/common/xml.h>
 #include <crm/stonith-ng.h>
 #include "crmcluster_private.h"
 
 /* The peer cache remembers cluster nodes that have been seen.
  * This is managed mostly automatically by libcluster, based on
  * cluster membership events.
  *
  * Because cluster nodes can have conflicting names or UUIDs,
  * the hash table key is a uniquely generated ID.
  */
 GHashTable *crm_peer_cache = NULL;
 
 /*
  * The remote peer cache tracks pacemaker_remote nodes. While the
  * value has the same type as the peer cache's, it is tracked separately for
  * three reasons: pacemaker_remote nodes can't have conflicting names or UUIDs,
  * so the name (which is also the UUID) is used as the hash table key; there
  * is no equivalent of membership events, so management is not automatic; and
  * most users of the peer cache need to exclude pacemaker_remote nodes.
  *
  * That said, using a single cache would be more logical and less error-prone,
  * so it would be a good idea to merge them one day.
  *
  * libcluster provides two avenues for populating the cache:
  * crm_remote_peer_get() and crm_remote_peer_cache_remove() directly manage it,
  * while crm_remote_peer_cache_refresh() populates it via the CIB.
  */
 GHashTable *crm_remote_peer_cache = NULL;
 
 /*
  * The known node cache tracks cluster and remote nodes that have been seen in
  * the CIB. It is useful mainly when a caller needs to know about a node that
  * may no longer be in the membership, but doesn't want to add the node to the
  * main peer cache tables.
  */
 static GHashTable *known_node_cache = NULL;
 
 unsigned long long crm_peer_seq = 0;
 gboolean crm_have_quorum = FALSE;
 static gboolean crm_autoreap  = TRUE;
 
 // Flag setting and clearing for crm_node_t:flags
 
 #define set_peer_flags(peer, flags_to_set) do {                               \
         (peer)->flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                            "Peer", (peer)->uname,             \
                                            (peer)->flags, (flags_to_set),     \
                                            #flags_to_set);                    \
     } while (0)
 
 #define clear_peer_flags(peer, flags_to_clear) do {                           \
         (peer)->flags = pcmk__clear_flags_as(__func__, __LINE__,              \
                                              LOG_TRACE,                       \
                                              "Peer", (peer)->uname,           \
                                              (peer)->flags, (flags_to_clear), \
                                              #flags_to_clear);                \
     } while (0)
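+
+// Typical use (as in crm_remote_peer_get() below):
+//   set_peer_flags(node, crm_remote_node);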
 
 static void update_peer_uname(crm_node_t *node, const char *uname);
 static crm_node_t *find_known_node(const char *id, const char *uname);
 
-int
-crm_remote_peer_cache_size(void)
+/*!
+ * \internal
+ * \brief Get the number of Pacemaker Remote nodes that have been seen
+ *
+ * \return Number of cached Pacemaker Remote nodes
+ */
+unsigned int
+pcmk__cluster_num_remote_nodes(void)
 {
     if (crm_remote_peer_cache == NULL) {
-        return 0;
+        return 0U;
     }
     return g_hash_table_size(crm_remote_peer_cache);
 }
 
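+// Thin wrapper for existing callers of the public API; clamps the unsigned
+// count to the int range the older interface returns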
+int
+crm_remote_peer_cache_size(void)
+{
+    unsigned int count = pcmk__cluster_num_remote_nodes();
+
+    return QB_MIN(count, INT_MAX);
+}
+
 /*!
  * \brief Get a remote node peer cache entry, creating it if necessary
  *
  * \param[in] node_name  Name of remote node
  *
  * \return Cache entry for node on success, NULL (and set errno) otherwise
  *
  * \note When creating a new entry, this will leave the node state undetermined,
  *       so the caller should also call pcmk__update_peer_state() if the state
  *       is known.
  * \note Because this can add and remove cache entries, callers should not
  *       assume any previously obtained cache entry pointers remain valid.
  */
 crm_node_t *
 crm_remote_peer_get(const char *node_name)
 {
     crm_node_t *node;
     char *node_name_copy = NULL;
 
     if (node_name == NULL) {
         errno = EINVAL;
         return NULL;
     }
 
     /* It's theoretically possible that the node was added to the cluster peer
      * cache before it was known to be a Pacemaker Remote node. Remove that
      * entry unless it has a node ID, which means the name actually is
      * associated with a cluster node. (@TODO return an error in that case?)
      */
     node = pcmk__search_node_caches(0, node_name, pcmk__node_search_cluster);
     if ((node != NULL) && (node->uuid == NULL)) {
         /* node_name could be a pointer into the cache entry being removed, so
          * reassign it to a copy before the original gets freed
          */
         node_name_copy = strdup(node_name);
         if (node_name_copy == NULL) {
             errno = ENOMEM;
             return NULL;
         }
         node_name = node_name_copy;
         reap_crm_member(0, node_name);
     }
 
     /* Return existing cache entry if one exists */
     node = g_hash_table_lookup(crm_remote_peer_cache, node_name);
     if (node) {
         free(node_name_copy);
         return node;
     }
 
     /* Allocate a new entry */
     node = calloc(1, sizeof(crm_node_t));
     if (node == NULL) {
         free(node_name_copy);
         return NULL;
     }
 
     /* Populate the essential information */
     set_peer_flags(node, crm_remote_node);
     node->uuid = strdup(node_name);
     if (node->uuid == NULL) {
         free(node);
         errno = ENOMEM;
         free(node_name_copy);
         return NULL;
     }
 
     /* Add the new entry to the cache */
     g_hash_table_replace(crm_remote_peer_cache, node->uuid, node);
     crm_trace("added %s to remote cache", node_name);
 
     /* Update the entry's uname, ensuring peer status callbacks are called */
     update_peer_uname(node, node_name);
     free(node_name_copy);
     return node;
 }
 
 /*!
  * \brief Remove a node from the Pacemaker Remote node cache
  *
  * \param[in] node_name  Name of node to remove from cache
  *
  * \note The caller must be careful not to use \p node_name after calling this
  *       function if it might be a pointer into the cache entry being removed.
  */
 void
 crm_remote_peer_cache_remove(const char *node_name)
 {
     /* Do a lookup first, because node_name could be a pointer within the entry
      * being removed -- we can't log it *after* removing it.
      */
     if (g_hash_table_lookup(crm_remote_peer_cache, node_name) != NULL) {
         crm_trace("Removing %s from Pacemaker Remote node cache", node_name);
         g_hash_table_remove(crm_remote_peer_cache, node_name);
     }
 }
 
 /*!
  * \internal
  * \brief Return node status based on a CIB status entry
  *
  * \param[in] node_state  XML of node state
  *
  * \return \c CRM_NODE_LOST if \c PCMK__XA_IN_CCM is false in
  *         \c PCMK__XE_NODE_STATE, \c CRM_NODE_MEMBER otherwise
  * \note Unlike most boolean XML attributes, this one defaults to true, for
  *       backward compatibility with older controllers that don't set it.
  */
 static const char *
 remote_state_from_cib(const xmlNode *node_state)
 {
     bool status = false;
 
     if ((pcmk__xe_get_bool_attr(node_state, PCMK__XA_IN_CCM,
                                 &status) == pcmk_rc_ok) && !status) {
         return CRM_NODE_LOST;
     } else {
         return CRM_NODE_MEMBER;
     }
 }
 
 /* user data for looping through remote node xpath searches */
 struct refresh_data {
     const char *field;  /* XML attribute to check for node name */
     gboolean has_state; /* whether to update node state based on XML */
 };
 
 /*!
  * \internal
  * \brief Process one pacemaker_remote node xpath search result
  *
  * \param[in] result     XML search result
  * \param[in] user_data  what to look for in the XML
  */
 static void
 remote_cache_refresh_helper(xmlNode *result, void *user_data)
 {
     const struct refresh_data *data = user_data;
     const char *remote = crm_element_value(result, data->field);
     const char *state = NULL;
     crm_node_t *node;
 
     CRM_CHECK(remote != NULL, return);
 
     /* Determine node's state, if the result has it */
     if (data->has_state) {
         state = remote_state_from_cib(result);
     }
 
     /* Check whether cache already has entry for node */
     node = g_hash_table_lookup(crm_remote_peer_cache, remote);
 
     if (node == NULL) {
         /* Node is not in cache, so add a new entry for it */
         node = crm_remote_peer_get(remote);
         CRM_ASSERT(node);
         if (state) {
             pcmk__update_peer_state(__func__, node, state, 0);
         }
 
     } else if (pcmk_is_set(node->flags, crm_node_dirty)) {
         /* Node is in cache and hasn't been updated already, so mark it clean */
         clear_peer_flags(node, crm_node_dirty);
         if (state) {
             pcmk__update_peer_state(__func__, node, state, 0);
         }
     }
 }
 
 static void
 mark_dirty(gpointer key, gpointer value, gpointer user_data)
 {
     set_peer_flags((crm_node_t *) value, crm_node_dirty);
 }
 
 static gboolean
 is_dirty(gpointer key, gpointer value, gpointer user_data)
 {
     return pcmk_is_set(((crm_node_t*)value)->flags, crm_node_dirty);
 }
 
 /*!
  * \brief Repopulate the remote peer cache based on CIB XML
  *
  * \param[in] cib  CIB XML to parse
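+ *
+ * \note This is a mark-and-sweep refresh: existing entries are flagged dirty,
+ *       re-checked against the CIB, and any still-dirty entries are removed.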
  */
 void
 crm_remote_peer_cache_refresh(xmlNode *cib)
 {
     struct refresh_data data;
 
     crm_peer_init();
 
     /* First, we mark all existing cache entries as dirty,
      * so that later we can remove any that weren't in the CIB.
      * We don't empty the cache, because we need to detect changes in state.
      */
     g_hash_table_foreach(crm_remote_peer_cache, mark_dirty, NULL);
 
     /* Look for guest nodes and remote nodes in the status section */
     data.field = PCMK_XA_ID;
     data.has_state = TRUE;
     crm_foreach_xpath_result(cib, PCMK__XP_REMOTE_NODE_STATUS,
                              remote_cache_refresh_helper, &data);
 
     /* Look for guest nodes and remote nodes in the configuration section,
      * because they may have just been added and not have a status entry yet.
      * In that case, the cached node state will be left NULL, so that the
      * peer status callback isn't called until we're sure the node started
      * successfully.
      */
     data.field = PCMK_XA_VALUE;
     data.has_state = FALSE;
     crm_foreach_xpath_result(cib, PCMK__XP_GUEST_NODE_CONFIG,
                              remote_cache_refresh_helper, &data);
     data.field = PCMK_XA_ID;
     data.has_state = FALSE;
     crm_foreach_xpath_result(cib, PCMK__XP_REMOTE_NODE_CONFIG,
                              remote_cache_refresh_helper, &data);
 
     /* Remove all old cache entries that weren't seen in the CIB */
     g_hash_table_foreach_remove(crm_remote_peer_cache, is_dirty, NULL);
 }
 
 gboolean
 crm_is_peer_active(const crm_node_t * node)
 {
     if(node == NULL) {
         return FALSE;
     }
 
     if (pcmk_is_set(node->flags, crm_remote_node)) {
         /* Remote nodes are never considered active members. This
          * guarantees they will never be considered for DC membership.
          */
         return FALSE;
     }
 #if SUPPORT_COROSYNC
     if (is_corosync_cluster()) {
         return crm_is_corosync_peer_active(node);
     }
 #endif
     crm_err("Unhandled cluster type: %s", name_for_cluster_type(get_cluster_type()));
     return FALSE;
 }
 
 static gboolean
 crm_reap_dead_member(gpointer key, gpointer value, gpointer user_data)
 {
     crm_node_t *node = value;
     crm_node_t *search = user_data;
 
     if (search == NULL) {
         return FALSE;
 
     } else if (search->id && node->id != search->id) {
         return FALSE;
 
     } else if (search->id == 0 && !pcmk__str_eq(node->uname, search->uname, pcmk__str_casei)) {
         return FALSE;
 
     } else if (crm_is_peer_active(value) == FALSE) {
         crm_info("Removing node with name %s and " PCMK_XA_ID
                  " %u from membership cache",
                  (node->uname? node->uname : "unknown"), node->id);
         return TRUE;
     }
     return FALSE;
 }
 
 /*!
  * \brief Remove all peer cache entries matching a node ID and/or uname
  *
  * \param[in] id    ID of node to remove (or 0 to ignore)
  * \param[in] name  Uname of node to remove (or NULL to ignore)
  *
  * \return Number of cache entries removed
  *
  * \note The caller must be careful not to use \p name after calling this
  *       function if it might be a pointer into the cache entry being removed.
  */
 guint
 reap_crm_member(uint32_t id, const char *name)
 {
     int matches = 0;
     crm_node_t search = { 0, };
 
     if (crm_peer_cache == NULL) {
         crm_trace("Membership cache not initialized, ignoring purge request");
         return 0;
     }
 
     search.id = id;
     search.uname = pcmk__str_copy(name);
     matches = g_hash_table_foreach_remove(crm_peer_cache, crm_reap_dead_member, &search);
     if(matches) {
         crm_notice("Purged %d peer%s with " PCMK_XA_ID
                    "=%u%s%s from the membership cache",
                    matches, pcmk__plural_s(matches), search.id,
                    (search.uname? " and/or uname=" : ""),
                    (search.uname? search.uname : ""));
 
     } else {
         crm_info("No peers with " PCMK_XA_ID
                  "=%u%s%s to purge from the membership cache",
                  search.id, (search.uname? " and/or uname=" : ""),
                  (search.uname? search.uname : ""));
     }
 
     free(search.uname);
     return matches;
 }
 
 static void
 count_peer(gpointer key, gpointer value, gpointer user_data)
 {
     guint *count = user_data;
     crm_node_t *node = value;
 
     if (crm_is_peer_active(node)) {
         *count = *count + 1;
     }
 }
 
 guint
 crm_active_peers(void)
 {
     guint count = 0;
 
     if (crm_peer_cache) {
         g_hash_table_foreach(crm_peer_cache, count_peer, &count);
     }
     return count;
 }
 
 static void
 destroy_crm_node(gpointer data)
 {
     crm_node_t *node = data;
 
     crm_trace("Destroying entry for node %u: %s", node->id, node->uname);
 
     free(node->uname);
     free(node->state);
     free(node->uuid);
     free(node->expected);
     free(node->conn_host);
     free(node);
 }
 
 void
 crm_peer_init(void)
 {
     if (crm_peer_cache == NULL) {
         crm_peer_cache = pcmk__strikey_table(free, destroy_crm_node);
     }
 
     if (crm_remote_peer_cache == NULL) {
         crm_remote_peer_cache = pcmk__strikey_table(NULL, destroy_crm_node);
     }
 
     if (known_node_cache == NULL) {
         known_node_cache = pcmk__strikey_table(free, destroy_crm_node);
     }
 }
 
 void
 crm_peer_destroy(void)
 {
     if (crm_peer_cache != NULL) {
         crm_trace("Destroying peer cache with %d members", g_hash_table_size(crm_peer_cache));
         g_hash_table_destroy(crm_peer_cache);
         crm_peer_cache = NULL;
     }
 
     if (crm_remote_peer_cache != NULL) {
-        crm_trace("Destroying remote peer cache with %d members", g_hash_table_size(crm_remote_peer_cache));
+        crm_trace("Destroying remote peer cache with %d members",
+                  pcmk__cluster_num_remote_nodes());
         g_hash_table_destroy(crm_remote_peer_cache);
         crm_remote_peer_cache = NULL;
     }
 
     if (known_node_cache != NULL) {
         crm_trace("Destroying known node cache with %d members",
                   g_hash_table_size(known_node_cache));
         g_hash_table_destroy(known_node_cache);
         known_node_cache = NULL;
     }
 }
 
 static void (*peer_status_callback)(enum crm_status_type, crm_node_t *,
                                     const void *) = NULL;
 
 /*!
  * \brief Set a client function that will be called after peer status changes
  *
  * \param[in] dispatch  Pointer to function to use as callback
  *
  * \note Previously, client callbacks were responsible for peer cache
  *       management. This is no longer the case, and client callbacks should do
  *       only client-specific handling. Callbacks MUST NOT add or remove entries
  *       in the peer caches.
  */
 void
 crm_set_status_callback(void (*dispatch) (enum crm_status_type, crm_node_t *, const void *))
 {
     peer_status_callback = dispatch;
 }
 
 /*!
  * \brief Tell the library whether to automatically reap lost nodes
  *
  * If TRUE (the default), calling crm_update_peer_proc() will also update the
  * peer state to CRM_NODE_MEMBER or CRM_NODE_LOST, and pcmk__update_peer_state()
  * will reap peers whose state changes to anything other than CRM_NODE_MEMBER.
  * Callers should leave this enabled unless they plan to manage the cache
  * separately on their own.
  *
  * \param[in] autoreap  TRUE to enable automatic reaping, FALSE to disable
  */
 void
 crm_set_autoreap(gboolean autoreap)
 {
     crm_autoreap = autoreap;
 }
 
 static void
 dump_peer_hash(int level, const char *caller)
 {
     GHashTableIter iter;
     const char *id = NULL;
     crm_node_t *node = NULL;
 
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, (gpointer *) &id, (gpointer *) &node)) {
         do_crm_log(level, "%s: Node %u/%s = %p - %s", caller, node->id, node->uname, node, id);
     }
 }
 
 static gboolean
 hash_find_by_data(gpointer key, gpointer value, gpointer user_data)
 {
     return value == user_data;
 }
 
 /*!
  * \internal
  * \brief Search caches for a node (cluster or Pacemaker Remote)
  *
  * \param[in] id     If not 0, cluster node ID to search for
  * \param[in] uname  If not NULL, node name to search for
  * \param[in] flags  Group of enum pcmk__node_search_flags
  *
  * \return Node cache entry if found, otherwise NULL
  */
 crm_node_t *
 pcmk__search_node_caches(unsigned int id, const char *uname, uint32_t flags)
 {
     crm_node_t *node = NULL;
 
     CRM_ASSERT(id > 0 || uname != NULL);
 
     crm_peer_init();
 
     if ((uname != NULL) && pcmk_is_set(flags, pcmk__node_search_remote)) {
         node = g_hash_table_lookup(crm_remote_peer_cache, uname);
     }
 
     if ((node == NULL) && pcmk_is_set(flags, pcmk__node_search_cluster)) {
         node = pcmk__search_cluster_node_cache(id, uname, NULL);
     }
 
     if ((node == NULL) && pcmk_is_set(flags, pcmk__node_search_known)) {
         char *id_str = (id == 0)? NULL : crm_strdup_printf("%u", id);
 
         node = find_known_node(id_str, uname);
         free(id_str);
     }
 
     return node;
 }
 
 /*!
  * \internal
  * \brief Purge a node from cache (both cluster and Pacemaker Remote)
  *
  * \param[in] node_name  If not NULL, purge only nodes with this name
  * \param[in] node_id    If not 0, purge cluster nodes only if they have this ID
  *
  * \note If \p node_name is NULL and \p node_id is 0, no nodes will be purged.
  *       If \p node_name is not NULL and \p node_id is not 0, Pacemaker Remote
  *       nodes that match \p node_name will be purged, and cluster nodes that
  *       match both \p node_name and \p node_id will be purged.
  * \note The caller must be careful not to use \p node_name after calling this
  *       function if it might be a pointer into a cache entry being removed.
  */
 void
 pcmk__purge_node_from_cache(const char *node_name, uint32_t node_id)
 {
     char *node_name_copy = NULL;
 
     if ((node_name == NULL) && (node_id == 0U)) {
         return;
     }
 
     // Purge from Pacemaker Remote node cache
     if ((node_name != NULL)
         && (g_hash_table_lookup(crm_remote_peer_cache, node_name) != NULL)) {
         /* node_name could be a pointer into the cache entry being purged,
          * so reassign it to a copy before the original gets freed
          */
         node_name_copy = pcmk__str_copy(node_name);
         node_name = node_name_copy;
 
         crm_trace("Purging %s from Pacemaker Remote node cache", node_name);
         g_hash_table_remove(crm_remote_peer_cache, node_name);
     }
 
     reap_crm_member(node_id, node_name);
     free(node_name_copy);
 }
 
 /*!
  * \internal
  * \brief Search cluster node cache
  *
  * \param[in] id     If not 0, cluster node ID to search for
  * \param[in] uname  If not NULL, node name to search for
  * \param[in] uuid   If not NULL while id is 0, node UUID instead of cluster
  *                   node ID to search for
  *
  * \return Cluster node cache entry if found, otherwise NULL
  */
 crm_node_t *
 pcmk__search_cluster_node_cache(unsigned int id, const char *uname,
                                 const char *uuid)
 {
     GHashTableIter iter;
     crm_node_t *node = NULL;
     crm_node_t *by_id = NULL;
     crm_node_t *by_name = NULL;
 
     CRM_ASSERT(id > 0 || uname != NULL);
 
     crm_peer_init();
 
     if (uname != NULL) {
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if(node->uname && strcasecmp(node->uname, uname) == 0) {
                 crm_trace("Name match: %s = %p", node->uname, node);
                 by_name = node;
                 break;
             }
         }
     }
 
     if (id > 0) {
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if(node->id == id) {
                 crm_trace("ID match: %u = %p", node->id, node);
                 by_id = node;
                 break;
             }
         }
 
     } else if (uuid != NULL) {
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if (pcmk__str_eq(node->uuid, uuid, pcmk__str_casei)) {
                 crm_trace("UUID match: %s = %p", node->uuid, node);
                 by_id = node;
                 break;
             }
         }
     }
 
     node = by_id; /* Good default */
     if(by_id == by_name) {
         /* Nothing to do if they match (both being NULL counts as a match) */
         crm_trace("Consistent: %p for %u/%s", by_id, id, uname);
 
     } else if(by_id == NULL && by_name) {
         crm_trace("Only one: %p for %u/%s", by_name, id, uname);
 
         if(id && by_name->id) {
             dump_peer_hash(LOG_WARNING, __func__);
             crm_crit("Node %u and %u share the same name '%s'",
                      id, by_name->id, uname);
             node = NULL; /* Create a new one */
 
         } else {
             node = by_name;
         }
 
     } else if(by_name == NULL && by_id) {
         crm_trace("Only one: %p for %u/%s", by_id, id, uname);
 
         if(uname && by_id->uname) {
             dump_peer_hash(LOG_WARNING, __func__);
             crm_crit("Node '%s' and '%s' share the same cluster nodeid %u: assuming '%s' is correct",
                      uname, by_id->uname, id, uname);
         }
 
     } else if(uname && by_id->uname) {
         if(pcmk__str_eq(uname, by_id->uname, pcmk__str_casei)) {
             crm_notice("Node '%s' has changed its ID from %u to %u", by_id->uname, by_name->id, by_id->id);
             g_hash_table_foreach_remove(crm_peer_cache, hash_find_by_data, by_name);
 
         } else {
             crm_warn("Node '%s' and '%s' share the same cluster nodeid: %u %s", by_id->uname, by_name->uname, id, uname);
             dump_peer_hash(LOG_INFO, __func__);
             crm_abort(__FILE__, __func__, __LINE__, "member weirdness", TRUE,
                       TRUE);
         }
 
     } else if(id && by_name->id) {
         crm_warn("Node %u and %u share the same name: '%s'", by_id->id, by_name->id, uname);
 
     } else {
         /* Simple merge */
 
         /* Only corosync-based clusters use node IDs. The functions that call
          * pcmk__update_peer_state() and crm_update_peer_proc() only know
          * nodeid, so 'by_id' is authoritative when merging.
          */
         dump_peer_hash(LOG_DEBUG, __func__);
 
         crm_info("Merging %p into %p", by_name, by_id);
         g_hash_table_foreach_remove(crm_peer_cache, hash_find_by_data, by_name);
     }
 
     return node;
 }
 
 #if SUPPORT_COROSYNC
 static guint
 remove_conflicting_peer(crm_node_t *node)
 {
     int matches = 0;
     GHashTableIter iter;
     crm_node_t *existing_node = NULL;
 
     if (node->id == 0 || node->uname == NULL) {
         return 0;
     }
 
     if (!pcmk__corosync_has_nodelist()) {
         return 0;
     }
 
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &existing_node)) {
         if (existing_node->id > 0
             && existing_node->id != node->id
             && existing_node->uname != NULL
             && strcasecmp(existing_node->uname, node->uname) == 0) {
 
             if (crm_is_peer_active(existing_node)) {
                 continue;
             }
 
             crm_warn("Removing cached offline node %u/%s which has conflicting uname with %u",
                      existing_node->id, existing_node->uname, node->id);
 
             g_hash_table_iter_remove(&iter);
             matches++;
         }
     }
 
     return matches;
 }
 #endif
 
 /*!
  * \brief Get a cluster node cache entry
  *
  * \param[in] id     If not 0, cluster node ID to search for
  * \param[in] uname  If not NULL, node name to search for
  * \param[in] uuid   If not NULL while id is 0, node UUID instead of cluster
  *                   node ID to search for
  * \param[in] flags  Group of enum pcmk__node_search_flags
  *
  * \return (Possibly newly created) cluster node cache entry
  */
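+/* Illustrative call (the pattern used by the controller's join code):
+ *   peer = pcmk__get_node(0, node_name, NULL, pcmk__node_search_cluster);
+ */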
 /* coverity[-alloc] Memory is referenced in one or both hashtables */
 crm_node_t *
 pcmk__get_node(unsigned int id, const char *uname, const char *uuid,
                uint32_t flags)
 {
     crm_node_t *node = NULL;
     char *uname_lookup = NULL;
 
     CRM_ASSERT(id > 0 || uname != NULL);
 
     crm_peer_init();
 
     // Check the Pacemaker Remote node cache first
     if (pcmk_is_set(flags, pcmk__node_search_remote)) {
         node = g_hash_table_lookup(crm_remote_peer_cache, uname);
         if (node != NULL) {
             return node;
         }
     }
 
     if (!pcmk_is_set(flags, pcmk__node_search_cluster)) {
         return NULL;
     }
 
     node = pcmk__search_cluster_node_cache(id, uname, uuid);
 
     /* If uname wasn't provided and the cache search didn't turn up a name
      * based on the ID, look the name up via the ID in the cluster membership.
      */
     if ((node == NULL || node->uname == NULL) && (uname == NULL)) {
         uname_lookup = get_node_name(id);
     }
 
     if (uname_lookup) {
         uname = uname_lookup;
         crm_trace("Inferred a name of '%s' for node %u", uname, id);
 
         /* Search the cache once more, now that we know the uname. */
         if (node == NULL) {
             node = pcmk__search_cluster_node_cache(id, uname, uuid);
         }
     }
 
     if (node == NULL) {
         char *uniqueid = crm_generate_uuid();
 
         node = pcmk__assert_alloc(1, sizeof(crm_node_t));
 
         crm_info("Created entry %s/%p for node %s/%u (%d total)",
                  uniqueid, node, uname, id, 1 + g_hash_table_size(crm_peer_cache));
         g_hash_table_replace(crm_peer_cache, uniqueid, node);
     }
 
     if(id > 0 && uname && (node->id == 0 || node->uname == NULL)) {
         crm_info("Node %u is now known as %s", id, uname);
     }
 
     if(id > 0 && node->id == 0) {
         node->id = id;
     }
 
     if (uname && (node->uname == NULL)) {
         update_peer_uname(node, uname);
     }
 
     if(node->uuid == NULL) {
         if (uuid == NULL) {
             uuid = crm_peer_uuid(node);
         }
 
         if (uuid) {
             crm_info("Node %u has uuid %s", id, uuid);
 
         } else {
             crm_info("Cannot obtain a UUID for node %u/%s", id, node->uname);
         }
     }
 
     free(uname_lookup);
 
     return node;
 }
 
 /*!
  * \internal
  * \brief Update a node's uname
  *
  * \param[in,out] node   Node object to update
  * \param[in]     uname  New name to set
  *
  * \note This function should not be called within a peer cache iteration,
  *       because in some cases it can remove conflicting cache entries,
  *       which would invalidate the iterator.
  */
 static void
 update_peer_uname(crm_node_t *node, const char *uname)
 {
     CRM_CHECK(uname != NULL,
               crm_err("Bug: can't update node name without name"); return);
     CRM_CHECK(node != NULL,
               crm_err("Bug: can't update node name to %s without node", uname);
               return);
 
     if (pcmk__str_eq(uname, node->uname, pcmk__str_casei)) {
         crm_debug("Node uname '%s' did not change", uname);
         return;
     }
 
     for (const char *c = uname; *c; ++c) {
         if ((*c >= 'A') && (*c <= 'Z')) {
             crm_warn("Node names with capitals are discouraged, consider changing '%s'",
                      uname);
             break;
         }
     }
 
     pcmk__str_update(&node->uname, uname);
 
     if (peer_status_callback != NULL) {
         peer_status_callback(crm_status_uname, node, NULL);
     }
 
 #if SUPPORT_COROSYNC
     if (is_corosync_cluster() && !pcmk_is_set(node->flags, crm_remote_node)) {
         remove_conflicting_peer(node);
     }
 #endif
 }
 
 /*!
  * \internal
  * \brief Get log-friendly string equivalent of a process flag
  *
  * \param[in] proc  Process flag
  *
  * \return Log-friendly string equivalent of \p proc
  */
 static inline const char *
 proc2text(enum crm_proc_flag proc)
 {
     const char *text = "unknown";
 
     switch (proc) {
         case crm_proc_none:
             text = "none";
             break;
         case crm_proc_based:
             text = "pacemaker-based";
             break;
         case crm_proc_controld:
             text = "pacemaker-controld";
             break;
         case crm_proc_schedulerd:
             text = "pacemaker-schedulerd";
             break;
         case crm_proc_execd:
             text = "pacemaker-execd";
             break;
         case crm_proc_attrd:
             text = "pacemaker-attrd";
             break;
         case crm_proc_fenced:
             text = "pacemaker-fenced";
             break;
         case crm_proc_cpg:
             text = "corosync-cpg";
             break;
     }
     return text;
 }
 
 /*!
  * \internal
  * \brief Update a node's process information (and potentially state)
  *
  * \param[in]     source  Caller's function name (for log messages)
  * \param[in,out] node    Node object to update
  * \param[in]     flag    Bitmask of new process information
 * \param[in]     status  Node status (online, offline, etc.)
  *
  * \return NULL if any node was reaped from peer caches, value of node otherwise
  *
  * \note If this function returns NULL, the supplied node object was likely
  *       freed and should not be used again. This function should not be
  *       called within a cache iteration if reaping is possible, otherwise
  *       reaping could invalidate the iterator.
  */
 crm_node_t *
crm_update_peer_proc(const char *source, crm_node_t *node, uint32_t flag,
                     const char *status)
 {
     uint32_t last = 0;
     gboolean changed = FALSE;
 
     CRM_CHECK(node != NULL, crm_err("%s: Could not set %s to %s for NULL",
                                     source, proc2text(flag), status);
                             return NULL);
 
     /* Pacemaker doesn't spawn processes on remote nodes */
     if (pcmk_is_set(node->flags, crm_remote_node)) {
         return node;
     }
 
     last = node->processes;
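    /* A NULL status means the new flag value replaces the entire bitmask */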
     if (status == NULL) {
         node->processes = flag;
         if (node->processes != last) {
             changed = TRUE;
         }
 
     } else if (pcmk__str_eq(status, PCMK_VALUE_ONLINE, pcmk__str_casei)) {
         if ((node->processes & flag) != flag) {
             node->processes = pcmk__set_flags_as(__func__, __LINE__,
                                                  LOG_TRACE, "Peer process",
                                                  node->uname, node->processes,
                                                  flag, "processes");
             changed = TRUE;
         }
 
     } else if (node->processes & flag) {
         node->processes = pcmk__clear_flags_as(__func__, __LINE__,
                                                LOG_TRACE, "Peer process",
                                                node->uname, node->processes,
                                                flag, "processes");
         changed = TRUE;
     }
 
     if (changed) {
        if ((status == NULL) && (flag <= crm_proc_none)) {
            crm_info("%s: Node %s[%u] - all processes are now offline",
                     source, node->uname, node->id);
        } else {
            crm_info("%s: Node %s[%u] - %s is now %s",
                     source, node->uname, node->id, proc2text(flag), status);
        }
 
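        // Record when the node's cluster layer process came online (0 if not)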
         if (pcmk_is_set(node->processes, crm_get_cluster_proc())) {
             node->when_online = time(NULL);
 
         } else {
             node->when_online = 0;
         }
 
         /* Call the client callback first, then update the peer state,
          * in case the node will be reaped
          */
         if (peer_status_callback != NULL) {
             peer_status_callback(crm_status_processes, node, &last);
         }
 
         /* The client callback shouldn't touch the peer caches,
          * but as a safety net, bail if the peer cache was destroyed.
          */
         if (crm_peer_cache == NULL) {
             return NULL;
         }
 
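        /* If autoreaping is enabled, update the node's state to match whether
         * its cluster process is running (which may free the node)
         */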
         if (crm_autoreap) {
             const char *peer_state = NULL;
 
             if (pcmk_is_set(node->processes, crm_get_cluster_proc())) {
                 peer_state = CRM_NODE_MEMBER;
             } else {
                 peer_state = CRM_NODE_LOST;
             }
             node = pcmk__update_peer_state(__func__, node, peer_state, 0);
         }
    } else {
        crm_trace("%s: Node %s[%u] - %s is unchanged (%s)",
                  source, node->uname, node->id, proc2text(flag), status);
    }
     return node;
 }
 
 /*!
  * \internal
  * \brief Update a cluster node cache entry's expected join state
  *
  * \param[in]     source    Caller's function name (for logging)
  * \param[in,out] node      Node to update
  * \param[in]     expected  Node's new join state
  */
 void
 pcmk__update_peer_expected(const char *source, crm_node_t *node,
                            const char *expected)
 {
     char *last = NULL;
     gboolean changed = FALSE;
 
     CRM_CHECK(node != NULL, crm_err("%s: Could not set 'expected' to %s", source, expected);
               return);
 
     /* Remote nodes don't participate in joins */
     if (pcmk_is_set(node->flags, crm_remote_node)) {
         return;
     }
 
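    // Keep the old value so it can be logged and freed if it changes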
     last = node->expected;
     if (expected != NULL && !pcmk__str_eq(node->expected, expected, pcmk__str_casei)) {
         node->expected = strdup(expected);
         changed = TRUE;
     }
 
    if (changed) {
        crm_info("%s: Node %s[%u] - expected state is now %s (was %s)",
                 source, node->uname, node->id, expected, last);
        free(last);
    } else {
        crm_trace("%s: Node %s[%u] - expected state is unchanged (%s)",
                  source, node->uname, node->id, expected);
    }
 }
 
 /*!
  * \internal
  * \brief Update a node's state and membership information
  *
  * \param[in]     source      Caller's function name (for log messages)
  * \param[in,out] node        Node object to update
  * \param[in]     state       Node's new state
  * \param[in]     membership  Node's new membership ID
  * \param[in,out] iter        If not NULL, pointer to node's peer cache iterator
  *
  * \return NULL if any node was reaped, value of node otherwise
  *
  * \note If this function returns NULL, the supplied node object was likely
  *       freed and should not be used again. This function may be called from
  *       within a peer cache iteration if the iterator is supplied.
  */
 static crm_node_t *
 update_peer_state_iter(const char *source, crm_node_t *node, const char *state,
                        uint64_t membership, GHashTableIter *iter)
 {
     gboolean is_member;
 
     CRM_CHECK(node != NULL,
               crm_err("Could not set state for unknown host to %s"
                       CRM_XS " source=%s", state, source);
               return NULL);
 
     is_member = pcmk__str_eq(state, CRM_NODE_MEMBER, pcmk__str_casei);
     if (is_member) {
         node->when_lost = 0;
         if (membership) {
             node->last_seen = membership;
         }
     }
 
     if (state && !pcmk__str_eq(node->state, state, pcmk__str_casei)) {
         char *last = node->state;
 
        if (is_member) {
            node->when_member = time(NULL);

        } else {
            node->when_member = 0;
        }
 
         node->state = strdup(state);
         crm_notice("Node %s state is now %s " CRM_XS
                    " nodeid=%u previous=%s source=%s", node->uname, state,
                    node->id, (last? last : "unknown"), source);
         if (peer_status_callback != NULL) {
             peer_status_callback(crm_status_nstate, node, last);
         }
         free(last);
 
         if (crm_autoreap && !is_member
             && !pcmk_is_set(node->flags, crm_remote_node)) {
             /* We only autoreap from the peer cache, not the remote peer cache,
              * because the latter should be managed only by
              * crm_remote_peer_cache_refresh().
              */
            if (iter != NULL) {
                 crm_notice("Purged 1 peer with " PCMK_XA_ID
                            "=%u and/or uname=%s from the membership cache",
                            node->id, node->uname);
                 g_hash_table_iter_remove(iter);
 
             } else {
                 reap_crm_member(node->id, node->uname);
             }
             node = NULL;
         }
 
     } else {
         crm_trace("Node %s state is unchanged (%s) " CRM_XS
                   " nodeid=%u source=%s", node->uname, state, node->id, source);
     }
     return node;
 }
 
 /*!
  * \brief Update a node's state and membership information
  *
  * \param[in]     source      Caller's function name (for log messages)
  * \param[in,out] node        Node object to update
  * \param[in]     state       Node's new state
  * \param[in]     membership  Node's new membership ID
  *
  * \return NULL if any node was reaped, value of node otherwise
  *
  * \note If this function returns NULL, the supplied node object was likely
  *       freed and should not be used again. This function should not be
  *       called within a cache iteration if reaping is possible,
  *       otherwise reaping could invalidate the iterator.
  */
 crm_node_t *
 pcmk__update_peer_state(const char *source, crm_node_t *node,
                         const char *state, uint64_t membership)
 {
     return update_peer_state_iter(source, node, state, membership, NULL);
 }
 
 /*!
  * \internal
  * \brief Reap all nodes from cache whose membership information does not match
  *
  * \param[in] membership  Membership ID of nodes to keep
  */
 void
 pcmk__reap_unseen_nodes(uint64_t membership)
 {
     GHashTableIter iter;
     crm_node_t *node = NULL;
 
     crm_trace("Reaping unseen nodes...");
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *)&node)) {
         if (node->last_seen != membership) {
             if (node->state) {
                 /*
                  * Calling update_peer_state_iter() allows us to
                  * remove the node from crm_peer_cache without
                  * invalidating our iterator
                  */
                update_peer_state_iter(__func__, node, CRM_NODE_LOST,
                                       membership, &iter);
 
             } else {
                 crm_info("State of node %s[%u] is still unknown",
                          node->uname, node->id);
             }
         }
     }
 }
 
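/*!
 * \internal
 * \brief Search the known node cache for an entry matching an ID and/or name
 *
 * \param[in] id     If not NULL, node ID to search for
 * \param[in] uname  If not NULL, node name to search for
 *
 * \return Matching cache entry if one can be determined unambiguously,
 *         otherwise NULL
 */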
 static crm_node_t *
 find_known_node(const char *id, const char *uname)
 {
     GHashTableIter iter;
     crm_node_t *node = NULL;
     crm_node_t *by_id = NULL;
     crm_node_t *by_name = NULL;
 
     if (uname) {
         g_hash_table_iter_init(&iter, known_node_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if (node->uname && strcasecmp(node->uname, uname) == 0) {
                 crm_trace("Name match: %s = %p", node->uname, node);
                 by_name = node;
                 break;
             }
         }
     }
 
     if (id) {
         g_hash_table_iter_init(&iter, known_node_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
            if (strcasecmp(node->uuid, id) == 0) {
                crm_trace("ID match: %s = %p", id, node);
                 by_id = node;
                 break;
             }
         }
     }
 
     node = by_id; /* Good default */
     if (by_id == by_name) {
         /* Nothing to do if they match (both NULL counts) */
         crm_trace("Consistent: %p for %s/%s", by_id, id, uname);
 
     } else if (by_id == NULL && by_name) {
         crm_trace("Only one: %p for %s/%s", by_name, id, uname);
 
         if (id) {
             node = NULL;
 
         } else {
             node = by_name;
         }
 
     } else if (by_name == NULL && by_id) {
         crm_trace("Only one: %p for %s/%s", by_id, id, uname);
 
         if (uname) {
             node = NULL;
         }
 
     } else if (uname && by_id->uname
                && pcmk__str_eq(uname, by_id->uname, pcmk__str_casei)) {
         /* Multiple nodes have the same uname in the CIB.
          * Return by_id. */
 
     } else if (id && by_name->uuid
                && pcmk__str_eq(id, by_name->uuid, pcmk__str_casei)) {
         /* Multiple nodes have the same id in the CIB.
          * Return by_name. */
         node = by_name;
 
     } else {
         node = NULL;
     }
 
     if (node == NULL) {
         crm_debug("Couldn't find node%s%s%s%s",
                    id? " " : "",
                    id? id : "",
                    uname? " with name " : "",
                    uname? uname : "");
     }
 
     return node;
 }
 
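/*!
 * \internal
 * \brief Add a CIB node entry to the known node cache (or update an existing
 *        entry)
 *
 * \param[in] xml_node   CIB node configuration entry
 * \param[in] user_data  Ignored
 */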
 static void
 known_node_cache_refresh_helper(xmlNode *xml_node, void *user_data)
 {
     const char *id = crm_element_value(xml_node, PCMK_XA_ID);
     const char *uname = crm_element_value(xml_node, PCMK_XA_UNAME);
    crm_node_t *node = NULL;

    CRM_CHECK((id != NULL) && (uname != NULL), return);
     node = find_known_node(id, uname);
 
     if (node == NULL) {
         char *uniqueid = crm_generate_uuid();
 
         node = pcmk__assert_alloc(1, sizeof(crm_node_t));
 
         node->uname = pcmk__str_copy(uname);
         node->uuid = pcmk__str_copy(id);
 
         g_hash_table_replace(known_node_cache, uniqueid, node);
 
     } else if (pcmk_is_set(node->flags, crm_node_dirty)) {
         pcmk__str_update(&node->uname, uname);
 
         /* Node is in cache and hasn't been updated already, so mark it clean */
         clear_peer_flags(node, crm_node_dirty);
    }
}
 
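/*!
 * \internal
 * \brief Repopulate the known node cache based on CIB node entries
 *
 * \param[in] cib  CIB XML to scan for node entries
 */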
 static void
 refresh_known_node_cache(xmlNode *cib)
 {
     crm_peer_init();
 
     g_hash_table_foreach(known_node_cache, mark_dirty, NULL);
 
     crm_foreach_xpath_result(cib, PCMK__XP_MEMBER_NODE_CONFIG,
                              known_node_cache_refresh_helper, NULL);
 
     /* Remove all old cache entries that weren't seen in the CIB */
     g_hash_table_foreach_remove(known_node_cache, is_dirty, NULL);
 }
 
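/*!
 * \internal
 * \brief Refresh the remote node and known node caches from the CIB
 *
 * \param[in] cib  CIB XML to use as source
 */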
 void
 pcmk__refresh_node_caches_from_cib(xmlNode *cib)
 {
     crm_remote_peer_cache_refresh(cib);
     refresh_known_node_cache(cib);
 }
 
 // Deprecated functions kept only for backward API compatibility
 // LCOV_EXCL_START
 
 #include <crm/cluster/compat.h>
 
 int
 crm_terminate_member(int nodeid, const char *uname, void *unused)
 {
     return stonith_api_kick(nodeid, uname, 120, TRUE);
 }
 
 int
 crm_terminate_member_no_mainloop(int nodeid, const char *uname, int *connection)
 {
     return stonith_api_kick(nodeid, uname, 120, TRUE);
 }
 
 crm_node_t *
 crm_get_peer(unsigned int id, const char *uname)
 {
     return pcmk__get_node(id, uname, NULL, pcmk__node_search_cluster);
 }
 
 crm_node_t *
 crm_get_peer_full(unsigned int id, const char *uname, int flags)
 {
     return pcmk__get_node(id, uname, NULL, flags);
 }
 
 // LCOV_EXCL_STOP
 // End deprecated API