diff --git a/daemons/controld/controld_callbacks.c b/daemons/controld/controld_callbacks.c
index 81f5b13137..c8fecd5812 100644
--- a/daemons/controld/controld_callbacks.c
+++ b/daemons/controld/controld_callbacks.c
@@ -1,381 +1,381 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <string.h>
 
 #include <crm/crm.h>
 #include <crm/common/xml.h>
 #include <crm/cluster.h>
 #include <crm/cib.h>
 
 #include <pacemaker-controld.h>
 
 /* From join_dc... */
 extern gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source);
 
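 /*!
  * \internal
  * \brief Filter an incoming cluster message, routing it if appropriate
  *
  * If we are the DC and another node claims to be DC, trigger an election
  * instead of routing the message. If we are not the DC, drop messages
  * addressed to the DC. Route everything else normally.
  *
  * \param[in,out] msg  Cluster message to filter
  */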
 void
 crmd_ha_msg_filter(xmlNode * msg)
 {
     if (AM_I_DC) {
         const char *sys_from = crm_element_value(msg, PCMK__XA_CRM_SYS_FROM);
 
         if (pcmk__str_eq(sys_from, CRM_SYSTEM_DC, pcmk__str_casei)) {
             const char *from = crm_element_value(msg, PCMK__XA_SRC);
 
             if (!pcmk__str_eq(from, controld_globals.our_nodename,
                               pcmk__str_casei)) {
                 int level = LOG_INFO;
                 const char *op = crm_element_value(msg, PCMK__XA_CRM_TASK);
 
                 /* make sure the election happens NOW */
                 if (controld_globals.fsa_state != S_ELECTION) {
                     ha_msg_input_t new_input;
 
                     level = LOG_WARNING;
                     new_input.msg = msg;
                     register_fsa_error_adv(C_FSA_INTERNAL, I_ELECTION, NULL, &new_input,
                                            __func__);
                 }
 
                 do_crm_log(level, "Another DC detected: %s (op=%s)", from, op);
                 goto done;
             }
         }
 
     } else {
         const char *sys_to = crm_element_value(msg, PCMK__XA_CRM_SYS_TO);
 
         if (pcmk__str_eq(sys_to, CRM_SYSTEM_DC, pcmk__str_casei)) {
             return;
         }
     }
 
     /* crm_log_xml_trace(msg, "HA[inbound]"); */
     route_message(C_HA_MESSAGE, msg);
 
   done:
     controld_trigger_fsa();
 }
 
 /*!
  * \internal
  * \brief Check whether a node is online
  *
  * \param[in] node  Node to check
  *
  * \retval -1 if completely dead
  * \retval  0 if partially alive
  * \retval  1 if completely alive
  */
 static int
 node_alive(const crm_node_t *node)
 {
     if (pcmk_is_set(node->flags, crm_remote_node)) {
         // Pacemaker Remote nodes can't be partially alive
         return pcmk__str_eq(node->state, CRM_NODE_MEMBER, pcmk__str_casei) ? 1 : -1;
 
-    } else if (crm_is_peer_active(node)) {
+    } else if (pcmk__cluster_is_node_active(node)) {
         // Completely up cluster node: both cluster member and peer
         return 1;
 
     } else if (!pcmk_is_set(node->processes, crm_get_cluster_proc())
                && !pcmk__str_eq(node->state, CRM_NODE_MEMBER, pcmk__str_casei)) {
         // Completely down cluster node: neither cluster member nor peer
         return -1;
     }
 
     // Partially up cluster node: only cluster member or only peer
     return 0;
 }
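 
 /* Usage sketch (illustrative only; see peer_update_callback() below for the
  * real caller): code typically branches on the tri-state result:
  *
  *     int alive = node_alive(node);
  *
  *     if (alive > 0) {          // both cluster member and peer
  *         ...
  *     } else if (alive == 0) {  // member or peer, but not both
  *         ...
  *     } else {                  // neither member nor peer
  *         ...
  *     }
  */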
 
 #define state_text(state) ((state)? (const char *)(state) : "in unknown state")
 
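 /*!
  * \internal
  * \brief Handle a peer status change from the membership layer
  *
  * \param[in]     type  Which aspect of the node changed (name, state, or
  *                      process flags)
  * \param[in,out] node  Node whose status changed
  * \param[in]     data  Previous value of what changed (state or process
  *                      flags), if applicable
  */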
 void
 peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data)
 {
     uint32_t old = 0;
     bool appeared = FALSE;
     bool is_remote = pcmk_is_set(node->flags, crm_remote_node);
 
     controld_node_pending_timer(node);
 
     /* The controller waits to receive some information from the membership
      * layer before declaring itself operational. If this is being called for a
      * cluster node, indicate that we have it.
      */
     if (!is_remote) {
         controld_set_fsa_input_flags(R_PEER_DATA);
     }
 
     if (type == crm_status_processes
         && pcmk_is_set(node->processes, crm_get_cluster_proc())
         && !AM_I_DC
         && !is_remote) {
         /*
          * This is a hack until we can send to a node ID and/or fix node name
          * lookups. These messages are ignored in crmd_ha_msg_filter().
          */
         xmlNode *query = create_request(CRM_OP_HELLO, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL);
 
         crm_debug("Sending hello to node %u so that it learns our node name", node->id);
         send_cluster_message(node, crm_msg_crmd, query, FALSE);
 
         free_xml(query);
     }
 
     if (node->uname == NULL) {
         return;
     }
 
     switch (type) {
         case crm_status_uname:
             /* If we've never seen the node, then it also won't be in the status section */
             crm_info("%s node %s is now %s",
                      (is_remote? "Remote" : "Cluster"),
                      node->uname, state_text(node->state));
             return;
 
         case crm_status_nstate:
             /* This callback should not be called unless the state actually
              * changed, but here's a failsafe just in case.
              */
             CRM_CHECK(!pcmk__str_eq(data, node->state, pcmk__str_casei),
                       return);
 
             crm_info("%s node %s is now %s (was %s)",
                      (is_remote? "Remote" : "Cluster"),
                      node->uname, state_text(node->state), state_text(data));
 
             if (pcmk__str_eq(CRM_NODE_MEMBER, node->state, pcmk__str_casei)) {
                 appeared = TRUE;
                 if (!is_remote) {
                     remove_stonith_cleanup(node->uname);
                 }
             } else {
                 controld_remove_failed_sync_node(node->uname);
                 controld_remove_voter(node->uname);
             }
 
             crmd_alert_node_event(node);
             break;
 
         case crm_status_processes:
             CRM_CHECK(data != NULL, return);
             old = *(const uint32_t *)data;
             appeared = pcmk_is_set(node->processes, crm_get_cluster_proc());
 
             {
                 const char *dc_s = controld_globals.dc_name;
 
                 if ((dc_s == NULL) && AM_I_DC) {
                     dc_s = PCMK_VALUE_TRUE;
                 }
 
                 crm_info("Node %s is %s a peer " CRM_XS
                          " DC=%s old=%#07x new=%#07x",
                          node->uname, (appeared? "now" : "no longer"),
                          pcmk__s(dc_s, "<none>"), old, node->processes);
             }
 
             if (!pcmk_is_set((node->processes ^ old), crm_get_cluster_proc())) {
                 /* Peer status did not change. This should not be possible,
                  * since we don't track process flags other than peer status.
                  */
                 crm_trace("Process flag %#7x did not change from %#7x to %#7x",
                           crm_get_cluster_proc(), old, node->processes);
                 return;
 
             }
 
             if (!appeared) {
                 node->peer_lost = time(NULL);
                 controld_remove_failed_sync_node(node->uname);
                 controld_remove_voter(node->uname);
             }
 
             if (!pcmk_is_set(controld_globals.fsa_input_register,
                              R_CIB_CONNECTED)) {
                 crm_trace("Ignoring peer status change because not connected to CIB");
                 return;
 
             } else if (controld_globals.fsa_state == S_STOPPING) {
                 crm_trace("Ignoring peer status change because stopping");
                 return;
             }
 
             if (!appeared
                 && pcmk__str_eq(node->uname, controld_globals.our_nodename,
                                 pcmk__str_casei)) {
                 /* Did we get evicted? */
                 crm_notice("Our peer connection failed");
                 register_fsa_input(C_CRMD_STATUS_CALLBACK, I_ERROR, NULL);
 
             } else if (pcmk__str_eq(node->uname, controld_globals.dc_name,
                                     pcmk__str_casei)
-                       && !crm_is_peer_active(node)) {
+                       && !pcmk__cluster_is_node_active(node)) {
                 /* Did the DC leave us? */
                 crm_notice("Our peer on the DC (%s) is dead",
                            controld_globals.dc_name);
                 register_fsa_input(C_CRMD_STATUS_CALLBACK, I_ELECTION, NULL);
 
                 /* @COMPAT DC < 1.1.13: If a DC shuts down normally, we don't
                  * want to fence it. Newer DCs will send their shutdown request
                  * to all peers, who will update the DC's expected state to
                  * down, thus avoiding fencing. We can safely erase the DC's
                  * transient attributes when it leaves in that case. However,
                  * the only way to avoid fencing older DCs is to leave the
                  * transient attributes intact until it rejoins.
                  */
                 if (compare_version(controld_globals.dc_version, "3.0.9") > 0) {
                     controld_delete_node_state(node->uname,
                                                controld_section_attrs,
                                                cib_scope_local);
                 }
 
             } else if (AM_I_DC
                        || pcmk_is_set(controld_globals.flags, controld_dc_left)
                        || (controld_globals.dc_name == NULL)) {
                 /* This only needs to be done once, so normally the DC should do
                  * it. However, if there is no DC, every node must do it, since
                  * there is no other way to ensure that at least one node does.
                  */
                 if (appeared) {
                     te_trigger_stonith_history_sync(FALSE);
                 } else {
                     controld_delete_node_state(node->uname,
                                                controld_section_attrs,
                                                cib_scope_local);
                 }
             }
             break;
     }
 
     if (AM_I_DC) {
         xmlNode *update = NULL;
         int flags = node_update_peer;
         int alive = node_alive(node);
         pcmk__graph_action_t *down = match_down_event(node->uuid);
 
         crm_trace("Alive=%d, appeared=%d, down=%d",
                   alive, appeared, (down? down->id : -1));
 
         if (appeared && (alive > 0) && !is_remote) {
             register_fsa_input_before(C_FSA_INTERNAL, I_NODE_JOIN, NULL);
         }
 
         if (down) {
             const char *task = crm_element_value(down->xml, PCMK_XA_OPERATION);
 
             if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) {
 
                 /* tengine_stonith_callback() confirms fence actions */
                 crm_trace("Updating CIB %s fencer reported fencing of %s complete",
                           (pcmk_is_set(down->flags, pcmk__graph_action_confirmed)? "after" : "before"), node->uname);
 
             } else if (!appeared && pcmk__str_eq(task, PCMK_ACTION_DO_SHUTDOWN,
                                                  pcmk__str_casei)) {
 
                 // Shutdown actions are immediately confirmed (i.e. no_wait)
                 if (!is_remote) {
                     flags |= node_update_join | node_update_expected;
                     crmd_peer_down(node, FALSE);
                     check_join_state(controld_globals.fsa_state, __func__);
                 }
                 if (alive >= 0) {
                     crm_info("%s of peer %s is in progress " CRM_XS " action=%d",
                              task, node->uname, down->id);
                 } else {
                     crm_notice("%s of peer %s is complete " CRM_XS " action=%d",
                                task, node->uname, down->id);
                     pcmk__update_graph(controld_globals.transition_graph, down);
                     trigger_graph();
                 }
 
             } else {
                 crm_trace("Node %s is %s, was expected to %s (op %d)",
                           node->uname,
                           ((alive > 0)? "alive" :
                            ((alive < 0)? "dead" : "partially alive")),
                           task, down->id);
             }
 
         } else if (!appeared) {
             if ((controld_globals.transition_graph == NULL)
                 || (controld_globals.transition_graph->id == -1)) {
                 crm_info("Stonith/shutdown of node %s is unknown to the "
                          "current DC", node->uname);
             } else {
                 crm_warn("Stonith/shutdown of node %s was not expected",
                          node->uname);
             }
             if (!is_remote) {
                 crm_update_peer_join(__func__, node, crm_join_none);
                 check_join_state(controld_globals.fsa_state, __func__);
             }
             abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart,
                              "Node failure", NULL);
             fail_incompletable_actions(controld_globals.transition_graph,
                                        node->uuid);
 
         } else {
             crm_trace("Node %s came up, was not expected to be down",
                       node->uname);
         }
 
         if (is_remote) {
             /* A pacemaker_remote node won't have its cluster status updated
              * in the CIB by membership-layer callbacks, so do it here.
              */
             flags |= node_update_cluster;
 
             /* Trigger resource placement on newly integrated nodes */
             if (appeared) {
                 abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart,
                                  "Pacemaker Remote node integrated", NULL);
             }
         }
 
         if (!appeared && (type == crm_status_processes)
             && (node->when_member > 1)) {
             /* The node left CPG but is still a cluster member. Set its
              * membership time to 1 to record it in the cluster state as a
              * boolean, so we don't fence it due to
              * PCMK_OPT_NODE_PENDING_TIMEOUT.
              */
             node->when_member = 1;
             flags |= node_update_cluster;
             controld_node_pending_timer(node);
         }
 
         /* Update the CIB node state */
         update = create_node_state_update(node, flags, NULL, __func__);
         if (update == NULL) {
             crm_debug("Node state update not yet possible for %s", node->uname);
         } else {
             fsa_cib_anon_update(PCMK_XE_STATUS, update);
         }
         free_xml(update);
     }
 
     controld_trigger_fsa();
 }
 
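 /*!
  * \internal
  * \brief Invoke the FSA when triggered (as a mainloop callback)
  *
  * \param[in] user_data  Ignored
  *
  * \return TRUE (to keep the trigger source active)
  */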
 gboolean
 crm_fsa_trigger(gpointer user_data)
 {
     crm_trace("Invoked (queue len: %d)",
               g_list_length(controld_globals.fsa_message_queue));
     s_crmd_fsa(C_FSA_INTERNAL);
     crm_trace("Exited  (queue len: %d)",
               g_list_length(controld_globals.fsa_message_queue));
     return TRUE;
 }
diff --git a/daemons/controld/controld_join_dc.c b/daemons/controld/controld_join_dc.c
index c928b65e84..bf06422361 100644
--- a/daemons/controld/controld_join_dc.c
+++ b/daemons/controld/controld_join_dc.c
@@ -1,1045 +1,1045 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/crm.h>
 
 #include <crm/common/xml.h>
 #include <crm/cluster.h>
 
 #include <pacemaker-controld.h>
 
 static char *max_generation_from = NULL;
 static xmlNodePtr max_generation_xml = NULL;
 
 /*!
  * \internal
  * \brief Nodes from which a CIB sync has failed since the peer joined
  *
  * This table is of the form (<tt>node_name -> join_id</tt>). \p node_name is
  * the name of a client node from which a CIB \p sync_from() call has failed in
  * \p do_dc_join_finalize() since the client joined the cluster as a peer.
  * \p join_id is the ID of the join round in which the \p sync_from() failed,
  * and is intended for use in nack log messages.
  */
 static GHashTable *failed_sync_nodes = NULL;
 
 void finalize_join_for(gpointer key, gpointer value, gpointer user_data);
 void finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data);
 gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source);
 
 /* Numeric counter used to identify join rounds (an unsigned int would be
  * appropriate, except we get and set it in XML as int)
  */
 static int current_join_id = 0;
 
 /*!
  * \internal
  * \brief Destroy the hash table containing failed sync nodes
  */
 void
 controld_destroy_failed_sync_table(void)
 {
     if (failed_sync_nodes != NULL) {
         g_hash_table_destroy(failed_sync_nodes);
         failed_sync_nodes = NULL;
     }
 }
 
 /*!
  * \internal
  * \brief Remove a node from the failed sync nodes table if present
  *
  * \param[in] node_name  Node name to remove
  */
 void
 controld_remove_failed_sync_node(const char *node_name)
 {
     if (failed_sync_nodes != NULL) {
         g_hash_table_remove(failed_sync_nodes, (gchar *) node_name);
     }
 }
 
 /*!
  * \internal
  * \brief Add to a hash table a node whose CIB failed to sync
  *
  * \param[in] node_name  Name of node whose CIB failed to sync
  * \param[in] join_id    Join round when the failure occurred
  */
 static void
 record_failed_sync_node(const char *node_name, gint join_id)
 {
     if (failed_sync_nodes == NULL) {
         failed_sync_nodes = pcmk__strikey_table(g_free, NULL);
     }
 
     /* If the node is already in the table, then we failed to nack it during
      * the filter-offer step
      */
     CRM_LOG_ASSERT(g_hash_table_insert(failed_sync_nodes, g_strdup(node_name),
                                        GINT_TO_POINTER(join_id)));
 }
 
 /*!
  * \internal
  * \brief Look up a node name in the failed sync table
  *
  * \param[in]  node_name  Name of node to look up
  * \param[out] join_id    Where to store the join ID of when the sync failed
  *
  * \return Standard Pacemaker return code. Specifically, \p pcmk_rc_ok if the
  *         node name was found, or \p pcmk_rc_node_unknown otherwise.
  * \note \p *join_id is set to -1 if the node is not found.
  */
 static int
 lookup_failed_sync_node(const char *node_name, gint *join_id)
 {
     *join_id = -1;
 
     if (failed_sync_nodes != NULL) {
         gpointer result = g_hash_table_lookup(failed_sync_nodes,
                                               (gchar *) node_name);
         if (result != NULL) {
             *join_id = GPOINTER_TO_INT(result);
             return pcmk_rc_ok;
         }
     }
     return pcmk_rc_node_unknown;
 }
 
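 /*!
  * \internal
  * \brief Update a node's join phase
  *
  * A phase may be reset (to \c crm_join_none or a nack value) or advanced by
  * exactly one step; any other transition is rejected with a warning.
  *
  * \param[in]     source  Name of calling function (for logging)
  * \param[in,out] node    Node whose join phase should be updated
  * \param[in]     phase   New join phase
  */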
 void
 crm_update_peer_join(const char *source, crm_node_t * node, enum crm_join_phase phase)
 {
     enum crm_join_phase last = 0;
 
     CRM_CHECK(node != NULL, return);
 
     /* Remote nodes do not participate in joins */
     if (pcmk_is_set(node->flags, crm_remote_node)) {
         return;
     }
 
     last = node->join;
 
     if (phase == last) {
         crm_trace("Node %s join-%d phase is still %s "
                   CRM_XS " nodeid=%u source=%s",
                   node->uname, current_join_id, crm_join_phase_str(last),
                   node->id, source);
 
     } else if ((phase <= crm_join_none) || (phase == (last + 1))) {
         node->join = phase;
         crm_trace("Node %s join-%d phase is now %s (was %s) "
                   CRM_XS " nodeid=%u source=%s",
                   node->uname, current_join_id, crm_join_phase_str(phase),
                   crm_join_phase_str(last), node->id, source);
 
     } else {
         crm_warn("Rejecting join-%d phase update for node %s because "
                  "can't go from %s to %s " CRM_XS " nodeid=%u source=%s",
                  current_join_id, node->uname, crm_join_phase_str(last),
                  crm_join_phase_str(phase), node->id, source);
     }
 }
 
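 /*!
  * \internal
  * \brief Reset join state at the beginning of a new join round
  *
  * Reset every peer's join phase to \c crm_join_none, discard the best CIB
  * generation seen so far, and clear \c R_HAVE_CIB.
  */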
 static void
 start_join_round(void)
 {
     GHashTableIter iter;
     crm_node_t *peer = NULL;
 
     crm_debug("Starting new join round join-%d", current_join_id);
 
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &peer)) {
         crm_update_peer_join(__func__, peer, crm_join_none);
     }
     if (max_generation_from != NULL) {
         free(max_generation_from);
         max_generation_from = NULL;
     }
     if (max_generation_xml != NULL) {
         free_xml(max_generation_xml);
         max_generation_xml = NULL;
     }
     controld_clear_fsa_input_flags(R_HAVE_CIB);
 }
 
 /*!
  * \internal
  * \brief Create a join message from the DC
  *
  * \param[in] join_op  Join operation name
  * \param[in] host_to  Recipient of message
  */
 static xmlNode *
 create_dc_message(const char *join_op, const char *host_to)
 {
     xmlNode *msg = create_request(join_op, NULL, host_to, CRM_SYSTEM_CRMD,
                                   CRM_SYSTEM_DC, NULL);
 
     /* Identify which election this is a part of */
     crm_xml_add_int(msg, PCMK__XA_JOIN_ID, current_join_id);
 
     /* Add a field specifying whether the DC is shutting down. This keeps the
      * joining node from fencing the old DC if it becomes the new DC.
      */
     pcmk__xe_set_bool_attr(msg, PCMK__XA_DC_LEAVING,
                            pcmk_is_set(controld_globals.fsa_input_register,
                                        R_SHUTDOWN));
     return msg;
 }
 
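 /*!
  * \internal
  * \brief Make a join offer to a node if appropriate (as a \c GHFunc)
  *
  * \param[in]     key        Ignored
  * \param[in,out] value      Node to make the offer to (\c crm_node_t *)
  * \param[in]     user_data  If not NULL, skip nodes whose join phase is
  *                           already known
  */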
 static void
 join_make_offer(gpointer key, gpointer value, gpointer user_data)
 {
     xmlNode *offer = NULL;
     crm_node_t *member = (crm_node_t *)value;
 
     CRM_ASSERT(member != NULL);
-    if (crm_is_peer_active(member) == FALSE) {
+    if (!pcmk__cluster_is_node_active(member)) {
         crm_info("Not making join-%d offer to inactive node %s",
                  current_join_id,
                  (member->uname? member->uname : "with unknown name"));
         if ((member->expected == NULL)
             && pcmk__str_eq(member->state, CRM_NODE_LOST, pcmk__str_casei)) {
             /* You would think this unsafe, but in fact this, plus an active
              * resource, is what causes the node to be fenced.
              *
              * Yes, this does mean that any node that dies at the same time
              * as the old DC and is not (still) running resources won't be
              * fenced.
              *
              * I'm not happy about this either.
              */
             pcmk__update_peer_expected(__func__, member, CRMD_JOINSTATE_DOWN);
         }
         return;
     }
 
     if (member->uname == NULL) {
         crm_info("Not making join-%d offer to node uuid %s with unknown name",
                  current_join_id, member->uuid);
         return;
     }
 
     if (controld_globals.membership_id != crm_peer_seq) {
         controld_globals.membership_id = crm_peer_seq;
         crm_info("Making join-%d offers based on membership event %llu",
                  current_join_id, crm_peer_seq);
     }
 
     if (user_data && member->join > crm_join_none) {
         crm_info("Not making join-%d offer to already known node %s (%s)",
                  current_join_id, member->uname,
                  crm_join_phase_str(member->join));
         return;
     }
 
     crm_update_peer_join(__func__, member, crm_join_none);
 
     offer = create_dc_message(CRM_OP_JOIN_OFFER, member->uname);
 
     // Advertise our feature set so the joining node can bail if not compatible
     crm_xml_add(offer, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET);
 
     crm_info("Sending join-%d offer to %s", current_join_id, member->uname);
     send_cluster_message(member, crm_msg_crmd, offer, TRUE);
     free_xml(offer);
 
     crm_update_peer_join(__func__, member, crm_join_welcomed);
 }
 
 /*	 A_DC_JOIN_OFFER_ALL	*/
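 /*!
  * \internal
  * \brief Start a new join round and make offers to all known cluster nodes
  */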
 void
 do_dc_join_offer_all(long long action,
                      enum crmd_fsa_cause cause,
                      enum crmd_fsa_state cur_state,
                      enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     int count;
 
     /* Reset everyone's status back to down or in_ccm in the CIB.
      * Any nodes that are active in the CIB but not in the cluster membership
      * will be seen as offline by the scheduler anyway.
      */
     current_join_id++;
     start_join_round();
 
     update_dc(NULL);
     if (cause == C_HA_MESSAGE && current_input == I_NODE_JOIN) {
         crm_info("A new node joined the cluster");
     }
     g_hash_table_foreach(crm_peer_cache, join_make_offer, NULL);
 
     count = crmd_join_phase_count(crm_join_welcomed);
     crm_info("Waiting on join-%d requests from %d outstanding node%s",
              current_join_id, count, pcmk__plural_s(count));
 
     // Don't waste time by invoking the scheduler yet
 }
 
 /*	 A_DC_JOIN_OFFER_ONE	*/
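 /*!
  * \internal
  * \brief Make a join offer to the node that requested one (and to the local
  *        node, if different, to keep \c max_generation_from correct)
  */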
 void
 do_dc_join_offer_one(long long action,
                      enum crmd_fsa_cause cause,
                      enum crmd_fsa_state cur_state,
                      enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     crm_node_t *member;
     ha_msg_input_t *welcome = NULL;
     int count;
     const char *join_to = NULL;
 
     if (msg_data->data == NULL) {
         crm_info("Making join-%d offers to any unconfirmed nodes "
                  "because an unknown node joined", current_join_id);
         g_hash_table_foreach(crm_peer_cache, join_make_offer, &member);
         check_join_state(cur_state, __func__);
         return;
     }
 
     welcome = fsa_typed_data(fsa_dt_ha_msg);
     if (welcome == NULL) {
         // fsa_typed_data() already logged an error
         return;
     }
 
     join_to = crm_element_value(welcome->msg, PCMK__XA_SRC);
     if (join_to == NULL) {
         crm_err("Can't make join-%d offer to unknown node", current_join_id);
         return;
     }
     member = pcmk__get_node(0, join_to, NULL, pcmk__node_search_cluster);
 
     /* It is possible that a node will have been sick or starting up when the
      * original offer was made. However, it will either re-announce itself in
      * due course, or we can re-store the original offer on the client.
      */
 
     crm_update_peer_join(__func__, member, crm_join_none);
     join_make_offer(NULL, member, NULL);
 
     /* If the offer isn't to the local node, make an offer to the local node as
      * well, to ensure the correct value for max_generation_from.
      */
     if (strcasecmp(join_to, controld_globals.our_nodename) != 0) {
         member = pcmk__get_node(0, controld_globals.our_nodename, NULL,
                                 pcmk__node_search_cluster);
         join_make_offer(NULL, member, NULL);
     }
 
     /* This was a genuine join request; cancel any existing transition and
      * invoke the scheduler.
      */
     abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart, "Node join",
                      NULL);
 
     count = crmd_join_phase_count(crm_join_welcomed);
     crm_info("Waiting on join-%d requests from %d outstanding node%s",
              current_join_id, count, pcmk__plural_s(count));
 
     // Don't waste time by invoking the scheduler yet
 }
 
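 /*!
  * \internal
  * \brief Compare a numeric XML attribute in two XML elements
  *
  * \param[in] left   First element
  * \param[in] right  Second element
  * \param[in] field  Name of attribute to compare (missing or unparsable
  *                   values compare as -1)
  *
  * \return -1 if \p left's value is smaller, 1 if greater, or 0 if equal
  */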
 static int
 compare_int_fields(xmlNode * left, xmlNode * right, const char *field)
 {
     const char *elem_l = crm_element_value(left, field);
     const char *elem_r = crm_element_value(right, field);
 
     long long int_elem_l;
     long long int_elem_r;
 
     pcmk__scan_ll(elem_l, &int_elem_l, -1LL);
     pcmk__scan_ll(elem_r, &int_elem_r, -1LL);
 
     if (int_elem_l < int_elem_r) {
         return -1;
 
     } else if (int_elem_l > int_elem_r) {
         return 1;
     }
 
     return 0;
 }
 
 /*	 A_DC_JOIN_PROCESS_REQ	*/
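 /*!
  * \internal
  * \brief Process a join request, integrating or nacking the requester and
  *        tracking the best CIB generation seen so far
  */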
 void
 do_dc_join_filter_offer(long long action,
                         enum crmd_fsa_cause cause,
                         enum crmd_fsa_state cur_state,
                         enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     xmlNode *generation = NULL;
 
     int cmp = 0;
     int join_id = -1;
     int count = 0;
     gint value = 0;
     gboolean ack_nack_bool = TRUE;
     ha_msg_input_t *join_ack = fsa_typed_data(fsa_dt_ha_msg);
 
     const char *join_from = crm_element_value(join_ack->msg, PCMK__XA_SRC);
     const char *ref = crm_element_value(join_ack->msg, PCMK_XA_REFERENCE);
     const char *join_version = crm_element_value(join_ack->msg,
                                                  PCMK_XA_CRM_FEATURE_SET);
     crm_node_t *join_node = NULL;
 
     if (join_from == NULL) {
         crm_err("Ignoring invalid join request without node name");
         return;
     }
     join_node = pcmk__get_node(0, join_from, NULL, pcmk__node_search_cluster);
 
     crm_element_value_int(join_ack->msg, PCMK__XA_JOIN_ID, &join_id);
     if (join_id != current_join_id) {
         crm_debug("Ignoring join-%d request from %s because we are on join-%d",
                   join_id, join_from, current_join_id);
         check_join_state(cur_state, __func__);
         return;
     }
 
     generation = join_ack->xml;
     if (max_generation_xml != NULL && generation != NULL) {
         int lpc = 0;
 
         const char *attributes[] = {
             PCMK_XA_ADMIN_EPOCH,
             PCMK_XA_EPOCH,
             PCMK_XA_NUM_UPDATES,
         };
 
         /* It's not obvious that join_ack->xml is the PCMK__XE_GENERATION_TUPLE
          * element from the join client. The "if" guard is for clarity.
          */
         if (pcmk__xe_is(generation, PCMK__XE_GENERATION_TUPLE)) {
             for (lpc = 0; cmp == 0 && lpc < PCMK__NELEM(attributes); lpc++) {
                 cmp = compare_int_fields(max_generation_xml, generation,
                                          attributes[lpc]);
             }
 
         } else {    // Should always be PCMK__XE_GENERATION_TUPLE
             CRM_LOG_ASSERT(false);
         }
     }
 
     if (ref == NULL) {
         ref = "none"; // for logging only
     }
 
     if (lookup_failed_sync_node(join_from, &value) == pcmk_rc_ok) {
         crm_err("Rejecting join-%d request from node %s because we failed to "
                 "sync its CIB in join-%d " CRM_XS " ref=%s",
                 join_id, join_from, value, ref);
         ack_nack_bool = FALSE;
 
-    } else if (!crm_is_peer_active(join_node)) {
+    } else if (!pcmk__cluster_is_node_active(join_node)) {
         if (match_down_event(join_from) != NULL) {
             /* The join request was received after the node was fenced or
              * otherwise shutdown in a way that we're aware of. No need to log
              * an error in this rare occurrence; we know the client was recently
              * shut down, and receiving a lingering in-flight request is not
              * cause for alarm.
              */
             crm_debug("Rejecting join-%d request from inactive node %s "
                       CRM_XS " ref=%s", join_id, join_from, ref);
         } else {
             crm_err("Rejecting join-%d request from inactive node %s "
                     CRM_XS " ref=%s", join_id, join_from, ref);
         }
         ack_nack_bool = FALSE;
 
     } else if (generation == NULL) {
         crm_err("Rejecting invalid join-%d request from node %s "
                 "missing CIB generation " CRM_XS " ref=%s",
                 join_id, join_from, ref);
         ack_nack_bool = FALSE;
 
     } else if ((join_version == NULL)
                || !feature_set_compatible(CRM_FEATURE_SET, join_version)) {
         crm_err("Rejecting join-%d request from node %s because feature set %s"
                 " is incompatible with ours (%s) " CRM_XS " ref=%s",
                 join_id, join_from, (join_version? join_version : "pre-3.1.0"),
                 CRM_FEATURE_SET, ref);
         ack_nack_bool = FALSE;
 
     } else if (max_generation_xml == NULL) {
         const char *validation = crm_element_value(generation,
                                                    PCMK_XA_VALIDATE_WITH);
 
         if (pcmk__get_schema(validation) == NULL) {
             crm_err("Rejecting join-%d request from %s (with first CIB "
                     "generation) due to unknown schema version %s "
                     CRM_XS " ref=%s",
                     join_id, join_from, pcmk__s(validation, "(missing)"), ref);
             ack_nack_bool = FALSE;
 
         } else {
             crm_debug("Accepting join-%d request from %s (with first CIB "
                       "generation) " CRM_XS " ref=%s",
                       join_id, join_from, ref);
             max_generation_xml = pcmk__xml_copy(NULL, generation);
             pcmk__str_update(&max_generation_from, join_from);
         }
 
     } else if ((cmp < 0)
                || ((cmp == 0)
                    && pcmk__str_eq(join_from, controld_globals.our_nodename,
                                    pcmk__str_casei))) {
         const char *validation = crm_element_value(generation,
                                                    PCMK_XA_VALIDATE_WITH);
 
         if (pcmk__get_schema(validation) == NULL) {
             crm_err("Rejecting join-%d request from %s (with better CIB "
                     "generation than current best from %s) due to unknown "
                     "schema version %s " CRM_XS " ref=%s",
                     join_id, join_from, max_generation_from,
                     pcmk__s(validation, "(missing)"), ref);
             ack_nack_bool = FALSE;
 
         } else {
             crm_debug("Accepting join-%d request from %s (with better CIB "
                       "generation than current best from %s) " CRM_XS " ref=%s",
                       join_id, join_from, max_generation_from, ref);
             crm_log_xml_debug(max_generation_xml, "Old max generation");
             crm_log_xml_debug(generation, "New max generation");
 
             free_xml(max_generation_xml);
             max_generation_xml = pcmk__xml_copy(NULL, join_ack->xml);
             pcmk__str_update(&max_generation_from, join_from);
         }
 
     } else {
         crm_debug("Accepting join-%d request from %s " CRM_XS " ref=%s",
                   join_id, join_from, ref);
     }
 
     if (!ack_nack_bool) {
         if (compare_version(join_version, "3.17.0") < 0) {
             /* Clients with CRM_FEATURE_SET < 3.17.0 may respawn infinitely
              * after a nack message, so don't send one
              */
             crm_update_peer_join(__func__, join_node, crm_join_nack_quiet);
         } else {
             crm_update_peer_join(__func__, join_node, crm_join_nack);
         }
         pcmk__update_peer_expected(__func__, join_node, CRMD_JOINSTATE_NACK);
 
     } else {
         crm_update_peer_join(__func__, join_node, crm_join_integrated);
         pcmk__update_peer_expected(__func__, join_node, CRMD_JOINSTATE_MEMBER);
     }
 
     count = crmd_join_phase_count(crm_join_integrated);
     crm_debug("%d node%s currently integrated in join-%d",
               count, pcmk__plural_s(count), join_id);
 
     if (check_join_state(cur_state, __func__) == FALSE) {
         // Don't waste time by invoking the scheduler yet
         count = crmd_join_phase_count(crm_join_welcomed);
         crm_debug("Waiting on join-%d requests from %d outstanding node%s",
                   join_id, count, pcmk__plural_s(count));
     }
 }
 
 /*	A_DC_JOIN_FINALIZE	*/
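 /*!
  * \internal
  * \brief Finalize the join round by syncing the best known CIB (local or
  *        from \c max_generation_from) to the cluster
  */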
 void
 do_dc_join_finalize(long long action,
                     enum crmd_fsa_cause cause,
                     enum crmd_fsa_state cur_state,
                     enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     char *sync_from = NULL;
     int rc = pcmk_ok;
     int count_welcomed = crmd_join_phase_count(crm_join_welcomed);
     int count_finalizable = crmd_join_phase_count(crm_join_integrated)
                             + crmd_join_phase_count(crm_join_nack)
                             + crmd_join_phase_count(crm_join_nack_quiet);
 
     /* We can do this straight away, to avoid clients timing us out while we
      * compute the latest CIB
      */
     if (count_welcomed != 0) {
         crm_debug("Waiting on join-%d requests from %d outstanding node%s "
                   "before finalizing join", current_join_id, count_welcomed,
                   pcmk__plural_s(count_welcomed));
         crmd_join_phase_log(LOG_DEBUG);
         /* crmd_fsa_stall(FALSE); Needed? */
         return;
 
     } else if (count_finalizable == 0) {
         crm_debug("Finalization not needed for join-%d at the current time",
                   current_join_id);
         crmd_join_phase_log(LOG_DEBUG);
         check_join_state(controld_globals.fsa_state, __func__);
         return;
     }
 
     controld_clear_fsa_input_flags(R_HAVE_CIB);
     if (pcmk__str_eq(max_generation_from, controld_globals.our_nodename,
                      pcmk__str_null_matches|pcmk__str_casei)) {
         controld_set_fsa_input_flags(R_HAVE_CIB);
     }
 
     if (!controld_globals.transition_graph->complete) {
         crm_warn("Delaying join-%d finalization while transition in progress",
                  current_join_id);
         crmd_join_phase_log(LOG_DEBUG);
         crmd_fsa_stall(FALSE);
         return;
     }
 
     if (pcmk_is_set(controld_globals.fsa_input_register, R_HAVE_CIB)) {
         // Send our CIB out to everyone
         sync_from = pcmk__str_copy(controld_globals.our_nodename);
         crm_debug("Finalizing join-%d for %d node%s (sync'ing from local CIB)",
                   current_join_id, count_finalizable,
                   pcmk__plural_s(count_finalizable));
         crm_log_xml_debug(max_generation_xml, "Requested CIB version");
 
     } else {
         // Ask for the agreed best CIB
         sync_from = pcmk__str_copy(max_generation_from);
         crm_notice("Finalizing join-%d for %d node%s (sync'ing CIB from %s)",
                    current_join_id, count_finalizable,
                    pcmk__plural_s(count_finalizable), sync_from);
         crm_log_xml_notice(max_generation_xml, "Requested CIB version");
     }
     crmd_join_phase_log(LOG_DEBUG);
 
     rc = controld_globals.cib_conn->cmds->sync_from(controld_globals.cib_conn,
                                                     sync_from, NULL, cib_none);
     fsa_register_cib_callback(rc, sync_from, finalize_sync_callback);
 }
 
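 /*!
  * \internal
  * \brief Free the saved best CIB generation and the name of its source node
  */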
 void
 free_max_generation(void)
 {
     free(max_generation_from);
     max_generation_from = NULL;
 
     free_xml(max_generation_xml);
     max_generation_xml = NULL;
 }
 
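 /*!
  * \internal
  * \brief Handle the result of the CIB sync requested in do_dc_join_finalize()
  *
  * On failure, record the sync source as failed and restart the join. On
  * success (if we are still DC and finalizing), notify all finalizable nodes
  * of the join results.
  */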
 void
 finalize_sync_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     CRM_LOG_ASSERT(-EPERM != rc);
 
     if (rc != pcmk_ok) {
         const char *sync_from = (const char *) user_data;
 
         do_crm_log(((rc == -pcmk_err_old_data)? LOG_WARNING : LOG_ERR),
                    "Could not sync CIB from %s in join-%d: %s",
                    sync_from, current_join_id, pcmk_strerror(rc));
 
         if (rc != -pcmk_err_old_data) {
             record_failed_sync_node(sync_from, current_join_id);
         }
 
         /* restart the whole join process */
         register_fsa_error_adv(C_FSA_INTERNAL, I_ELECTION_DC, NULL, NULL,
                                __func__);
 
     } else if (!AM_I_DC) {
         crm_debug("Sync'ed CIB for join-%d but no longer DC", current_join_id);
 
     } else if (controld_globals.fsa_state != S_FINALIZE_JOIN) {
         crm_debug("Sync'ed CIB for join-%d but no longer in S_FINALIZE_JOIN "
                   "(%s)", current_join_id,
                   fsa_state2string(controld_globals.fsa_state));
 
     } else {
         controld_set_fsa_input_flags(R_HAVE_CIB);
 
         /* make sure dc_uuid is re-set to us */
         if (!check_join_state(controld_globals.fsa_state, __func__)) {
             int count_finalizable = 0;
 
             count_finalizable = crmd_join_phase_count(crm_join_integrated)
                                 + crmd_join_phase_count(crm_join_nack)
                                 + crmd_join_phase_count(crm_join_nack_quiet);
 
             crm_debug("Notifying %d node%s of join-%d results",
                       count_finalizable, pcmk__plural_s(count_finalizable),
                       current_join_id);
             g_hash_table_foreach(crm_peer_cache, finalize_join_for, NULL);
         }
     }
 }
 
 static void
 join_node_state_commit_callback(xmlNode *msg, int call_id, int rc,
                                 xmlNode *output, void *user_data)
 {
     const char *node = user_data;
 
     if (rc != pcmk_ok) {
         fsa_data_t *msg_data = NULL;    // for register_fsa_error() macro
 
         crm_crit("join-%d node history update (via CIB call %d) for node %s "
                  "failed: %s",
                  current_join_id, call_id, node, pcmk_strerror(rc));
         crm_log_xml_debug(msg, "failed");
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
     }
 
     crm_debug("join-%d node history update (via CIB call %d) for node %s "
               "complete",
               current_join_id, call_id, node);
     check_join_state(controld_globals.fsa_state, __func__);
 }
 
 /*	A_DC_JOIN_PROCESS_ACK	*/
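 /*!
  * \internal
  * \brief Process a join confirmation, updating the confirming node's
  *        executor history in the CIB
  */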
 void
 do_dc_join_ack(long long action,
                enum crmd_fsa_cause cause,
                enum crmd_fsa_state cur_state,
                enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     int join_id = -1;
     ha_msg_input_t *join_ack = fsa_typed_data(fsa_dt_ha_msg);
 
     const char *op = crm_element_value(join_ack->msg, PCMK__XA_CRM_TASK);
     char *join_from = crm_element_value_copy(join_ack->msg, PCMK__XA_SRC);
     crm_node_t *peer = NULL;
 
     enum controld_section_e section = controld_section_lrm;
     char *xpath = NULL;
     xmlNode *state = join_ack->xml;
     xmlNode *execd_state = NULL;
 
     cib_t *cib = controld_globals.cib_conn;
     int rc = pcmk_ok;
 
     // Sanity checks
     if (join_from == NULL) {
         crm_warn("Ignoring message received without node identification");
         goto done;
     }
     if (op == NULL) {
         crm_warn("Ignoring message received from %s without task", join_from);
         goto done;
     }
 
     if (strcmp(op, CRM_OP_JOIN_CONFIRM) != 0) {
         crm_debug("Ignoring '%s' message from %s while waiting for '%s'",
                   op, join_from, CRM_OP_JOIN_CONFIRM);
         goto done;
     }
 
     if (crm_element_value_int(join_ack->msg, PCMK__XA_JOIN_ID, &join_id) != 0) {
         crm_warn("Ignoring join confirmation from %s without valid join ID",
                  join_from);
         goto done;
     }
 
     peer = pcmk__get_node(0, join_from, NULL, pcmk__node_search_cluster);
     if (peer->join != crm_join_finalized) {
         crm_info("Ignoring out-of-sequence join-%d confirmation from %s "
                  "(currently %s not %s)",
                  join_id, join_from, crm_join_phase_str(peer->join),
                  crm_join_phase_str(crm_join_finalized));
         goto done;
     }
 
     if (join_id != current_join_id) {
         crm_err("Rejecting join-%d confirmation from %s "
                 "because currently on join-%d",
                 join_id, join_from, current_join_id);
         crm_update_peer_join(__func__, peer, crm_join_nack);
         goto done;
     }
 
     crm_update_peer_join(__func__, peer, crm_join_confirmed);
 
     /* Update CIB with node's current executor state. A new transition will be
      * triggered later, when the CIB manager notifies us of the change.
      *
      * The delete and modify requests are part of an atomic transaction.
      */
     rc = cib->cmds->init_transaction(cib);
     if (rc != pcmk_ok) {
         goto done;
     }
 
     // Delete relevant parts of node's current executor state from CIB
     if (pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) {
         section = controld_section_lrm_unlocked;
     }
     controld_node_state_deletion_strings(join_from, section, &xpath, NULL);
 
     rc = cib->cmds->remove(cib, xpath, NULL,
                            cib_scope_local
                            |cib_xpath
                            |cib_multiple
                            |cib_transaction);
     if (rc != pcmk_ok) {
         goto done;
     }
 
     // Update CIB with node's latest known executor state
     if (pcmk__str_eq(join_from, controld_globals.our_nodename,
                      pcmk__str_casei)) {
 
         // Use the latest possible state if processing our own join ack
         execd_state = controld_query_executor_state();
 
         if (execd_state != NULL) {
             crm_debug("Updating local node history for join-%d from query "
                       "result",
                       current_join_id);
             state = execd_state;
 
         } else {
             crm_warn("Updating local node history from join-%d confirmation "
                      "because query failed",
                      current_join_id);
         }
 
     } else {
         crm_debug("Updating node history for %s from join-%d confirmation",
                   join_from, current_join_id);
     }
 
     rc = cib->cmds->modify(cib, PCMK_XE_STATUS, state,
                            cib_scope_local|cib_can_create|cib_transaction);
     free_xml(execd_state);
     if (rc != pcmk_ok) {
         goto done;
     }
 
     // Commit the transaction
     rc = cib->cmds->end_transaction(cib, true, cib_scope_local);
     fsa_register_cib_callback(rc, join_from, join_node_state_commit_callback);
 
     if (rc > 0) {
         // join_from will be freed after callback
         join_from = NULL;
         rc = pcmk_ok;
     }
 
 done:
     if (rc != pcmk_ok) {
         crm_crit("join-%d node history update for node %s failed: %s",
                  current_join_id, join_from, pcmk_strerror(rc));
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
     }
     free(join_from);
     free(xpath);
 }
 
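 /*!
  * \internal
  * \brief Send a join acknowledgement or rejection to a node (as a \c GHFunc)
  *
  * Nodes that are neither integrated nor nacked are skipped, as are nacked
  * nodes with feature sets older than 3.17.0 (quiet nacks).
  *
  * \param[in]     key        Ignored
  * \param[in,out] value      Node to respond to (\c crm_node_t *)
  * \param[in]     user_data  Ignored
  */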
 void
 finalize_join_for(gpointer key, gpointer value, gpointer user_data)
 {
     xmlNode *acknak = NULL;
     xmlNode *tmp1 = NULL;
     crm_node_t *join_node = value;
     const char *join_to = join_node->uname;
     bool integrated = false;
 
     switch (join_node->join) {
         case crm_join_integrated:
             integrated = true;
             break;
         case crm_join_nack:
         case crm_join_nack_quiet:
             break;
         default:
             crm_trace("Not updating non-integrated and non-nacked node %s (%s) "
                       "for join-%d", join_to,
                       crm_join_phase_str(join_node->join), current_join_id);
             return;
     }
 
     /* Update the <node> element with the node's name and UUID, in case they
      * weren't known before
      */
     crm_trace("Updating node name and UUID in CIB for %s", join_to);
     tmp1 = pcmk__xe_create(NULL, PCMK_XE_NODE);
     crm_xml_add(tmp1, PCMK_XA_ID, crm_peer_uuid(join_node));
     crm_xml_add(tmp1, PCMK_XA_UNAME, join_to);
     fsa_cib_anon_update(PCMK_XE_NODES, tmp1);
     free_xml(tmp1);
 
     if (join_node->join == crm_join_nack_quiet) {
         crm_trace("Not sending nack message to node %s with feature set older "
                   "than 3.17.0", join_to);
         return;
     }
 
     join_node = pcmk__get_node(0, join_to, NULL, pcmk__node_search_cluster);
-    if (!crm_is_peer_active(join_node)) {
+    if (!pcmk__cluster_is_node_active(join_node)) {
         /*
          * NACK'ing nodes that the membership layer doesn't know about yet
          * simply creates more churn.
          *
          * Better to leave them waiting, and let the join restart when the
          * new membership event comes in.
          *
          * All other NACKs (due to version incompatibility, etc.) should
          * still be processed.
          */
         pcmk__update_peer_expected(__func__, join_node, CRMD_JOINSTATE_PENDING);
         return;
     }
 
     // Acknowledge or nack node's join request
     crm_debug("%sing join-%d request from %s",
               integrated? "Acknowledg" : "Nack", current_join_id, join_to);
     acknak = create_dc_message(CRM_OP_JOIN_ACKNAK, join_to);
     pcmk__xe_set_bool_attr(acknak, CRM_OP_JOIN_ACKNAK, integrated);
 
     if (integrated) {
         // No change needed for a nacked node
         crm_update_peer_join(__func__, join_node, crm_join_finalized);
         pcmk__update_peer_expected(__func__, join_node, CRMD_JOINSTATE_MEMBER);
 
         /* Iterate through the remote peer cache, and add information about
          * which cluster node hosts each remote node to the ACK message. This
          * keeps new controllers in sync with what has already happened.
          */
         if (pcmk__cluster_num_remote_nodes() > 0) {
             GHashTableIter iter;
             crm_node_t *node = NULL;
             xmlNode *remotes = pcmk__xe_create(acknak, PCMK_XE_NODES);
 
             g_hash_table_iter_init(&iter, crm_remote_peer_cache);
             while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
                 xmlNode *remote = NULL;
 
                 if (!node->conn_host) {
                     continue;
                 }
 
                 remote = pcmk__xe_create(remotes, PCMK_XE_NODE);
                 pcmk__xe_set_props(remote,
                                    PCMK_XA_ID, node->uname,
                                    PCMK__XA_NODE_STATE, node->state,
                                    PCMK__XA_CONNECTION_HOST, node->conn_host,
                                    NULL);
             }
         }
     }
     send_cluster_message(join_node, crm_msg_crmd, acknak, TRUE);
     free_xml(acknak);
     return;
 }
 
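 /*!
  * \internal
  * \brief Check whether the current join round can progress
  *
  * \param[in] cur_state  Current FSA state
  * \param[in] source     Name of calling function (for logging)
  *
  * \return TRUE if integration or finalization completed (registering the
  *         corresponding FSA input), or if finalization must wait for a CIB;
  *         otherwise FALSE
  */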
 gboolean
 check_join_state(enum crmd_fsa_state cur_state, const char *source)
 {
     static unsigned long long highest_seq = 0;
 
     if (controld_globals.membership_id != crm_peer_seq) {
         crm_debug("join-%d: Membership changed from %llu to %llu "
                   CRM_XS " highest=%llu state=%s for=%s",
                   current_join_id, controld_globals.membership_id, crm_peer_seq,
                   highest_seq, fsa_state2string(cur_state), source);
         if (highest_seq < crm_peer_seq) {
             /* Don't spam the FSA with duplicates */
             highest_seq = crm_peer_seq;
             register_fsa_input_before(C_FSA_INTERNAL, I_NODE_JOIN, NULL);
         }
 
     } else if (cur_state == S_INTEGRATION) {
         if (crmd_join_phase_count(crm_join_welcomed) == 0) {
             int count = crmd_join_phase_count(crm_join_integrated);
 
             crm_debug("join-%d: Integration of %d peer%s complete "
                       CRM_XS " state=%s for=%s",
                       current_join_id, count, pcmk__plural_s(count),
                       fsa_state2string(cur_state), source);
             register_fsa_input_before(C_FSA_INTERNAL, I_INTEGRATED, NULL);
             return TRUE;
         }
 
     } else if (cur_state == S_FINALIZE_JOIN) {
         if (!pcmk_is_set(controld_globals.fsa_input_register, R_HAVE_CIB)) {
             crm_debug("join-%d: Delaying finalization until we have CIB "
                       CRM_XS " state=%s for=%s",
                       current_join_id, fsa_state2string(cur_state), source);
             return TRUE;
 
         } else if (crmd_join_phase_count(crm_join_welcomed) != 0) {
             int count = crmd_join_phase_count(crm_join_welcomed);
 
             crm_debug("join-%d: Still waiting on %d welcomed node%s "
                       CRM_XS " state=%s for=%s",
                       current_join_id, count, pcmk__plural_s(count),
                       fsa_state2string(cur_state), source);
             crmd_join_phase_log(LOG_DEBUG);
 
         } else if (crmd_join_phase_count(crm_join_integrated) != 0) {
             int count = crmd_join_phase_count(crm_join_integrated);
 
             crm_debug("join-%d: Still waiting on %d integrated node%s "
                       CRM_XS " state=%s for=%s",
                       current_join_id, count, pcmk__plural_s(count),
                       fsa_state2string(cur_state), source);
             crmd_join_phase_log(LOG_DEBUG);
 
         } else if (crmd_join_phase_count(crm_join_finalized) != 0) {
             int count = crmd_join_phase_count(crm_join_finalized);
 
             crm_debug("join-%d: Still waiting on %d finalized node%s "
                       CRM_XS " state=%s for=%s",
                       current_join_id, count, pcmk__plural_s(count),
                       fsa_state2string(cur_state), source);
             crmd_join_phase_log(LOG_DEBUG);
 
         } else {
             crm_debug("join-%d: Complete " CRM_XS " state=%s for=%s",
                       current_join_id, fsa_state2string(cur_state), source);
             register_fsa_input_later(C_FSA_INTERNAL, I_FINALIZED, NULL);
             return TRUE;
         }
     }
 
     return FALSE;
 }
 
 void
 do_dc_join_final(long long action,
                  enum crmd_fsa_cause cause,
                  enum crmd_fsa_state cur_state,
                  enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     crm_debug("Ensuring DC, quorum and node attributes are up-to-date");
     crm_update_quorum(crm_have_quorum, TRUE);
 }
 
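 /*!
  * \internal
  * \brief Count the cluster peers in a given join phase
  *
  * \param[in] phase  Join phase to count
  *
  * \return Number of peers in \p phase
  */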
 int crmd_join_phase_count(enum crm_join_phase phase)
 {
     int count = 0;
     crm_node_t *peer;
     GHashTableIter iter;
 
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &peer)) {
         if (peer->join == phase) {
             count++;
         }
     }
     return count;
 }
 
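 /*!
  * \internal
  * \brief Log the join phase of every known cluster peer
  *
  * \param[in] level  Log level to use
  */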
 void crmd_join_phase_log(int level)
 {
     crm_node_t *peer;
     GHashTableIter iter;
 
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &peer)) {
         do_crm_log(level, "join-%d: %s=%s", current_join_id, peer->uname,
                    crm_join_phase_str(peer->join));
     }
 }
diff --git a/daemons/controld/controld_membership.c b/daemons/controld/controld_membership.c
index 759c280684..ece48688ee 100644
--- a/daemons/controld/controld_membership.c
+++ b/daemons/controld/controld_membership.c
@@ -1,463 +1,463 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 /* put these first so that uuid_t is defined without conflicts */
 #include <crm_internal.h>
 
 #include <string.h>
 
 #include <crm/crm.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 #include <crm/cluster/internal.h>
 
 #include <pacemaker-controld.h>
 
 void post_cache_update(int instance);
 
 extern gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source);
 
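 /*!
  * \internal
  * \brief React to a node becoming inactive (as a \c GHFunc)
  *
  * Reset the node's join phase, register an error or an election if the lost
  * node was ourselves or the DC, and fail any transition actions that can no
  * longer complete.
  *
  * \param[in]     key        Ignored
  * \param[in,out] value      Node to check (\c crm_node_t *)
  * \param[in]     user_data  Ignored
  */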
 static void
 reap_dead_nodes(gpointer key, gpointer value, gpointer user_data)
 {
     crm_node_t *node = value;
 
-    if (crm_is_peer_active(node) == FALSE) {
+    if (!pcmk__cluster_is_node_active(node)) {
         crm_update_peer_join(__func__, node, crm_join_none);
 
         if (node && node->uname) {
             if (pcmk__str_eq(controld_globals.our_nodename, node->uname,
                              pcmk__str_casei)) {
                 crm_err("We're not part of the cluster anymore");
                 register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL);
 
             } else if (!AM_I_DC
                        && pcmk__str_eq(node->uname, controld_globals.dc_name,
                                        pcmk__str_casei)) {
                 crm_warn("Our DC node (%s) left the cluster", node->uname);
                 register_fsa_input(C_FSA_INTERNAL, I_ELECTION, NULL);
             }
         }
 
         if ((controld_globals.fsa_state == S_INTEGRATION)
             || (controld_globals.fsa_state == S_FINALIZE_JOIN)) {
             check_join_state(controld_globals.fsa_state, __func__);
         }
         if ((node != NULL) && (node->uuid != NULL)) {
             fail_incompletable_actions(controld_globals.transition_graph,
                                        node->uuid);
         }
     }
 }
 
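 /*!
  * \internal
  * \brief Handle an updated peer cache after a membership event
  *
  * \param[in] instance  Sequence number of the membership event
  */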
 void
 post_cache_update(int instance)
 {
     xmlNode *no_op = NULL;
 
     crm_peer_seq = instance;
     crm_debug("Updated cache after membership event %d.", instance);
 
     g_hash_table_foreach(crm_peer_cache, reap_dead_nodes, NULL);
     controld_set_fsa_input_flags(R_MEMBERSHIP);
 
     if (AM_I_DC) {
         populate_cib_nodes(node_update_quick | node_update_cluster | node_update_peer |
                            node_update_expected, __func__);
     }
 
     /*
      * If we lost nodes, we should re-check the election status.
      * This is safe to call outside of an election.
      */
     controld_set_fsa_action_flags(A_ELECTION_CHECK);
     controld_trigger_fsa();
 
     /* Membership changed, so remind everyone we're here.
      * This will aid detection of duplicate DCs.
      */
     no_op = create_request(CRM_OP_NOOP, NULL, NULL, CRM_SYSTEM_CRMD,
                            AM_I_DC ? CRM_SYSTEM_DC : CRM_SYSTEM_CRMD, NULL);
     send_cluster_message(NULL, crm_msg_crmd, no_op, FALSE);
     free_xml(no_op);
 }
 
 static void
 crmd_node_update_complete(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     fsa_data_t *msg_data = NULL;
 
     if (rc == pcmk_ok) {
         crm_trace("Node update %d complete", call_id);
 
     } else if(call_id < pcmk_ok) {
         crm_err("Node update failed: %s (%d)", pcmk_strerror(call_id), call_id);
         crm_log_xml_debug(msg, "failed");
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
 
     } else {
         crm_err("Node update %d failed: %s (%d)", call_id, pcmk_strerror(rc), rc);
         crm_log_xml_debug(msg, "failed");
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
     }
 }
 
 /*!
  * \internal
  * \brief Create an XML node state tag with updates
  *
  * \param[in,out] node    Node whose state will be used for update
  * \param[in]     flags   Bitmask of node_update_flags indicating what to update
  * \param[in,out] parent  XML node to contain update (or NULL)
  * \param[in]     source  Who requested the update (only used for logging)
  *
  * \return Pointer to created node state tag
  */
 xmlNode *
 create_node_state_update(crm_node_t *node, int flags, xmlNode *parent,
                          const char *source)
 {
     const char *value = NULL;
     xmlNode *node_state;
 
     if (!node->state) {
         crm_info("Node update for %s cancelled: no state, not seen yet", node->uname);
          return NULL;
     }
 
     node_state = pcmk__xe_create(parent, PCMK__XE_NODE_STATE);
 
     if (pcmk_is_set(node->flags, crm_remote_node)) {
         pcmk__xe_set_bool_attr(node_state, PCMK_XA_REMOTE_NODE, true);
     }
 
     if (crm_xml_add(node_state, PCMK_XA_ID, crm_peer_uuid(node)) == NULL) {
         crm_info("Node update for %s cancelled: no ID", node->uname);
         free_xml(node_state);
         return NULL;
     }
 
     crm_xml_add(node_state, PCMK_XA_UNAME, node->uname);
 
     if ((flags & node_update_cluster) && node->state) {
         if (compare_version(controld_globals.dc_version, "3.18.0") >= 0) {
              // A value of 0 means the node is not a cluster member.
             crm_xml_add_ll(node_state, PCMK__XA_IN_CCM, node->when_member);
 
         } else {
             pcmk__xe_set_bool_attr(node_state, PCMK__XA_IN_CCM,
                                    pcmk__str_eq(node->state, CRM_NODE_MEMBER,
                                                 pcmk__str_casei));
         }
     }
 
     if (!pcmk_is_set(node->flags, crm_remote_node)) {
         if (flags & node_update_peer) {
             if (compare_version(controld_globals.dc_version, "3.18.0") >= 0) {
                  // A value of 0 means the peer is offline in CPG.
                 crm_xml_add_ll(node_state, PCMK_XA_CRMD, node->when_online);
 
             } else {
                 // @COMPAT DCs < 2.1.7 use online/offline rather than timestamp
                 value = PCMK_VALUE_OFFLINE;
                 if (pcmk_is_set(node->processes, crm_get_cluster_proc())) {
                     value = PCMK_VALUE_ONLINE;
                 }
                 crm_xml_add(node_state, PCMK_XA_CRMD, value);
             }
         }
 
         if (flags & node_update_join) {
             if (node->join <= crm_join_none) {
                 value = CRMD_JOINSTATE_DOWN;
             } else {
                 value = CRMD_JOINSTATE_MEMBER;
             }
             crm_xml_add(node_state, PCMK__XA_JOIN, value);
         }
 
         if (flags & node_update_expected) {
             crm_xml_add(node_state, PCMK_XA_EXPECTED, node->expected);
         }
     }
 
     crm_xml_add(node_state, PCMK_XA_CRM_DEBUG_ORIGIN, source);
 
     return node_state;
 }
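 
 /* Illustrative sketch (not part of this patch): building a status-section
  * update for a single node with the helper above. populate_cib_nodes() below
  * shows the real bulk usage; the flag combination here is an assumption for
  * illustration.
  */
 static void
 example_update_one_node(crm_node_t *node, const char *source)
 {
     xmlNode *status = pcmk__xe_create(NULL, PCMK_XE_STATUS);
     int flags = node_update_cluster|node_update_peer
                 |node_update_join|node_update_expected;
 
     if (create_node_state_update(node, flags, status, source) != NULL) {
         controld_update_cib(PCMK_XE_STATUS, status, cib_scope_local,
                             crmd_node_update_complete);
     }
     free_xml(status);
 }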
 
 static void
 remove_conflicting_node_callback(xmlNode * msg, int call_id, int rc,
                                  xmlNode * output, void *user_data)
 {
     char *node_uuid = user_data;
 
     do_crm_log_unlikely(rc == 0 ? LOG_DEBUG : LOG_NOTICE,
                         "Deletion of the unknown conflicting node \"%s\": %s (rc=%d)",
                         node_uuid, pcmk_strerror(rc), rc);
 }
 
 static void
 search_conflicting_node_callback(xmlNode * msg, int call_id, int rc,
                                  xmlNode * output, void *user_data)
 {
     char *new_node_uuid = user_data;
     xmlNode *node_xml = NULL;
 
     if (rc != pcmk_ok) {
         if (rc != -ENXIO) {
             crm_notice("Searching conflicting nodes for %s failed: %s (%d)",
                        new_node_uuid, pcmk_strerror(rc), rc);
         }
         return;
 
     } else if (output == NULL) {
         return;
     }
 
     if (pcmk__xe_is(output, PCMK_XE_NODE)) {
         node_xml = output;
 
     } else {
         node_xml = pcmk__xe_first_child(output, PCMK_XE_NODE, NULL, NULL);
     }
 
     for (; node_xml != NULL; node_xml = pcmk__xe_next_same(node_xml)) {
         const char *node_uuid = NULL;
         const char *node_uname = NULL;
         GHashTableIter iter;
         crm_node_t *node = NULL;
         gboolean known = FALSE;
 
         node_uuid = crm_element_value(node_xml, PCMK_XA_ID);
         node_uname = crm_element_value(node_xml, PCMK_XA_UNAME);
 
         if (node_uuid == NULL || node_uname == NULL) {
             continue;
         }
 
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if (node->uuid
                 && pcmk__str_eq(node->uuid, node_uuid, pcmk__str_casei)
                 && node->uname
                 && pcmk__str_eq(node->uname, node_uname, pcmk__str_casei)) {
 
                 known = TRUE;
                 break;
             }
         }
 
         if (known == FALSE) {
             cib_t *cib_conn = controld_globals.cib_conn;
             int delete_call_id = 0;
             xmlNode *node_state_xml = NULL;
 
             crm_notice("Deleting unknown node %s/%s which has conflicting uname with %s",
                        node_uuid, node_uname, new_node_uuid);
 
             delete_call_id = cib_conn->cmds->remove(cib_conn, PCMK_XE_NODES,
                                                     node_xml, cib_scope_local);
             fsa_register_cib_callback(delete_call_id, pcmk__str_copy(node_uuid),
                                       remove_conflicting_node_callback);
 
             node_state_xml = pcmk__xe_create(NULL, PCMK__XE_NODE_STATE);
             crm_xml_add(node_state_xml, PCMK_XA_ID, node_uuid);
             crm_xml_add(node_state_xml, PCMK_XA_UNAME, node_uname);
 
             delete_call_id = cib_conn->cmds->remove(cib_conn, PCMK_XE_STATUS,
                                                     node_state_xml,
                                                     cib_scope_local);
             fsa_register_cib_callback(delete_call_id, pcmk__str_copy(node_uuid),
                                       remove_conflicting_node_callback);
             free_xml(node_state_xml);
         }
     }
 }
 
 static void
 node_list_update_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     fsa_data_t *msg_data = NULL;
 
     if(call_id < pcmk_ok) {
         crm_err("Node list update failed: %s (%d)", pcmk_strerror(call_id), call_id);
         crm_log_xml_debug(msg, "update:failed");
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
 
     } else if(rc < pcmk_ok) {
         crm_err("Node update %d failed: %s (%d)", call_id, pcmk_strerror(rc), rc);
         crm_log_xml_debug(msg, "update:failed");
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
     }
 }
 
 void
 populate_cib_nodes(enum node_update_flags flags, const char *source)
 {
     cib_t *cib_conn = controld_globals.cib_conn;
 
     int call_id = 0;
     gboolean from_hashtable = TRUE;
     xmlNode *node_list = pcmk__xe_create(NULL, PCMK_XE_NODES);
 
 #if SUPPORT_COROSYNC
     if (!pcmk_is_set(flags, node_update_quick) && is_corosync_cluster()) {
         from_hashtable = pcmk__corosync_add_nodes(node_list);
     }
 #endif
 
     if (from_hashtable) {
         GHashTableIter iter;
         crm_node_t *node = NULL;
         GString *xpath = NULL;
 
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             xmlNode *new_node = NULL;
 
             if ((node->uuid != NULL) && (node->uname != NULL)) {
                 crm_trace("Creating node entry for %s/%s", node->uname, node->uuid);
                 if (xpath == NULL) {
                     xpath = g_string_sized_new(512);
                 } else {
                     g_string_truncate(xpath, 0);
                 }
 
                 /* We need both to be valid */
                 new_node = pcmk__xe_create(node_list, PCMK_XE_NODE);
                 crm_xml_add(new_node, PCMK_XA_ID, node->uuid);
                 crm_xml_add(new_node, PCMK_XA_UNAME, node->uname);
 
                 /* Search and remove unknown nodes with the conflicting uname from CIB */
                 pcmk__g_strcat(xpath,
                                "/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION
                                "/" PCMK_XE_NODES "/" PCMK_XE_NODE
                                "[@" PCMK_XA_UNAME "='", node->uname, "']"
                                "[@" PCMK_XA_ID "!='", node->uuid, "']", NULL);
 
                 call_id = cib_conn->cmds->query(cib_conn,
                                                 (const char *) xpath->str,
                                                 NULL,
                                                 cib_scope_local|cib_xpath);
                 fsa_register_cib_callback(call_id, pcmk__str_copy(node->uuid),
                                           search_conflicting_node_callback);
             }
         }
 
         if (xpath != NULL) {
             g_string_free(xpath, TRUE);
         }
     }
 
     crm_trace("Populating <nodes> section from %s", from_hashtable ? "hashtable" : "cluster");
 
     if ((controld_update_cib(PCMK_XE_NODES, node_list, cib_scope_local,
                              node_list_update_callback) == pcmk_rc_ok)
          && (crm_peer_cache != NULL) && AM_I_DC) {
         /*
          * There is no need to update the local CIB with our values if
          * we've not seen valid membership data
          */
         GHashTableIter iter;
         crm_node_t *node = NULL;
 
         free_xml(node_list);
         node_list = pcmk__xe_create(NULL, PCMK_XE_STATUS);
 
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             create_node_state_update(node, flags, node_list, source);
         }
 
         if (crm_remote_peer_cache) {
             g_hash_table_iter_init(&iter, crm_remote_peer_cache);
             while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
                 create_node_state_update(node, flags, node_list, source);
             }
         }
 
         controld_update_cib(PCMK_XE_STATUS, node_list, cib_scope_local,
                             crmd_node_update_complete);
     }
     free_xml(node_list);
 }
 
 static void
 cib_quorum_update_complete(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     fsa_data_t *msg_data = NULL;
 
     if (rc == pcmk_ok) {
         crm_trace("Quorum update %d complete", call_id);
 
     } else {
         crm_err("Quorum update %d failed: %s (%d)", call_id, pcmk_strerror(rc), rc);
         crm_log_xml_debug(msg, "failed");
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
     }
 }
 
 void
 crm_update_quorum(gboolean quorum, gboolean force_update)
 {
     bool has_quorum = pcmk_is_set(controld_globals.flags, controld_has_quorum);
 
     if (quorum) {
         controld_set_global_flags(controld_ever_had_quorum);
 
     } else if (pcmk_all_flags_set(controld_globals.flags,
                                   controld_ever_had_quorum
                                   |controld_no_quorum_suicide)) {
         pcmk__panic(__func__);
     }
 
     if (AM_I_DC
         && ((has_quorum && !quorum) || (!has_quorum && quorum)
             || force_update)) {
         xmlNode *update = NULL;
 
         update = pcmk__xe_create(NULL, PCMK_XE_CIB);
         crm_xml_add_int(update, PCMK_XA_HAVE_QUORUM, quorum);
         crm_xml_add(update, PCMK_XA_DC_UUID, controld_globals.our_uuid);
 
         crm_debug("Updating quorum status to %s", pcmk__btoa(quorum));
         controld_update_cib(PCMK_XE_CIB, update, cib_scope_local,
                             cib_quorum_update_complete);
         free_xml(update);
 
         /* Quorum changes usually cause a new transition via other activity:
          * quorum gained via a node joining will abort via the node join,
          * and quorum lost via a node leaving will usually abort via resource
          * activity and/or fencing.
          *
          * However, it is possible that nothing else causes a transition (e.g.
          * someone forces quorum via corosync-cmapctl, or quorum is lost due to
          * a node in standby shutting down cleanly), so here ensure a new
          * transition is triggered.
          */
         if (quorum) {
             /* If quorum was gained, abort after a short delay, in case multiple
              * nodes are joining around the same time, so the one that brings us
              * to quorum doesn't cause all the remaining ones to be fenced.
              */
             abort_after_delay(PCMK_SCORE_INFINITY, pcmk__graph_restart,
                               "Quorum gained", 5000);
         } else {
             abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart,
                              "Quorum lost", NULL);
         }
     }
 
     if (quorum) {
         controld_set_global_flags(controld_has_quorum);
     } else {
         controld_clear_global_flags(controld_has_quorum);
     }
 }
diff --git a/include/crm/cluster/internal.h b/include/crm/cluster/internal.h
index 7db7867833..c46377e6e3 100644
--- a/include/crm/cluster/internal.h
+++ b/include/crm/cluster/internal.h
@@ -1,159 +1,159 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__CRM_CLUSTER_INTERNAL__H
 #  define PCMK__CRM_CLUSTER_INTERNAL__H
 
 #  include <stdint.h>       // uint32_t, uint64_t
 
 #  include <glib.h>         // gboolean
 
 #  include <crm/cluster.h>
 
 /* *INDENT-OFF* */
 enum crm_proc_flag {
     crm_proc_none       = 0x00000001,
 
     // Cluster layers
     crm_proc_cpg        = 0x04000000,
 
     // Daemons
     crm_proc_execd      = 0x00000010,
     crm_proc_based      = 0x00000100,
     crm_proc_controld   = 0x00000200,
     crm_proc_attrd      = 0x00001000,
     crm_proc_schedulerd = 0x00010000,
     crm_proc_fenced     = 0x00100000,
 };
 /* *INDENT-ON* */
 
 // Used with node cache search functions
 enum pcmk__node_search_flags {
     pcmk__node_search_none      = 0,
     pcmk__node_search_cluster   = (1 << 0), // Search for cluster nodes
     pcmk__node_search_remote    = (1 << 1), // Search for remote nodes
     pcmk__node_search_any       = pcmk__node_search_cluster
                                   |pcmk__node_search_remote,
 
     /* @COMPAT The values before this must stay the same until we can drop
      * support for enum crm_get_peer_flags
      */
 
     pcmk__node_search_known     = (1 << 2), // Search previously known nodes
 };
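 
 /* Illustrative sketch (not part of this patch): the flags above form a
  * bitmask, so callers can combine them. This assumes the
  * pcmk__search_node_caches() prototype declared later in this header is in
  * scope at the call site:
  *
  *     crm_node_t *node = pcmk__search_node_caches(0, "node1",
  *                                                 pcmk__node_search_any
  *                                                 |pcmk__node_search_known);
  */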
 
 /*!
  * \internal
  * \brief Return the process bit corresponding to the current cluster stack
  *
  * \return Process flag if detectable, otherwise 0
  */
 static inline uint32_t
 crm_get_cluster_proc(void)
 {
     switch (get_cluster_type()) {
         case pcmk_cluster_corosync:
             return crm_proc_cpg;
 
         default:
             break;
     }
     return crm_proc_none;
 }
 
 /*!
  * \internal
  * \brief Get log-friendly string description of a Corosync return code
  *
  * \param[in] error  Corosync return code
  *
  * \return Log-friendly string description corresponding to \p error
  */
 static inline const char *
 pcmk__cs_err_str(int error)
 {
 #  if SUPPORT_COROSYNC
     switch (error) {
         case CS_OK:                         return "OK";
         case CS_ERR_LIBRARY:                return "Library error";
         case CS_ERR_VERSION:                return "Version error";
         case CS_ERR_INIT:                   return "Initialization error";
         case CS_ERR_TIMEOUT:                return "Timeout";
         case CS_ERR_TRY_AGAIN:              return "Try again";
         case CS_ERR_INVALID_PARAM:          return "Invalid parameter";
         case CS_ERR_NO_MEMORY:              return "No memory";
         case CS_ERR_BAD_HANDLE:             return "Bad handle";
         case CS_ERR_BUSY:                   return "Busy";
         case CS_ERR_ACCESS:                 return "Access error";
         case CS_ERR_NOT_EXIST:              return "Doesn't exist";
         case CS_ERR_NAME_TOO_LONG:          return "Name too long";
         case CS_ERR_EXIST:                  return "Exists";
         case CS_ERR_NO_SPACE:               return "No space";
         case CS_ERR_INTERRUPT:              return "Interrupt";
         case CS_ERR_NAME_NOT_FOUND:         return "Name not found";
         case CS_ERR_NO_RESOURCES:           return "No resources";
         case CS_ERR_NOT_SUPPORTED:          return "Not supported";
         case CS_ERR_BAD_OPERATION:          return "Bad operation";
         case CS_ERR_FAILED_OPERATION:       return "Failed operation";
         case CS_ERR_MESSAGE_ERROR:          return "Message error";
         case CS_ERR_QUEUE_FULL:             return "Queue full";
         case CS_ERR_QUEUE_NOT_AVAILABLE:    return "Queue not available";
         case CS_ERR_BAD_FLAGS:              return "Bad flags";
         case CS_ERR_TOO_BIG:                return "Too big";
         case CS_ERR_NO_SECTIONS:            return "No sections";
     }
 #  endif
     return "Corosync error";
 }
 
 #  if SUPPORT_COROSYNC
 
 #if 0
 /* cmap_initialize_map() is the newer interface, but we still support all
  * Corosync 2 versions, and it isn't always available there. A better
  * alternative would be to check for support in the configure script and
  * enable this conditionally.
  */
 #define pcmk__init_cmap(handle) cmap_initialize_map((handle), CMAP_MAP_ICMAP)
 #else
 #define pcmk__init_cmap(handle) cmap_initialize(handle)
 #endif
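 
 /* Illustrative sketch (not part of this patch): typical use of the macro
  * above. cmap_handle_t, cmap_finalize(), and CS_OK come from the Corosync
  * cmap/corotypes API.
  *
  *     cmap_handle_t handle = 0;
  *
  *     if (pcmk__init_cmap(&handle) == CS_OK) {
  *         // ... query keys with cmap_get_string() etc. ...
  *         cmap_finalize(handle);
  *     }
  */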
 
 char *pcmk__corosync_cluster_name(void);
 bool pcmk__corosync_add_nodes(xmlNode *xml_parent);
 #  endif
 
 crm_node_t *crm_update_peer_proc(const char *source, crm_node_t * peer,
                                  uint32_t flag, const char *status);
 crm_node_t *pcmk__update_peer_state(const char *source, crm_node_t *node,
                                     const char *state, uint64_t membership);
 
 void pcmk__update_peer_expected(const char *source, crm_node_t *node,
                                 const char *expected);
 void pcmk__reap_unseen_nodes(uint64_t ring_id);
 
 void pcmk__corosync_quorum_connect(gboolean (*dispatch)(unsigned long long,
                                                         gboolean),
                                    void (*destroy) (gpointer));
 
+bool pcmk__cluster_is_node_active(const crm_node_t *node);
 unsigned int pcmk__cluster_num_remote_nodes(void);
-
 crm_node_t *pcmk__cluster_lookup_remote_node(const char *node_name);
 void pcmk__cluster_forget_remote_node(const char *node_name);
 crm_node_t *pcmk__search_node_caches(unsigned int id, const char *uname,
                                      uint32_t flags);
 crm_node_t *pcmk__search_cluster_node_cache(unsigned int id, const char *uname,
                                             const char *uuid);
 void pcmk__purge_node_from_cache(const char *node_name, uint32_t node_id);
 
 void pcmk__refresh_node_caches_from_cib(xmlNode *cib);
 
 crm_node_t *pcmk__get_node(unsigned int id, const char *uname,
                            const char *uuid, uint32_t flags);
 
 #endif // PCMK__CRM_CLUSTER_INTERNAL__H
diff --git a/lib/cluster/election.c b/lib/cluster/election.c
index 3f5ba322e6..ec9a6f6f56 100644
--- a/lib/cluster/election.c
+++ b/lib/cluster/election.c
@@ -1,723 +1,723 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/time.h>
 #include <sys/resource.h>
 
 #include <crm/common/xml.h>
 
 #include <crm/common/mainloop.h>
 #include <crm/cluster/internal.h>
 #include <crm/cluster/election_internal.h>
 #include <crm/crm.h>
 
 #define STORM_INTERVAL   2      /* in seconds */
 
 struct election_s {
     enum election_result state;
     guint count;        // How many times local node has voted
     char *name;         // Descriptive name for this election
     char *uname;        // Local node's name
     GSourceFunc cb;     // Function to call if election is won
     GHashTable *voted;  // Key = node name, value = how node voted
     mainloop_timer_t *timeout; // When to abort if all votes not received
     int election_wins;         // Track wins, for storm detection
     bool wrote_blackbox;       // Write a storm blackbox at most once
     time_t expires;            // When storm detection period ends
     time_t last_election_loss; // When dampening period ends
 };
 
 static void
 election_complete(election_t *e)
 {
     e->state = election_won;
     if (e->cb != NULL) {
         e->cb(e);
     }
     election_reset(e);
 }
 
 static gboolean
 election_timer_cb(gpointer user_data)
 {
     election_t *e = user_data;
 
     crm_info("%s timed out, declaring local node as winner", e->name);
     election_complete(e);
     return FALSE;
 }
 
 /*!
  * \brief Get current state of an election
  *
  * \param[in] e  Election object
  *
  * \return Current state of \p e
  */
 enum election_result
 election_state(const election_t *e)
 {
     return (e == NULL)? election_error : e->state;
 }
 
 /*!
  * \brief Create a new election object
  *
  * Every node that wishes to participate in an election must create an election
  * object. Typically, this should be done once, at start-up. A caller should
  * only create a single election object.
  *
  * \param[in] name       Label for election (for logging)
  * \param[in] uname      Local node's name
  * \param[in] period_ms  How long to wait for all peers to vote
  * \param[in] cb         Function to call if local node wins election
  *
  * \return Newly allocated election object on success, NULL on error
  * \note The caller is responsible for freeing the returned value using
  *       election_fini().
  */
 election_t *
 election_init(const char *name, const char *uname, guint period_ms, GSourceFunc cb)
 {
     election_t *e = NULL;
 
     static guint count = 0;
 
     CRM_CHECK(uname != NULL, return NULL);
 
     e = calloc(1, sizeof(election_t));
     if (e == NULL) {
         crm_perror(LOG_CRIT, "Cannot create election");
         return NULL;
     }
 
     e->uname = strdup(uname);
     if (e->uname == NULL) {
         crm_perror(LOG_CRIT, "Cannot create election");
         free(e);
         return NULL;
     }
 
     e->name = name? crm_strdup_printf("election-%s", name)
                   : crm_strdup_printf("election-%u", count++);
     e->cb = cb;
     e->timeout = mainloop_timer_add(e->name, period_ms, FALSE,
                                     election_timer_cb, e);
     crm_trace("Created %s", e->name);
     return e;
 }
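 
 /* Illustrative sketch (not part of this patch): creating an election at
  * daemon start-up. The callback, node name, and timeout are assumptions for
  * illustration.
  */
 static gboolean
 example_election_won_cb(gpointer user_data)
 {
     crm_notice("Local node won the election");
     return G_SOURCE_REMOVE;
 }
 
 static election_t *
 example_election_setup(const char *local_node_name)
 {
     // Wait up to two minutes for all peers to vote before declaring victory
     return election_init("example", local_node_name, 120000U,
                          example_election_won_cb);
 }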
 
 /*!
  * \brief Disregard any previous vote by specified peer
  *
  * This discards any recorded vote from a specified peer. Election users should
  * call this whenever a voting peer becomes inactive.
  *
  * \param[in,out] e      Election object
  * \param[in]     uname  Name of peer to disregard
  */
 void
 election_remove(election_t *e, const char *uname)
 {
     if ((e != NULL) && (uname != NULL) && (e->voted != NULL)) {
         crm_trace("Discarding %s (no-)vote from lost peer %s", e->name, uname);
         g_hash_table_remove(e->voted, uname);
     }
 }
 
 /*!
  * \brief Stop election timer and disregard all votes
  *
  * \param[in,out] e  Election object
  */
 void
 election_reset(election_t *e)
 {
     if (e != NULL) {
         crm_trace("Resetting election %s", e->name);
         mainloop_timer_stop(e->timeout);
         if (e->voted) {
             crm_trace("Destroying voted cache with %d members", g_hash_table_size(e->voted));
             g_hash_table_destroy(e->voted);
             e->voted = NULL;
         }
     }
 }
 
 /*!
  * \brief Free an election object
  *
  * Free all memory associated with an election object, stopping its
  * election timer (if running).
  *
  * \param[in,out] e  Election object
  */
 void
 election_fini(election_t *e)
 {
     if (e != NULL) {
         election_reset(e);
         crm_trace("Destroying %s", e->name);
         mainloop_timer_del(e->timeout);
         free(e->uname);
         free(e->name);
         free(e);
     }
 }
 
 static void
 election_timeout_start(election_t *e)
 {
     if (e != NULL) {
         mainloop_timer_start(e->timeout);
     }
 }
 
 /*!
  * \brief Stop an election's timer, if running
  *
  * \param[in,out] e  Election object
  */
 void
 election_timeout_stop(election_t *e)
 {
     if (e != NULL) {
         mainloop_timer_stop(e->timeout);
     }
 }
 
 /*!
  * \brief Change an election's timeout (restarting timer if running)
  *
  * \param[in,out] e       Election object
  * \param[in]     period  New timeout
  */
 void
 election_timeout_set_period(election_t *e, guint period)
 {
     if (e != NULL) {
         mainloop_timer_set_period(e->timeout, period);
     } else {
         crm_err("No election defined");
     }
 }
 
 static int
 get_uptime(struct timeval *output)
 {
     static time_t expires = 0;
     static struct rusage info;
 
     time_t tm_now = time(NULL);
 
     if (expires < tm_now) {
         int rc = 0;
 
         info.ru_utime.tv_sec = 0;
         info.ru_utime.tv_usec = 0;
         rc = getrusage(RUSAGE_SELF, &info);
 
         output->tv_sec = 0;
         output->tv_usec = 0;
 
         if (rc < 0) {
             crm_perror(LOG_ERR, "Could not calculate the current uptime");
             expires = 0;
             return -1;
         }
 
         crm_debug("Current CPU usage is: %lds, %ldus", (long)info.ru_utime.tv_sec,
                   (long)info.ru_utime.tv_usec);
     }
 
     expires = tm_now + STORM_INTERVAL;  /* N seconds after the last _access_ */
     output->tv_sec = info.ru_utime.tv_sec;
     output->tv_usec = info.ru_utime.tv_usec;
 
     return 1;
 }
 
 static int
 compare_age(struct timeval your_age)
 {
     struct timeval our_age;
 
     get_uptime(&our_age); /* If an error occurred, our_age will be compared as {0,0} */
 
     if (our_age.tv_sec > your_age.tv_sec) {
         crm_debug("Win: %ld vs %ld (seconds)", (long)our_age.tv_sec, (long)your_age.tv_sec);
         return 1;
     } else if (our_age.tv_sec < your_age.tv_sec) {
         crm_debug("Lose: %ld vs %ld (seconds)", (long)our_age.tv_sec, (long)your_age.tv_sec);
         return -1;
     } else if (our_age.tv_usec > your_age.tv_usec) {
         crm_debug("Win: %ld.%06ld vs %ld.%06ld (usec)",
                   (long)our_age.tv_sec, (long)our_age.tv_usec, (long)your_age.tv_sec, (long)your_age.tv_usec);
         return 1;
     } else if (our_age.tv_usec < your_age.tv_usec) {
         crm_debug("Lose: %ld.%06ld vs %ld.%06ld (usec)",
                   (long)our_age.tv_sec, (long)our_age.tv_usec, (long)your_age.tv_sec, (long)your_age.tv_usec);
         return -1;
     }
 
     return 0;
 }
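 
 /* Worked example (not part of this patch): with a local uptime of 12.000500s
  * and a peer uptime of 12.000200s, the seconds compare equal and the local
  * microseconds are greater, so compare_age() returns 1 and the local node
  * wins the age comparison.
  */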
 
 /*!
  * \brief Start a new election by offering local node's candidacy
  *
  * Broadcast a "vote" election message containing the local node's ID,
  * (incremented) election counter, and uptime, and start the election timer.
  *
  * \param[in,out] e  Election object
  *
  * \note Any nodes agreeing to the candidacy will send a "no-vote" reply, and if
  *       all active peers do so, or if the election times out, the local node
  *       wins the election. (If we lose to any peer vote, we will stop the
  *       timer, so a timeout means we did not lose -- either some peer did not
  *       vote, or we did not call election_check() in time.)
  */
 void
 election_vote(election_t *e)
 {
     struct timeval age;
     xmlNode *vote = NULL;
     crm_node_t *our_node;
 
     if (e == NULL) {
         crm_trace("Election vote requested, but no election available");
         return;
     }
 
     our_node = pcmk__get_node(0, e->uname, NULL, pcmk__node_search_cluster);
-    if ((our_node == NULL) || (crm_is_peer_active(our_node) == FALSE)) {
+    if (!pcmk__cluster_is_node_active(our_node)) {
         crm_trace("Cannot vote in %s yet: local node not connected to cluster",
                   e->name);
         return;
     }
 
     election_reset(e);
     e->state = election_in_progress;
     vote = create_request(CRM_OP_VOTE, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL);
 
     e->count++;
     crm_xml_add(vote, PCMK__XA_ELECTION_OWNER, our_node->uuid);
     crm_xml_add_int(vote, PCMK__XA_ELECTION_ID, e->count);
 
     // Warning: PCMK__XA_ELECTION_AGE_NANO_SEC value is actually microseconds
     get_uptime(&age);
     crm_xml_add_timeval(vote, PCMK__XA_ELECTION_AGE_SEC,
                         PCMK__XA_ELECTION_AGE_NANO_SEC, &age);
 
     send_cluster_message(NULL, crm_msg_crmd, vote, TRUE);
     free_xml(vote);
 
     crm_debug("Started %s round %d", e->name, e->count);
     election_timeout_start(e);
     return;
 }
 
 /*!
  * \brief Check whether local node has won an election
  *
  * If all known peers have sent no-vote messages, stop the election timer, set
  * the election state to won, and call any registered win callback.
  *
  * \param[in,out] e  Election object
  *
  * \return TRUE if local node has won, FALSE otherwise
  * \note If all known peers have sent no-vote messages, but the election owner
  *       does not call this function, the election will not be won (and the
  *       callback will not be called) until the election times out.
  * \note This should be called when election_count_vote() returns
  *       \c election_in_progress.
  */
 bool
 election_check(election_t *e)
 {
     int voted_size = 0;
     int num_members = 0;
 
     if (e == NULL) {
         crm_trace("Election check requested, but no election available");
         return FALSE;
     }
     if (e->voted == NULL) {
         crm_trace("%s check requested, but no votes received yet", e->name);
         return FALSE;
     }
 
     voted_size = g_hash_table_size(e->voted);
     num_members = crm_active_peers();
 
     /* In the case of #voted > #members, it is better to wait for the
      * timeout and give the cluster time to stabilize.
      */
     if (voted_size >= num_members) {
         /* we won and everyone has voted */
         election_timeout_stop(e);
         if (voted_size > num_members) {
             GHashTableIter gIter;
             const crm_node_t *node;
             char *key = NULL;
 
             crm_warn("Received too many votes in %s", e->name);
             g_hash_table_iter_init(&gIter, crm_peer_cache);
             while (g_hash_table_iter_next(&gIter, NULL, (gpointer *) & node)) {
-                if (crm_is_peer_active(node)) {
+                if (pcmk__cluster_is_node_active(node)) {
                     crm_warn("* expected vote: %s", node->uname);
                 }
             }
 
             g_hash_table_iter_init(&gIter, e->voted);
             while (g_hash_table_iter_next(&gIter, (gpointer *) & key, NULL)) {
                 crm_warn("* actual vote: %s", key);
             }
 
         }
 
         crm_info("%s won by local node", e->name);
         election_complete(e);
         return TRUE;
 
     } else {
         crm_debug("%s still waiting on %d of %d votes",
                   e->name, num_members - voted_size, num_members);
     }
 
     return FALSE;
 }
 
 #define LOSS_DAMPEN 2           /* in seconds */
 
 struct vote {
     const char *op;
     const char *from;
     const char *version;
     const char *election_owner;
     int election_id;
     struct timeval age;
 };
 
 /*!
  * \brief Unpack an election message
  *
  * \param[in] e        Election object (for logging only)
  * \param[in] message  Election message XML
  * \param[out] vote    Parsed fields from message
  *
  * \return TRUE if election message and election are valid, FALSE otherwise
  * \note The parsed struct's pointer members are valid only for the lifetime of
  *       the message argument.
  */
 static bool
 parse_election_message(const election_t *e, const xmlNode *message,
                        struct vote *vote)
 {
     CRM_CHECK(message && vote, return FALSE);
 
     vote->election_id = -1;
     vote->age.tv_sec = -1;
     vote->age.tv_usec = -1;
 
     vote->op = crm_element_value(message, PCMK__XA_CRM_TASK);
     vote->from = crm_element_value(message, PCMK__XA_SRC);
     vote->version = crm_element_value(message, PCMK_XA_VERSION);
     vote->election_owner = crm_element_value(message, PCMK__XA_ELECTION_OWNER);
 
     crm_element_value_int(message, PCMK__XA_ELECTION_ID, &(vote->election_id));
 
     if ((vote->op == NULL) || (vote->from == NULL) || (vote->version == NULL)
         || (vote->election_owner == NULL) || (vote->election_id < 0)) {
 
         crm_warn("Invalid %s message from %s in %s ",
                  (vote->op? vote->op : "election"),
                  (vote->from? vote->from : "unspecified node"),
                  (e? e->name : "election"));
         return FALSE;
     }
 
     // Op-specific validation
 
     if (pcmk__str_eq(vote->op, CRM_OP_VOTE, pcmk__str_none)) {
         /* Only vote ops have uptime.
            Warning: PCMK__XA_ELECTION_AGE_NANO_SEC value is in microseconds.
          */
         crm_element_value_timeval(message, PCMK__XA_ELECTION_AGE_SEC,
                                   PCMK__XA_ELECTION_AGE_NANO_SEC, &(vote->age));
         if ((vote->age.tv_sec < 0) || (vote->age.tv_usec < 0)) {
             crm_warn("Cannot count %s %s from %s because it is missing uptime",
                      (e? e->name : "election"), vote->op, vote->from);
             return FALSE;
         }
 
     } else if (!pcmk__str_eq(vote->op, CRM_OP_NOVOTE, pcmk__str_none)) {
         crm_info("Cannot process %s message from %s because %s is not a known election op",
                  (e? e->name : "election"), vote->from, vote->op);
         return FALSE;
     }
 
     // Election validation
 
     if (e == NULL) {
         crm_info("Cannot count %s from %s because no election available",
                  vote->op, vote->from);
         return FALSE;
     }
 
     /* If the membership cache is NULL, we REALLY shouldn't be voting --
      * the question is how we managed to get here.
      */
     if (crm_peer_cache == NULL) {
         crm_info("Cannot count %s %s from %s because no peer information available",
                  e->name, vote->op, vote->from);
         return FALSE;
     }
     return TRUE;
 }
 
 static void
 record_vote(election_t *e, struct vote *vote)
 {
     CRM_ASSERT(e && vote && vote->from && vote->op);
 
     if (e->voted == NULL) {
         e->voted = pcmk__strkey_table(free, free);
     }
     pcmk__insert_dup(e->voted, vote->from, vote->op);
 }
 
 static void
 send_no_vote(crm_node_t *peer, struct vote *vote)
 {
     // @TODO probably shouldn't hardcode CRM_SYSTEM_CRMD and crm_msg_crmd
 
     xmlNode *novote = create_request(CRM_OP_NOVOTE, NULL, vote->from,
                                      CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL);
 
     crm_xml_add(novote, PCMK__XA_ELECTION_OWNER, vote->election_owner);
     crm_xml_add_int(novote, PCMK__XA_ELECTION_ID, vote->election_id);
 
     send_cluster_message(peer, crm_msg_crmd, novote, TRUE);
     free_xml(novote);
 }
 
 /*!
  * \brief Process an election message (vote or no-vote) from a peer
  *
  * \param[in,out] e        Election object
  * \param[in]     message  Election message XML from peer
  * \param[in]     can_win  Whether local node is eligible to win
  *
  * \return Election state after new vote is considered
  * \note If the peer message is a vote, and we prefer the peer to win, this will
  *       send a no-vote reply to the peer.
  * \note The situations "we lost to this vote" and "this is a late no-vote
  *       after we've already lost" both return election_lost. If a caller needs
  *       to distinguish them, it should save the current state before calling
  *       this function, and then compare the result.
  */
 enum election_result
 election_count_vote(election_t *e, const xmlNode *message, bool can_win)
 {
     int log_level = LOG_INFO;
     gboolean done = FALSE;
     gboolean we_lose = FALSE;
     const char *reason = "unknown";
     bool we_are_owner = FALSE;
     crm_node_t *our_node = NULL, *your_node = NULL;
     time_t tm_now = time(NULL);
     struct vote vote;
 
     CRM_CHECK(message != NULL, return election_error);
     if (parse_election_message(e, message, &vote) == FALSE) {
         return election_error;
     }
 
     your_node = pcmk__get_node(0, vote.from, NULL, pcmk__node_search_cluster);
     our_node = pcmk__get_node(0, e->uname, NULL, pcmk__node_search_cluster);
     we_are_owner = (our_node != NULL)
                    && pcmk__str_eq(our_node->uuid, vote.election_owner,
                                    pcmk__str_none);
 
     if (!can_win) {
         reason = "Not eligible";
         we_lose = TRUE;
 
-    } else if (our_node == NULL || crm_is_peer_active(our_node) == FALSE) {
+    } else if (!pcmk__cluster_is_node_active(our_node)) {
         reason = "We are not part of the cluster";
         log_level = LOG_ERR;
         we_lose = TRUE;
 
     } else if (we_are_owner && (vote.election_id != e->count)) {
         log_level = LOG_TRACE;
         reason = "Superseded";
         done = TRUE;
 
-    } else if (your_node == NULL || crm_is_peer_active(your_node) == FALSE) {
+    } else if (!pcmk__cluster_is_node_active(your_node)) {
         /* Possibly we cached the message in the FSA queue at a point when
          * the peer was still active
          */
         reason = "Peer is not part of our cluster";
         log_level = LOG_WARNING;
         done = TRUE;
 
     } else if (pcmk__str_eq(vote.op, CRM_OP_NOVOTE, pcmk__str_none)
                || pcmk__str_eq(vote.from, e->uname, pcmk__str_none)) {
         /* Receiving our own broadcast vote, or a no-vote from peer, is a vote
          * for us to win
          */
         if (!we_are_owner) {
             crm_warn("Cannot count %s round %d %s from %s because we are not election owner (%s)",
                      e->name, vote.election_id, vote.op, vote.from,
                      vote.election_owner);
             return election_error;
         }
         if (e->state != election_in_progress) {
             // Should only happen if we already lost
             crm_debug("Not counting %s round %d %s from %s because no election in progress",
                       e->name, vote.election_id, vote.op, vote.from);
             return e->state;
         }
         record_vote(e, &vote);
         reason = "Recorded";
         done = TRUE;
 
     } else {
         // A peer vote requires a comparison to determine which node is better
         int age_result = compare_age(vote.age);
         int version_result = compare_version(vote.version, CRM_FEATURE_SET);
 
         if (version_result < 0) {
             reason = "Version";
             we_lose = TRUE;
 
         } else if (version_result > 0) {
             reason = "Version";
 
         } else if (age_result < 0) {
             reason = "Uptime";
             we_lose = TRUE;
 
         } else if (age_result > 0) {
             reason = "Uptime";
 
         } else if (strcasecmp(e->uname, vote.from) > 0) {
             reason = "Host name";
             we_lose = TRUE;
 
         } else {
             reason = "Host name";
         }
     }
 
     if (e->expires < tm_now) {
         e->election_wins = 0;
         e->expires = tm_now + STORM_INTERVAL;
 
     } else if (done == FALSE && we_lose == FALSE) {
         int peers = 1 + g_hash_table_size(crm_peer_cache);
 
         /* If every node has to vote down every other node, that's N*(N-1)
          * total elections. Allow some leeway before _really_ complaining.
          */
         e->election_wins++;
         if (e->election_wins > (peers * peers)) {
             crm_warn("%s election storm detected: %d wins in %d seconds",
                      e->name, e->election_wins, STORM_INTERVAL);
             e->election_wins = 0;
             e->expires = tm_now + STORM_INTERVAL;
             if (e->wrote_blackbox == FALSE) {
                 /* It's questionable whether a black box (from every node in the
                  * cluster) would be truly helpful in diagnosing an election
                  * storm. It's also highly doubtful a production environment
                  * would get multiple election storms from distinct causes, so
                  * saving one blackbox per process lifetime should be
                  * sufficient. Alternatives would be to save a timestamp of the
                  * last blackbox write instead of a boolean, and write a new one
                  * if some amount of time has passed; or to save a storm count
                  * and write a blackbox on every Nth occurrence.
                  */
                 crm_write_blackbox(0, NULL);
                 e->wrote_blackbox = TRUE;
             }
         }
     }
 
     if (done) {
         do_crm_log(log_level + 1,
                    "Processed %s round %d %s (current round %d) from %s (%s)",
                    e->name, vote.election_id, vote.op, e->count, vote.from,
                    reason);
         return e->state;
 
     } else if (we_lose == FALSE) {
         /* We track the time of the last election loss to implement an election
          * dampening period, reducing the likelihood of an election storm. If
          * this node has lost within the dampening period, don't start a new
          * election, even if we win against a peer's vote -- the peer we lost to
          * should win again.
          *
          * @TODO This has a problem case: if an election winner immediately
          * leaves the cluster, and a new election is immediately called, all
          * nodes could lose, with no new winner elected. The ideal solution
          * would be to tie the election structure with the peer caches, which
          * would allow us to clear the dampening when the previous winner
          * leaves (and would allow other improvements as well).
          */
         if ((e->last_election_loss == 0)
             || ((tm_now - e->last_election_loss) > (time_t) LOSS_DAMPEN)) {
 
             do_crm_log(log_level, "%s round %d (owner node ID %s) pass: %s from %s (%s)",
                        e->name, vote.election_id, vote.election_owner, vote.op,
                        vote.from, reason);
 
             e->last_election_loss = 0;
             election_timeout_stop(e);
 
             /* Start a new election by voting down this and other peers */
             e->state = election_start;
             return e->state;
         } else {
             char *loss_time = ctime(&e->last_election_loss);
 
             if (loss_time) {
                 // Show only HH:MM:SS
                 loss_time += 11;
                 loss_time[8] = '\0';
             }
             crm_info("Ignoring %s round %d (owner node ID %s) pass vs %s because we lost less than %ds ago at %s",
                      e->name, vote.election_id, vote.election_owner, vote.from,
                      LOSS_DAMPEN, (loss_time? loss_time : "unknown"));
         }
     }
 
     e->last_election_loss = tm_now;
 
     do_crm_log(log_level, "%s round %d (owner node ID %s) lost: %s from %s (%s)",
                e->name, vote.election_id, vote.election_owner, vote.op,
                vote.from, reason);
 
     election_reset(e);
     send_no_vote(your_node, &vote);
     e->state = election_lost;
     return e->state;
 }
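 
 /* Illustrative sketch (not part of this patch): how a caller might dispatch
  * on the result of election_count_vote(). The reactions shown are
  * assumptions for illustration.
  */
 static void
 example_handle_election_message(election_t *e, const xmlNode *vote_msg)
 {
     switch (election_count_vote(e, vote_msg, true)) {
         case election_start:
             election_vote(e);   // We beat the peer: start a new round
             break;
 
         case election_in_progress:
             election_check(e);  // All no-votes received? Then we have won
             break;
 
         case election_lost:
             break;              // Nothing to do; our state was already reset
 
         default:
             break;              // election_error: the message was invalid
     }
 }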
 
 /*!
  * \brief Reset any election dampening currently in effect
  *
  * \param[in,out] e        Election object to clear
  */
 void
 election_clear_dampening(election_t *e)
 {
     e->last_election_loss = 0;
 }
diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c
index d78411fc4c..679244e5b0 100644
--- a/lib/cluster/membership.c
+++ b/lib/cluster/membership.c
@@ -1,1421 +1,1441 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #ifndef _GNU_SOURCE
 #  define _GNU_SOURCE
 #endif
 
 #include <sys/param.h>
 #include <sys/types.h>
 #include <stdio.h>
 #include <unistd.h>
 #include <string.h>
 #include <glib.h>
 #include <crm/common/ipc.h>
 #include <crm/common/xml_internal.h>
 #include <crm/cluster/internal.h>
 #include <crm/common/xml.h>
 #include <crm/stonith-ng.h>
 #include "crmcluster_private.h"
 
 /* The peer cache remembers cluster nodes that have been seen.
  * This is managed mostly automatically by libcluster, based on
  * cluster membership events.
  *
  * Because cluster nodes can have conflicting names or UUIDs,
  * the hash table key is a uniquely generated ID.
  */
 GHashTable *crm_peer_cache = NULL;
 
 /*
  * The remote peer cache tracks pacemaker_remote nodes. While the
  * value has the same type as the peer cache's, it is tracked separately for
  * three reasons: pacemaker_remote nodes can't have conflicting names or UUIDs,
  * so the name (which is also the UUID) is used as the hash table key; there
  * is no equivalent of membership events, so management is not automatic; and
  * most users of the peer cache need to exclude pacemaker_remote nodes.
  *
  * That said, using a single cache would be more logical and less error-prone,
  * so it would be a good idea to merge them one day.
  *
  * libcluster provides two avenues for populating the cache:
  * pcmk__cluster_lookup_remote_node() and pcmk__cluster_forget_remote_node()
  * directly manage it, while refresh_remote_nodes() populates it via the CIB.
  */
 GHashTable *crm_remote_peer_cache = NULL;
 
 /*
  * The known node cache tracks cluster and remote nodes that have been seen in
  * the CIB. It is useful mainly when a caller needs to know about a node that
  * may no longer be in the membership, but doesn't want to add the node to the
  * main peer cache tables.
  */
 static GHashTable *known_node_cache = NULL;
 
 unsigned long long crm_peer_seq = 0;
 gboolean crm_have_quorum = FALSE;
 static gboolean crm_autoreap  = TRUE;
 
 // Flag setting and clearing for crm_node_t:flags
 
 #define set_peer_flags(peer, flags_to_set) do {                               \
         (peer)->flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                            "Peer", (peer)->uname,             \
                                            (peer)->flags, (flags_to_set),     \
                                            #flags_to_set);                    \
     } while (0)
 
 #define clear_peer_flags(peer, flags_to_clear) do {                           \
         (peer)->flags = pcmk__clear_flags_as(__func__, __LINE__,              \
                                              LOG_TRACE,                       \
                                              "Peer", (peer)->uname,           \
                                              (peer)->flags, (flags_to_clear), \
                                              #flags_to_clear);                \
     } while (0)
 
 static void update_peer_uname(crm_node_t *node, const char *uname);
 static crm_node_t *find_known_node(const char *id, const char *uname);
 
 /*!
  * \internal
  * \brief Get the number of Pacemaker Remote nodes that have been seen
  *
  * \return Number of cached Pacemaker Remote nodes
  */
 unsigned int
 pcmk__cluster_num_remote_nodes(void)
 {
     if (crm_remote_peer_cache == NULL) {
         return 0U;
     }
     return g_hash_table_size(crm_remote_peer_cache);
 }
 
 /*!
  * \internal
  * \brief Get a remote node cache entry, creating it if necessary
  *
  * \param[in] node_name  Name of remote node
  *
  * \return Cache entry for node on success, or \c NULL (and set \c errno)
  *         otherwise
  *
  * \note When creating a new entry, this will leave the node state undetermined.
  *       The caller should also call \c pcmk__update_peer_state() if the state
  *       is known.
  * \note Because this can add and remove cache entries, callers should not
  *       assume any previously obtained cache entry pointers remain valid.
  */
 crm_node_t *
 pcmk__cluster_lookup_remote_node(const char *node_name)
 {
     crm_node_t *node;
     char *node_name_copy = NULL;
 
     if (node_name == NULL) {
         errno = EINVAL;
         return NULL;
     }
 
     /* It's theoretically possible that the node was added to the cluster peer
      * cache before it was known to be a Pacemaker Remote node. Remove that
      * entry unless it has a node ID, which means the name actually is
      * associated with a cluster node. (@TODO return an error in that case?)
      */
     node = pcmk__search_node_caches(0, node_name, pcmk__node_search_cluster);
     if ((node != NULL) && (node->uuid == NULL)) {
         /* node_name could be a pointer into the cache entry being removed, so
          * reassign it to a copy before the original gets freed
          */
         node_name_copy = strdup(node_name);
         if (node_name_copy == NULL) {
             errno = ENOMEM;
             return NULL;
         }
         node_name = node_name_copy;
         reap_crm_member(0, node_name);
     }
 
     /* Return existing cache entry if one exists */
     node = g_hash_table_lookup(crm_remote_peer_cache, node_name);
     if (node) {
         free(node_name_copy);
         return node;
     }
 
     /* Allocate a new entry */
     node = calloc(1, sizeof(crm_node_t));
     if (node == NULL) {
         free(node_name_copy);
         return NULL;
     }
 
     /* Populate the essential information */
     set_peer_flags(node, crm_remote_node);
     node->uuid = strdup(node_name);
     if (node->uuid == NULL) {
         free(node);
         errno = ENOMEM;
         free(node_name_copy);
         return NULL;
     }
 
     /* Add the new entry to the cache */
     g_hash_table_replace(crm_remote_peer_cache, node->uuid, node);
     crm_trace("added %s to remote cache", node_name);
 
     /* Update the entry's uname, ensuring peer status callbacks are called */
     update_peer_uname(node, node_name);
     free(node_name_copy);
     return node;
 }
 
 /*!
  * \internal
  * \brief Remove a node from the Pacemaker Remote node cache
  *
  * \param[in] node_name  Name of node to remove from cache
  *
  * \note The caller must be careful not to use \p node_name after calling this
  *       function if it might be a pointer into the cache entry being removed.
  */
 void
 pcmk__cluster_forget_remote_node(const char *node_name)
 {
     /* Do a lookup first, because node_name could be a pointer within the entry
      * being removed -- we can't log it *after* removing it.
      */
     if (g_hash_table_lookup(crm_remote_peer_cache, node_name) != NULL) {
         crm_trace("Removing %s from Pacemaker Remote node cache", node_name);
         g_hash_table_remove(crm_remote_peer_cache, node_name);
     }
 }
 
 /*!
  * \internal
  * \brief Return node status based on a CIB status entry
  *
  * \param[in] node_state  XML of node state
  *
  * \return \c CRM_NODE_LOST if \c PCMK__XA_IN_CCM is false in
  *         \c PCMK__XE_NODE_STATE, \c CRM_NODE_MEMBER otherwise
  * \note Unlike most boolean XML attributes, this one defaults to true, for
  *       backward compatibility with older controllers that don't set it.
  */
 static const char *
 remote_state_from_cib(const xmlNode *node_state)
 {
     bool status = false;
 
     if ((pcmk__xe_get_bool_attr(node_state, PCMK__XA_IN_CCM,
                                 &status) == pcmk_rc_ok) && !status) {
         return CRM_NODE_LOST;
     } else {
         return CRM_NODE_MEMBER;
     }
 }
 
 /* User data for looping through remote node XPath searches */
 struct refresh_data {
     const char *field;  /* XML attribute to check for node name */
     gboolean has_state; /* whether to update node state based on XML */
 };
 
 /*!
  * \internal
  * \brief Process one pacemaker_remote node xpath search result
  *
  * \param[in] result     XML search result
  * \param[in] user_data  what to look for in the XML
  */
 static void
 remote_cache_refresh_helper(xmlNode *result, void *user_data)
 {
     const struct refresh_data *data = user_data;
     const char *remote = crm_element_value(result, data->field);
     const char *state = NULL;
     crm_node_t *node;
 
     CRM_CHECK(remote != NULL, return);
 
     /* Determine node's state, if the result has it */
     if (data->has_state) {
         state = remote_state_from_cib(result);
     }
 
     /* Check whether cache already has entry for node */
     node = g_hash_table_lookup(crm_remote_peer_cache, remote);
 
     if (node == NULL) {
         /* Node is not in cache, so add a new entry for it */
         node = pcmk__cluster_lookup_remote_node(remote);
         CRM_ASSERT(node);
         if (state) {
             pcmk__update_peer_state(__func__, node, state, 0);
         }
 
     } else if (pcmk_is_set(node->flags, crm_node_dirty)) {
         /* Node is in cache and hasn't been updated already, so mark it clean */
         clear_peer_flags(node, crm_node_dirty);
         if (state) {
             pcmk__update_peer_state(__func__, node, state, 0);
         }
     }
 }
 
 static void
 mark_dirty(gpointer key, gpointer value, gpointer user_data)
 {
     set_peer_flags((crm_node_t *) value, crm_node_dirty);
 }
 
 static gboolean
 is_dirty(gpointer key, gpointer value, gpointer user_data)
 {
     return pcmk_is_set(((crm_node_t*)value)->flags, crm_node_dirty);
 }
 
 /*!
  * \internal
  * \brief Repopulate the remote node cache based on CIB XML
  *
  * \param[in] cib  CIB XML to parse
  */
 static void
 refresh_remote_nodes(xmlNode *cib)
 {
     struct refresh_data data;
 
     crm_peer_init();
 
     /* First, we mark all existing cache entries as dirty,
      * so that later we can remove any that weren't in the CIB.
      * We don't empty the cache, because we need to detect changes in state.
      */
     g_hash_table_foreach(crm_remote_peer_cache, mark_dirty, NULL);
 
     /* Look for guest nodes and remote nodes in the status section */
     data.field = PCMK_XA_ID;
     data.has_state = TRUE;
     crm_foreach_xpath_result(cib, PCMK__XP_REMOTE_NODE_STATUS,
                              remote_cache_refresh_helper, &data);
 
     /* Look for guest nodes and remote nodes in the configuration section,
      * because they may have just been added and not have a status entry yet.
      * In that case, the cached node state will be left NULL, so that the
      * peer status callback isn't called until we're sure the node started
      * successfully.
      */
     data.field = PCMK_XA_VALUE;
     data.has_state = FALSE;
     crm_foreach_xpath_result(cib, PCMK__XP_GUEST_NODE_CONFIG,
                              remote_cache_refresh_helper, &data);
     data.field = PCMK_XA_ID;
     data.has_state = FALSE;
     crm_foreach_xpath_result(cib, PCMK__XP_REMOTE_NODE_CONFIG,
                              remote_cache_refresh_helper, &data);
 
     /* Remove all old cache entries that weren't seen in the CIB */
     g_hash_table_foreach_remove(crm_remote_peer_cache, is_dirty, NULL);
 }
 
-gboolean
-crm_is_peer_active(const crm_node_t * node)
+/*!
+ * \internal
+ * \brief Check whether a node is an active cluster node
+ *
+ * Remote nodes are never considered active. This guarantees that they can never
+ * become DC.
+ *
+ * \param[in] node  Node to check
+ *
+ * \return \c true if the node is an active cluster node, or \c false otherwise
+ */
+bool
+pcmk__cluster_is_node_active(const crm_node_t *node)
 {
-    if(node == NULL) {
-        return FALSE;
-    }
+    const enum cluster_type_e type = get_cluster_type();
 
-    if (pcmk_is_set(node->flags, crm_remote_node)) {
-        /* remote nodes are never considered active members. This
-         * guarantees they will never be considered for DC membership.*/
-        return FALSE;
+    if ((node == NULL) || pcmk_is_set(node->flags, crm_remote_node)) {
+        return false;
     }
+
+    switch (type) {
+        case pcmk_cluster_corosync:
 #if SUPPORT_COROSYNC
-    if (is_corosync_cluster()) {
-        return crm_is_corosync_peer_active(node);
+            return crm_is_corosync_peer_active(node);
+#else
+            break;
+#endif  // SUPPORT_COROSYNC
+        default:
+            break;
     }
-#endif
-    crm_err("Unhandled cluster type: %s", name_for_cluster_type(get_cluster_type()));
-    return FALSE;
+
+    crm_err("Unhandled cluster type: %s", name_for_cluster_type(type));
+    return false;
+}
+
+gboolean
+crm_is_peer_active(const crm_node_t * node)
+{
+    return pcmk__cluster_is_node_active(node);
 }
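
/* Usage sketch (illustrative, not part of this file): internal callers use the
 * new API directly, while external code keeps the deprecated wrapper above.
 * "node1" is a hypothetical node name:
 *
 *     crm_node_t *peer = pcmk__get_node(0, "node1", NULL,
 *                                       pcmk__node_search_cluster);
 *
 *     if (pcmk__cluster_is_node_active(peer)) {
 *         // peer is a fully active cluster node (never a remote node)
 *     }
 */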
 
 static gboolean
 crm_reap_dead_member(gpointer key, gpointer value, gpointer user_data)
 {
     crm_node_t *node = value;
     crm_node_t *search = user_data;
 
     if (search == NULL) {
         return FALSE;
 
     } else if (search->id && node->id != search->id) {
         return FALSE;
 
     } else if (search->id == 0 && !pcmk__str_eq(node->uname, search->uname, pcmk__str_casei)) {
         return FALSE;
 
-    } else if (crm_is_peer_active(value) == FALSE) {
+    } else if (!pcmk__cluster_is_node_active(value)) {
         crm_info("Removing node with name %s and " PCMK_XA_ID
                  " %u from membership cache",
                  (node->uname? node->uname : "unknown"), node->id);
         return TRUE;
     }
     return FALSE;
 }
 
 /*!
  * \brief Remove all peer cache entries matching a node ID and/or uname
  *
  * \param[in] id    ID of node to remove (or 0 to ignore)
  * \param[in] name  Uname of node to remove (or NULL to ignore)
  *
  * \return Number of cache entries removed
  *
  * \note The caller must be careful not to use \p name after calling this
  *       function if it might be a pointer into the cache entry being removed.
  */
 guint
 reap_crm_member(uint32_t id, const char *name)
 {
     int matches = 0;
     crm_node_t search = { 0, };
 
     if (crm_peer_cache == NULL) {
         crm_trace("Membership cache not initialized, ignoring purge request");
         return 0;
     }
 
     search.id = id;
     search.uname = pcmk__str_copy(name);
     matches = g_hash_table_foreach_remove(crm_peer_cache, crm_reap_dead_member, &search);
     if(matches) {
         crm_notice("Purged %d peer%s with " PCMK_XA_ID
                    "=%u%s%s from the membership cache",
                    matches, pcmk__plural_s(matches), search.id,
                    (search.uname? " and/or uname=" : ""),
                    (search.uname? search.uname : ""));
 
     } else {
         crm_info("No peers with " PCMK_XA_ID
                  "=%u%s%s to purge from the membership cache",
                  search.id, (search.uname? " and/or uname=" : ""),
                  (search.uname? search.uname : ""));
     }
 
     free(search.uname);
     return matches;
 }
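
/* Usage sketch (illustrative): a caller that still needs the name after the
 * call should copy it first, since "name" may point into the cache entry
 * being removed (compare pcmk__purge_node_from_cache() below):
 *
 *     char *name_copy = pcmk__str_copy(node->uname);
 *
 *     reap_crm_member(node->id, name_copy);
 *     crm_info("Reaped %s", name_copy);   // safe: our own copy
 *     free(name_copy);
 */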
 
 static void
 count_peer(gpointer key, gpointer value, gpointer user_data)
 {
     guint *count = user_data;
     crm_node_t *node = value;
 
-    if (crm_is_peer_active(node)) {
+    if (pcmk__cluster_is_node_active(node)) {
         *count = *count + 1;
     }
 }
 
 guint
 crm_active_peers(void)
 {
     guint count = 0;
 
     if (crm_peer_cache) {
         g_hash_table_foreach(crm_peer_cache, count_peer, &count);
     }
     return count;
 }
 
 static void
 destroy_crm_node(gpointer data)
 {
     crm_node_t *node = data;
 
     crm_trace("Destroying entry for node %u: %s", node->id, node->uname);
 
     free(node->uname);
     free(node->state);
     free(node->uuid);
     free(node->expected);
     free(node->conn_host);
     free(node);
 }
 
 void
 crm_peer_init(void)
 {
     if (crm_peer_cache == NULL) {
         crm_peer_cache = pcmk__strikey_table(free, destroy_crm_node);
     }
 
     if (crm_remote_peer_cache == NULL) {
         crm_remote_peer_cache = pcmk__strikey_table(NULL, destroy_crm_node);
     }
 
     if (known_node_cache == NULL) {
         known_node_cache = pcmk__strikey_table(free, destroy_crm_node);
     }
 }
 
 void
 crm_peer_destroy(void)
 {
     if (crm_peer_cache != NULL) {
         crm_trace("Destroying peer cache with %d members", g_hash_table_size(crm_peer_cache));
         g_hash_table_destroy(crm_peer_cache);
         crm_peer_cache = NULL;
     }
 
     if (crm_remote_peer_cache != NULL) {
         crm_trace("Destroying remote peer cache with %d members",
                   pcmk__cluster_num_remote_nodes());
         g_hash_table_destroy(crm_remote_peer_cache);
         crm_remote_peer_cache = NULL;
     }
 
     if (known_node_cache != NULL) {
         crm_trace("Destroying known node cache with %d members",
                   g_hash_table_size(known_node_cache));
         g_hash_table_destroy(known_node_cache);
         known_node_cache = NULL;
    }
}
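
/* Typical lifecycle (illustrative sketch): daemons initialize the caches once
 * at startup and tear them down at exit. crm_peer_init() is idempotent, so
 * repeated calls are safe:
 *
 *     crm_peer_init();        // create peer, remote, and known-node caches
 *     ...                     // normal operation
 *     crm_peer_destroy();     // free all three caches
 */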
 
 static void (*peer_status_callback)(enum crm_status_type, crm_node_t *,
                                     const void *) = NULL;
 
 /*!
  * \brief Set a client function that will be called after peer status changes
  *
  * \param[in] dispatch  Pointer to function to use as callback
  *
  * \note Previously, client callbacks were responsible for peer cache
  *       management. This is no longer the case, and client callbacks should do
  *       only client-specific handling. Callbacks MUST NOT add or remove entries
  *       in the peer caches.
  */
 void
 crm_set_status_callback(void (*dispatch) (enum crm_status_type, crm_node_t *, const void *))
 {
     peer_status_callback = dispatch;
 }
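
/* A minimal client callback (hypothetical example): it only logs the change
 * and, per the note above, never adds or removes peer cache entries:
 *
 *     static void
 *     my_status_cb(enum crm_status_type type, crm_node_t *node,
 *                  const void *data)
 *     {
 *         if (type == crm_status_nstate) {
 *             crm_info("Node %s is now %s",
 *                      (node->uname? node->uname : "unknown"),
 *                      (node->state? node->state : "unknown"));
 *         }
 *     }
 *
 *     crm_set_status_callback(my_status_cb);
 */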
 
 /*!
  * \brief Tell the library whether to automatically reap lost nodes
  *
  * If TRUE (the default), calling crm_update_peer_proc() will also update the
  * peer state to CRM_NODE_MEMBER or CRM_NODE_LOST, and pcmk__update_peer_state()
  * will reap peers whose state changes to anything other than CRM_NODE_MEMBER.
  * Callers should leave this enabled unless they plan to manage the cache
  * separately on their own.
  *
  * \param[in] autoreap  TRUE to enable automatic reaping, FALSE to disable
  */
 void
 crm_set_autoreap(gboolean autoreap)
 {
     crm_autoreap = autoreap;
 }
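
/* Illustrative use (an assumption about a caller, not from this file): a
 * client that manages the cache itself would disable automatic reaping and
 * purge lost nodes explicitly:
 *
 *     crm_set_autoreap(FALSE);
 *     ...
 *     reap_crm_member(node_id, node_name);   // caller-controlled purge
 */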
 
 static void
 dump_peer_hash(int level, const char *caller)
 {
     GHashTableIter iter;
     const char *id = NULL;
     crm_node_t *node = NULL;
 
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, (gpointer *) &id, (gpointer *) &node)) {
         do_crm_log(level, "%s: Node %u/%s = %p - %s", caller, node->id, node->uname, node, id);
     }
 }
 
 static gboolean
 hash_find_by_data(gpointer key, gpointer value, gpointer user_data)
 {
     return value == user_data;
 }
 
 /*!
  * \internal
  * \brief Search caches for a node (cluster or Pacemaker Remote)
  *
  * \param[in] id     If not 0, cluster node ID to search for
  * \param[in] uname  If not NULL, node name to search for
  * \param[in] flags  Group of enum pcmk__node_search_flags
  *
  * \return Node cache entry if found, otherwise NULL
  */
 crm_node_t *
 pcmk__search_node_caches(unsigned int id, const char *uname, uint32_t flags)
 {
     crm_node_t *node = NULL;
 
     CRM_ASSERT(id > 0 || uname != NULL);
 
     crm_peer_init();
 
     if ((uname != NULL) && pcmk_is_set(flags, pcmk__node_search_remote)) {
         node = g_hash_table_lookup(crm_remote_peer_cache, uname);
     }
 
     if ((node == NULL) && pcmk_is_set(flags, pcmk__node_search_cluster)) {
         node = pcmk__search_cluster_node_cache(id, uname, NULL);
     }
 
     if ((node == NULL) && pcmk_is_set(flags, pcmk__node_search_known)) {
         char *id_str = (id == 0)? NULL : crm_strdup_printf("%u", id);
 
         node = find_known_node(id_str, uname);
         free(id_str);
     }
 
     return node;
 }
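
/* Example (sketch): search the remote cache first, then the cluster cache,
 * by combining search flags. "node1" is a placeholder name; at least one of
 * id and uname is required:
 *
 *     crm_node_t *node =
 *         pcmk__search_node_caches(0, "node1",
 *                                  pcmk__node_search_remote
 *                                  |pcmk__node_search_cluster);
 */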
 
 /*!
  * \internal
  * \brief Purge a node from cache (both cluster and Pacemaker Remote)
  *
  * \param[in] node_name  If not NULL, purge only nodes with this name
  * \param[in] node_id    If not 0, purge cluster nodes only if they have this ID
  *
  * \note If \p node_name is NULL and \p node_id is 0, no nodes will be purged.
  *       If \p node_name is not NULL and \p node_id is not 0, Pacemaker Remote
  *       nodes that match \p node_name will be purged, and cluster nodes that
  *       match both \p node_name and \p node_id will be purged.
  * \note The caller must be careful not to use \p node_name after calling this
  *       function if it might be a pointer into a cache entry being removed.
  */
 void
 pcmk__purge_node_from_cache(const char *node_name, uint32_t node_id)
 {
     char *node_name_copy = NULL;
 
     if ((node_name == NULL) && (node_id == 0U)) {
         return;
     }
 
     // Purge from Pacemaker Remote node cache
     if ((node_name != NULL)
         && (g_hash_table_lookup(crm_remote_peer_cache, node_name) != NULL)) {
         /* node_name could be a pointer into the cache entry being purged,
          * so reassign it to a copy before the original gets freed
          */
         node_name_copy = pcmk__str_copy(node_name);
         node_name = node_name_copy;
 
         crm_trace("Purging %s from Pacemaker Remote node cache", node_name);
         g_hash_table_remove(crm_remote_peer_cache, node_name);
     }
 
     reap_crm_member(node_id, node_name);
     free(node_name_copy);
 }
 
 /*!
  * \internal
  * \brief Search cluster node cache
  *
  * \param[in] id     If not 0, cluster node ID to search for
  * \param[in] uname  If not NULL, node name to search for
  * \param[in] uuid   If not NULL while id is 0, node UUID instead of cluster
  *                   node ID to search for
  *
  * \return Cluster node cache entry if found, otherwise NULL
  */
 crm_node_t *
 pcmk__search_cluster_node_cache(unsigned int id, const char *uname,
                                 const char *uuid)
 {
     GHashTableIter iter;
     crm_node_t *node = NULL;
     crm_node_t *by_id = NULL;
     crm_node_t *by_name = NULL;
 
     CRM_ASSERT(id > 0 || uname != NULL);
 
     crm_peer_init();
 
     if (uname != NULL) {
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if(node->uname && strcasecmp(node->uname, uname) == 0) {
                 crm_trace("Name match: %s = %p", node->uname, node);
                 by_name = node;
                 break;
             }
         }
     }
 
     if (id > 0) {
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if(node->id == id) {
                 crm_trace("ID match: %u = %p", node->id, node);
                 by_id = node;
                 break;
             }
         }
 
     } else if (uuid != NULL) {
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if (pcmk__str_eq(node->uuid, uuid, pcmk__str_casei)) {
                 crm_trace("UUID match: %s = %p", node->uuid, node);
                 by_id = node;
                 break;
             }
         }
     }
 
     node = by_id; /* Good default */
     if(by_id == by_name) {
         /* Nothing to do if they match (both NULL counts) */
         crm_trace("Consistent: %p for %u/%s", by_id, id, uname);
 
     } else if(by_id == NULL && by_name) {
         crm_trace("Only one: %p for %u/%s", by_name, id, uname);
 
         if(id && by_name->id) {
             dump_peer_hash(LOG_WARNING, __func__);
             crm_crit("Node %u and %u share the same name '%s'",
                      id, by_name->id, uname);
             node = NULL; /* Create a new one */
 
         } else {
             node = by_name;
         }
 
     } else if(by_name == NULL && by_id) {
         crm_trace("Only one: %p for %u/%s", by_id, id, uname);
 
         if(uname && by_id->uname) {
             dump_peer_hash(LOG_WARNING, __func__);
             crm_crit("Node '%s' and '%s' share the same cluster nodeid %u: assuming '%s' is correct",
                      uname, by_id->uname, id, uname);
         }
 
     } else if(uname && by_id->uname) {
         if(pcmk__str_eq(uname, by_id->uname, pcmk__str_casei)) {
             crm_notice("Node '%s' has changed its ID from %u to %u", by_id->uname, by_name->id, by_id->id);
             g_hash_table_foreach_remove(crm_peer_cache, hash_find_by_data, by_name);
 
         } else {
             crm_warn("Node '%s' and '%s' share the same cluster nodeid: %u %s", by_id->uname, by_name->uname, id, uname);
             dump_peer_hash(LOG_INFO, __func__);
             crm_abort(__FILE__, __func__, __LINE__, "member weirdness", TRUE,
                       TRUE);
         }
 
     } else if(id && by_name->id) {
         crm_warn("Node %u and %u share the same name: '%s'", by_id->id, by_name->id, uname);
 
     } else {
         /* Simple merge */
 
         /* Only corosync-based clusters use node IDs. The functions that call
          * pcmk__update_peer_state() and crm_update_peer_proc() only know
          * nodeid, so 'by_id' is authoritative when merging.
          */
         dump_peer_hash(LOG_DEBUG, __func__);
 
         crm_info("Merging %p into %p", by_name, by_id);
         g_hash_table_foreach_remove(crm_peer_cache, hash_find_by_data, by_name);
     }
 
     return node;
 }
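
/* Example (sketch): at least one of id and uname must be given; with id 0,
 * a non-NULL uuid is matched against node->uuid instead of the node ID.
 * "node1" and uuid are placeholders for a known name and UUID:
 *
 *     crm_node_t *node = pcmk__search_cluster_node_cache(0, "node1", uuid);
 */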
 
 #if SUPPORT_COROSYNC
 static guint
 remove_conflicting_peer(crm_node_t *node)
 {
     int matches = 0;
     GHashTableIter iter;
     crm_node_t *existing_node = NULL;
 
     if (node->id == 0 || node->uname == NULL) {
         return 0;
     }
 
     if (!pcmk__corosync_has_nodelist()) {
         return 0;
     }
 
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &existing_node)) {
         if (existing_node->id > 0
             && existing_node->id != node->id
             && existing_node->uname != NULL
             && strcasecmp(existing_node->uname, node->uname) == 0) {
 
-            if (crm_is_peer_active(existing_node)) {
+            if (pcmk__cluster_is_node_active(existing_node)) {
                 continue;
             }
 
             crm_warn("Removing cached offline node %u/%s which has conflicting uname with %u",
                      existing_node->id, existing_node->uname, node->id);
 
             g_hash_table_iter_remove(&iter);
             matches++;
         }
     }
 
     return matches;
 }
 #endif
 
 /*!
  * \brief Get a cluster node cache entry
  *
  * \param[in] id     If not 0, cluster node ID to search for
  * \param[in] uname  If not NULL, node name to search for
  * \param[in] uuid   If not NULL while id is 0, node UUID instead of cluster
  *                   node ID to search for
  * \param[in] flags  Group of enum pcmk__node_search_flags
  *
  * \return (Possibly newly created) cluster node cache entry
  */
 /* coverity[-alloc] Memory is referenced in one or both hashtables */
 crm_node_t *
 pcmk__get_node(unsigned int id, const char *uname, const char *uuid,
                uint32_t flags)
 {
     crm_node_t *node = NULL;
     char *uname_lookup = NULL;
 
     CRM_ASSERT(id > 0 || uname != NULL);
 
     crm_peer_init();
 
     // Check the Pacemaker Remote node cache first
     if (pcmk_is_set(flags, pcmk__node_search_remote)) {
         node = g_hash_table_lookup(crm_remote_peer_cache, uname);
         if (node != NULL) {
             return node;
         }
     }
 
     if (!pcmk_is_set(flags, pcmk__node_search_cluster)) {
         return NULL;
     }
 
     node = pcmk__search_cluster_node_cache(id, uname, uuid);
 
    /* If uname wasn't provided, and the search above didn't turn up a uname
     * based on the ID, look up the node name using the ID in the cluster
     * membership.
     */
    if ((node == NULL || node->uname == NULL) && (uname == NULL)) {
         uname_lookup = get_node_name(id);
     }
 
     if (uname_lookup) {
         uname = uname_lookup;
         crm_trace("Inferred a name of '%s' for node %u", uname, id);
 
        /* Retry the cache search now that we know the uname */
         if (node == NULL) {
             node = pcmk__search_cluster_node_cache(id, uname, uuid);
         }
     }
 
     if (node == NULL) {
         char *uniqueid = crm_generate_uuid();
 
         node = pcmk__assert_alloc(1, sizeof(crm_node_t));
 
         crm_info("Created entry %s/%p for node %s/%u (%d total)",
                  uniqueid, node, uname, id, 1 + g_hash_table_size(crm_peer_cache));
         g_hash_table_replace(crm_peer_cache, uniqueid, node);
     }
 
     if(id > 0 && uname && (node->id == 0 || node->uname == NULL)) {
         crm_info("Node %u is now known as %s", id, uname);
     }
 
     if(id > 0 && node->id == 0) {
         node->id = id;
     }
 
     if (uname && (node->uname == NULL)) {
         update_peer_uname(node, uname);
     }
 
     if(node->uuid == NULL) {
         if (uuid == NULL) {
             uuid = crm_peer_uuid(node);
         }
 
         if (uuid) {
             crm_info("Node %u has uuid %s", id, uuid);
 
         } else {
             crm_info("Cannot obtain a UUID for node %u/%s", id, node->uname);
         }
     }
 
     free(uname_lookup);
 
     return node;
 }
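
/* Example (sketch): look a node up by ID in the cluster cache, creating an
 * entry if none exists; checking the remote cache first is opt-in via the
 * flags. nodeid is a placeholder for a nonzero cluster node ID:
 *
 *     crm_node_t *node = pcmk__get_node(nodeid, NULL, NULL,
 *                                       pcmk__node_search_cluster);
 */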
 
 /*!
  * \internal
  * \brief Update a node's uname
  *
  * \param[in,out] node   Node object to update
  * \param[in]     uname  New name to set
  *
  * \note This function should not be called within a peer cache iteration,
  *       because in some cases it can remove conflicting cache entries,
  *       which would invalidate the iterator.
  */
 static void
 update_peer_uname(crm_node_t *node, const char *uname)
 {
     CRM_CHECK(uname != NULL,
               crm_err("Bug: can't update node name without name"); return);
     CRM_CHECK(node != NULL,
               crm_err("Bug: can't update node name to %s without node", uname);
               return);
 
     if (pcmk__str_eq(uname, node->uname, pcmk__str_casei)) {
         crm_debug("Node uname '%s' did not change", uname);
         return;
     }
 
     for (const char *c = uname; *c; ++c) {
         if ((*c >= 'A') && (*c <= 'Z')) {
             crm_warn("Node names with capitals are discouraged, consider changing '%s'",
                      uname);
             break;
         }
     }
 
     pcmk__str_update(&node->uname, uname);
 
     if (peer_status_callback != NULL) {
         peer_status_callback(crm_status_uname, node, NULL);
     }
 
 #if SUPPORT_COROSYNC
     if (is_corosync_cluster() && !pcmk_is_set(node->flags, crm_remote_node)) {
         remove_conflicting_peer(node);
     }
 #endif
 }
 
 /*!
  * \internal
  * \brief Get log-friendly string equivalent of a process flag
  *
  * \param[in] proc  Process flag
  *
  * \return Log-friendly string equivalent of \p proc
  */
 static inline const char *
 proc2text(enum crm_proc_flag proc)
 {
     const char *text = "unknown";
 
     switch (proc) {
         case crm_proc_none:
             text = "none";
             break;
         case crm_proc_based:
             text = "pacemaker-based";
             break;
         case crm_proc_controld:
             text = "pacemaker-controld";
             break;
         case crm_proc_schedulerd:
             text = "pacemaker-schedulerd";
             break;
         case crm_proc_execd:
             text = "pacemaker-execd";
             break;
         case crm_proc_attrd:
             text = "pacemaker-attrd";
             break;
         case crm_proc_fenced:
             text = "pacemaker-fenced";
             break;
         case crm_proc_cpg:
             text = "corosync-cpg";
             break;
     }
     return text;
 }
 
 /*!
  * \internal
  * \brief Update a node's process information (and potentially state)
  *
  * \param[in]     source  Caller's function name (for log messages)
  * \param[in,out] node    Node object to update
  * \param[in]     flag    Bitmask of new process information
 * \param[in]     status  Node status (online, offline, etc.)
  *
  * \return NULL if any node was reaped from peer caches, value of node otherwise
  *
  * \note If this function returns NULL, the supplied node object was likely
  *       freed and should not be used again. This function should not be
  *       called within a cache iteration if reaping is possible, otherwise
  *       reaping could invalidate the iterator.
  */
 crm_node_t *
 crm_update_peer_proc(const char *source, crm_node_t * node, uint32_t flag, const char *status)
 {
     uint32_t last = 0;
     gboolean changed = FALSE;
 
     CRM_CHECK(node != NULL, crm_err("%s: Could not set %s to %s for NULL",
                                     source, proc2text(flag), status);
                             return NULL);
 
     /* Pacemaker doesn't spawn processes on remote nodes */
     if (pcmk_is_set(node->flags, crm_remote_node)) {
         return node;
     }
 
     last = node->processes;
     if (status == NULL) {
         node->processes = flag;
         if (node->processes != last) {
             changed = TRUE;
         }
 
     } else if (pcmk__str_eq(status, PCMK_VALUE_ONLINE, pcmk__str_casei)) {
         if ((node->processes & flag) != flag) {
             node->processes = pcmk__set_flags_as(__func__, __LINE__,
                                                  LOG_TRACE, "Peer process",
                                                  node->uname, node->processes,
                                                  flag, "processes");
             changed = TRUE;
         }
 
     } else if (node->processes & flag) {
         node->processes = pcmk__clear_flags_as(__func__, __LINE__,
                                                LOG_TRACE, "Peer process",
                                                node->uname, node->processes,
                                                flag, "processes");
         changed = TRUE;
     }
 
     if (changed) {
         if (status == NULL && flag <= crm_proc_none) {
             crm_info("%s: Node %s[%u] - all processes are now offline", source, node->uname,
                      node->id);
         } else {
             crm_info("%s: Node %s[%u] - %s is now %s", source, node->uname, node->id,
                      proc2text(flag), status);
         }
 
         if (pcmk_is_set(node->processes, crm_get_cluster_proc())) {
             node->when_online = time(NULL);
 
         } else {
             node->when_online = 0;
         }
 
         /* Call the client callback first, then update the peer state,
          * in case the node will be reaped
          */
         if (peer_status_callback != NULL) {
             peer_status_callback(crm_status_processes, node, &last);
         }
 
         /* The client callback shouldn't touch the peer caches,
          * but as a safety net, bail if the peer cache was destroyed.
          */
         if (crm_peer_cache == NULL) {
             return NULL;
         }
 
         if (crm_autoreap) {
             const char *peer_state = NULL;
 
             if (pcmk_is_set(node->processes, crm_get_cluster_proc())) {
                 peer_state = CRM_NODE_MEMBER;
             } else {
                 peer_state = CRM_NODE_LOST;
             }
             node = pcmk__update_peer_state(__func__, node, peer_state, 0);
         }
     } else {
         crm_trace("%s: Node %s[%u] - %s is unchanged (%s)", source, node->uname, node->id,
                   proc2text(flag), status);
     }
     return node;
 }
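
/* Because this function can return NULL when the node is reaped, callers
 * should reassign their pointer and stop on NULL (illustrative sketch):
 *
 *     node = crm_update_peer_proc(__func__, node, crm_proc_cpg,
 *                                 PCMK_VALUE_ONLINE);
 *     if (node == NULL) {
 *         return;     // node was likely freed; do not dereference it
 *     }
 */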
 
 /*!
  * \internal
  * \brief Update a cluster node cache entry's expected join state
  *
  * \param[in]     source    Caller's function name (for logging)
  * \param[in,out] node      Node to update
  * \param[in]     expected  Node's new join state
  */
 void
 pcmk__update_peer_expected(const char *source, crm_node_t *node,
                            const char *expected)
 {
     char *last = NULL;
     gboolean changed = FALSE;
 
     CRM_CHECK(node != NULL, crm_err("%s: Could not set 'expected' to %s", source, expected);
               return);
 
     /* Remote nodes don't participate in joins */
     if (pcmk_is_set(node->flags, crm_remote_node)) {
         return;
     }
 
     last = node->expected;
     if (expected != NULL && !pcmk__str_eq(node->expected, expected, pcmk__str_casei)) {
         node->expected = strdup(expected);
         changed = TRUE;
     }
 
     if (changed) {
         crm_info("%s: Node %s[%u] - expected state is now %s (was %s)", source, node->uname, node->id,
                  expected, last);
         free(last);
     } else {
         crm_trace("%s: Node %s[%u] - expected state is unchanged (%s)", source, node->uname,
                   node->id, expected);
     }
 }
 
 /*!
  * \internal
  * \brief Update a node's state and membership information
  *
  * \param[in]     source      Caller's function name (for log messages)
  * \param[in,out] node        Node object to update
  * \param[in]     state       Node's new state
  * \param[in]     membership  Node's new membership ID
  * \param[in,out] iter        If not NULL, pointer to node's peer cache iterator
  *
  * \return NULL if any node was reaped, value of node otherwise
  *
  * \note If this function returns NULL, the supplied node object was likely
  *       freed and should not be used again. This function may be called from
  *       within a peer cache iteration if the iterator is supplied.
  */
 static crm_node_t *
 update_peer_state_iter(const char *source, crm_node_t *node, const char *state,
                        uint64_t membership, GHashTableIter *iter)
 {
     gboolean is_member;
 
     CRM_CHECK(node != NULL,
               crm_err("Could not set state for unknown host to %s"
                       CRM_XS " source=%s", state, source);
               return NULL);
 
     is_member = pcmk__str_eq(state, CRM_NODE_MEMBER, pcmk__str_casei);
     if (is_member) {
         node->when_lost = 0;
         if (membership) {
             node->last_seen = membership;
         }
     }
 
     if (state && !pcmk__str_eq(node->state, state, pcmk__str_casei)) {
         char *last = node->state;
 
         if (is_member) {
              node->when_member = time(NULL);
 
         } else {
              node->when_member = 0;
         }
 
         node->state = strdup(state);
         crm_notice("Node %s state is now %s " CRM_XS
                    " nodeid=%u previous=%s source=%s", node->uname, state,
                    node->id, (last? last : "unknown"), source);
         if (peer_status_callback != NULL) {
             peer_status_callback(crm_status_nstate, node, last);
         }
         free(last);
 
         if (crm_autoreap && !is_member
             && !pcmk_is_set(node->flags, crm_remote_node)) {
             /* We only autoreap from the peer cache, not the remote peer cache,
              * because the latter should be managed only by
              * refresh_remote_nodes().
              */
             if(iter) {
                 crm_notice("Purged 1 peer with " PCMK_XA_ID
                            "=%u and/or uname=%s from the membership cache",
                            node->id, node->uname);
                 g_hash_table_iter_remove(iter);
 
             } else {
                 reap_crm_member(node->id, node->uname);
             }
             node = NULL;
         }
 
     } else {
         crm_trace("Node %s state is unchanged (%s) " CRM_XS
                   " nodeid=%u source=%s", node->uname, state, node->id, source);
     }
     return node;
 }
 
 /*!
  * \brief Update a node's state and membership information
  *
  * \param[in]     source      Caller's function name (for log messages)
  * \param[in,out] node        Node object to update
  * \param[in]     state       Node's new state
  * \param[in]     membership  Node's new membership ID
  *
  * \return NULL if any node was reaped, value of node otherwise
  *
  * \note If this function returns NULL, the supplied node object was likely
  *       freed and should not be used again. This function should not be
  *       called within a cache iteration if reaping is possible,
  *       otherwise reaping could invalidate the iterator.
  */
 crm_node_t *
 pcmk__update_peer_state(const char *source, crm_node_t *node,
                         const char *state, uint64_t membership)
 {
     return update_peer_state_iter(source, node, state, membership, NULL);
 }
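
/* The same NULL-on-reap contract applies here (sketch):
 *
 *     node = pcmk__update_peer_state(__func__, node, CRM_NODE_LOST, 0);
 *     if (node == NULL) {
 *         return;     // node was reaped and likely freed
 *     }
 */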
 
 /*!
  * \internal
  * \brief Reap all nodes from cache whose membership information does not match
  *
  * \param[in] membership  Membership ID of nodes to keep
  */
 void
 pcmk__reap_unseen_nodes(uint64_t membership)
 {
     GHashTableIter iter;
     crm_node_t *node = NULL;
 
     crm_trace("Reaping unseen nodes...");
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *)&node)) {
         if (node->last_seen != membership) {
             if (node->state) {
                 /*
                  * Calling update_peer_state_iter() allows us to
                  * remove the node from crm_peer_cache without
                  * invalidating our iterator
                  */
                update_peer_state_iter(__func__, node, CRM_NODE_LOST,
                                       membership, &iter);
 
             } else {
                 crm_info("State of node %s[%u] is still unknown",
                          node->uname, node->id);
             }
         }
     }
 }
 
 static crm_node_t *
 find_known_node(const char *id, const char *uname)
 {
     GHashTableIter iter;
     crm_node_t *node = NULL;
     crm_node_t *by_id = NULL;
     crm_node_t *by_name = NULL;
 
     if (uname) {
         g_hash_table_iter_init(&iter, known_node_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if (node->uname && strcasecmp(node->uname, uname) == 0) {
                 crm_trace("Name match: %s = %p", node->uname, node);
                 by_name = node;
                 break;
             }
         }
     }
 
     if (id) {
         g_hash_table_iter_init(&iter, known_node_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
            if (node->uuid && strcasecmp(node->uuid, id) == 0) {
                crm_trace("ID match: %s = %p", id, node);
                 by_id = node;
                 break;
             }
         }
     }
 
     node = by_id; /* Good default */
     if (by_id == by_name) {
         /* Nothing to do if they match (both NULL counts) */
         crm_trace("Consistent: %p for %s/%s", by_id, id, uname);
 
     } else if (by_id == NULL && by_name) {
         crm_trace("Only one: %p for %s/%s", by_name, id, uname);
 
         if (id) {
             node = NULL;
 
         } else {
             node = by_name;
         }
 
     } else if (by_name == NULL && by_id) {
         crm_trace("Only one: %p for %s/%s", by_id, id, uname);
 
         if (uname) {
             node = NULL;
         }
 
     } else if (uname && by_id->uname
                && pcmk__str_eq(uname, by_id->uname, pcmk__str_casei)) {
         /* Multiple nodes have the same uname in the CIB.
          * Return by_id. */
 
     } else if (id && by_name->uuid
                && pcmk__str_eq(id, by_name->uuid, pcmk__str_casei)) {
         /* Multiple nodes have the same id in the CIB.
          * Return by_name. */
         node = by_name;
 
     } else {
         node = NULL;
     }
 
     if (node == NULL) {
         crm_debug("Couldn't find node%s%s%s%s",
                    id? " " : "",
                    id? id : "",
                    uname? " with name " : "",
                    uname? uname : "");
     }
 
     return node;
 }
 
 static void
 known_node_cache_refresh_helper(xmlNode *xml_node, void *user_data)
 {
     const char *id = crm_element_value(xml_node, PCMK_XA_ID);
     const char *uname = crm_element_value(xml_node, PCMK_XA_UNAME);
    crm_node_t *node = NULL;

    CRM_CHECK(id != NULL && uname != NULL, return);
     node = find_known_node(id, uname);
 
     if (node == NULL) {
         char *uniqueid = crm_generate_uuid();
 
         node = pcmk__assert_alloc(1, sizeof(crm_node_t));
 
         node->uname = pcmk__str_copy(uname);
         node->uuid = pcmk__str_copy(id);
 
         g_hash_table_replace(known_node_cache, uniqueid, node);
 
     } else if (pcmk_is_set(node->flags, crm_node_dirty)) {
         pcmk__str_update(&node->uname, uname);
 
         /* Node is in cache and hasn't been updated already, so mark it clean */
         clear_peer_flags(node, crm_node_dirty);
    }
}
 
 static void
 refresh_known_node_cache(xmlNode *cib)
 {
     crm_peer_init();
 
     g_hash_table_foreach(known_node_cache, mark_dirty, NULL);
 
     crm_foreach_xpath_result(cib, PCMK__XP_MEMBER_NODE_CONFIG,
                              known_node_cache_refresh_helper, NULL);
 
     /* Remove all old cache entries that weren't seen in the CIB */
     g_hash_table_foreach_remove(known_node_cache, is_dirty, NULL);
 }
 
 void
 pcmk__refresh_node_caches_from_cib(xmlNode *cib)
 {
     refresh_remote_nodes(cib);
     refresh_known_node_cache(cib);
 }
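
/* Illustrative caller (an assumption, not from this file): a daemon that has
 * just read or been notified of a new CIB would refresh both caches in one
 * call:
 *
 *     xmlNode *cib = ...;   // current CIB, e.g. from a CIB query callback
 *     pcmk__refresh_node_caches_from_cib(cib);
 */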
 
 // Deprecated functions kept only for backward API compatibility
 // LCOV_EXCL_START
 
 #include <crm/cluster/compat.h>
 
 int
 crm_terminate_member(int nodeid, const char *uname, void *unused)
 {
     return stonith_api_kick(nodeid, uname, 120, TRUE);
 }
 
 int
 crm_terminate_member_no_mainloop(int nodeid, const char *uname, int *connection)
 {
     return stonith_api_kick(nodeid, uname, 120, TRUE);
 }
 
 crm_node_t *
 crm_get_peer(unsigned int id, const char *uname)
 {
     return pcmk__get_node(id, uname, NULL, pcmk__node_search_cluster);
 }
 
 crm_node_t *
 crm_get_peer_full(unsigned int id, const char *uname, int flags)
 {
     return pcmk__get_node(id, uname, NULL, flags);
 }
 
 int
 crm_remote_peer_cache_size(void)
 {
     unsigned int count = pcmk__cluster_num_remote_nodes();
 
     return QB_MIN(count, INT_MAX);
 }
 
 void
 crm_remote_peer_cache_refresh(xmlNode *cib)
 {
     refresh_remote_nodes(cib);
 }
 
 crm_node_t *
 crm_remote_peer_get(const char *node_name)
 {
     return pcmk__cluster_lookup_remote_node(node_name);
 }
 
 void
 crm_remote_peer_cache_remove(const char *node_name)
 {
     pcmk__cluster_forget_remote_node(node_name);
 }
 
 // LCOV_EXCL_STOP
 // End deprecated API