diff --git a/daemons/attrd/attrd_corosync.c b/daemons/attrd/attrd_corosync.c
index bd39e7621f..21374243d3 100644
--- a/daemons/attrd/attrd_corosync.c
+++ b/daemons/attrd/attrd_corosync.c
@@ -1,612 +1,612 @@
 /*
  * Copyright 2013-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <errno.h>
 #include <stdbool.h>
 #include <stdint.h>
 #include <stdlib.h>
 
 #include <crm/cluster.h>
 #include <crm/cluster/internal.h>
 #include <crm/common/logging.h>
 #include <crm/common/results.h>
 #include <crm/common/strings_internal.h>
 #include <crm/common/xml.h>
 
 #include "pacemaker-attrd.h"
 
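 /*!
  * \internal
  * \brief Create a confirmation message for a given call ID
  *
  * \param[in] callid  Call ID of the message being confirmed
  *
  * \return Newly created confirmation XML (caller is responsible for freeing)
  */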
 static xmlNode *
 attrd_confirmation(int callid)
 {
     xmlNode *node = pcmk__xe_create(NULL, __func__);
 
     crm_xml_add(node, PCMK__XA_T, PCMK__VALUE_ATTRD);
     crm_xml_add(node, PCMK__XA_SRC, pcmk__cluster_local_node_name());
     crm_xml_add(node, PCMK_XA_TASK, PCMK__ATTRD_CMD_CONFIRM);
     crm_xml_add_int(node, PCMK__XA_CALL_ID, callid);
 
     return node;
 }
 
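 /*!
  * \internal
  * \brief Handle a message from another attribute manager instance
  *
  * \param[in,out] peer  Peer that sent the message
  * \param[in,out] xml   Message XML
  */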
 static void
 attrd_peer_message(pcmk__node_status_t *peer, xmlNode *xml)
 {
     const char *election_op = crm_element_value(xml, PCMK__XA_CRM_TASK);
 
     if (election_op) {
         attrd_handle_election_op(peer, xml);
         return;
     }
 
     if (attrd_shutting_down(false)) {
         /* If we're shutting down, we want to continue responding to election
          * ops as long as we're a cluster member (because our vote may be
          * needed). Ignore all other messages.
          */
         return;
 
     } else {
         pcmk__request_t request = {
             .ipc_client     = NULL,
             .ipc_id         = 0,
             .ipc_flags      = 0,
             .peer           = peer->name,
             .xml            = xml,
             .call_options   = 0,
             .result         = PCMK__UNKNOWN_RESULT,
         };
 
         request.op = crm_element_value_copy(request.xml, PCMK_XA_TASK);
         CRM_CHECK(request.op != NULL, return);
 
         attrd_handle_request(&request);
 
         /* Having finished handling the request, check to see if the originating
          * peer requested confirmation.  If so, send that confirmation back now.
          */
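         /* Confirmations themselves are never confirmed, so that two peers do
          * not end up confirming each other's confirmations indefinitely.
          */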
         if (pcmk__xe_attr_is_true(xml, PCMK__XA_CONFIRM) &&
             !pcmk__str_eq(request.op, PCMK__ATTRD_CMD_CONFIRM, pcmk__str_none)) {
             int callid = 0;
             xmlNode *reply = NULL;
 
             /* Add the call ID of the message being confirmed to the reply, so
              * the originating peer knows which message this confirms.
              */
             crm_element_value_int(xml, PCMK__XA_CALL_ID, &callid);
             reply = attrd_confirmation(callid);
 
             /* And then send the confirmation back to the originating peer. This
              * ends up right back in this same function (attrd_peer_message) on
              * the peer, which will handle it as a PCMK__ATTRD_CMD_CONFIRM task.
              */
             crm_debug("Sending %s a confirmation", peer->name);
             attrd_send_message(peer, reply, false);
             pcmk__xml_free(reply);
         }
 
         pcmk__reset_request(&request);
     }
 }
 
 static void
 attrd_cpg_dispatch(cpg_handle_t handle,
                    const struct cpg_name *groupName,
                    uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len)
 {
     xmlNode *xml = NULL;
     const char *from = NULL;
     char *data = pcmk__cpg_message_data(handle, nodeid, pid, msg, &from);
 
     if (data == NULL) {
         return;
     }
 
     xml = pcmk__xml_parse(data);
 
     if (xml == NULL) {
         crm_err("Bad message received from %s[%u]: '%.120s'",
                 from, nodeid, data);
     } else {
         attrd_peer_message(pcmk__get_node(nodeid, from, NULL,
                                           pcmk__node_search_cluster_member),
                            xml);
     }
 
     pcmk__xml_free(xml);
     free(data);
 }
 
 static void
 attrd_cpg_destroy(gpointer unused)
 {
     if (attrd_shutting_down(false)) {
         crm_info("Disconnected from Corosync process group");
 
     } else {
         crm_crit("Lost connection to Corosync process group, shutting down");
         attrd_exit_status = CRM_EX_DISCONNECT;
         attrd_shutdown(0);
     }
 }
 
 /*!
  * \internal
  * \brief Broadcast an update for a single attribute value
  *
  * \param[in] a  Attribute to broadcast
  * \param[in] v  Attribute value to broadcast
  */
 void
 attrd_broadcast_value(const attribute_t *a, const attribute_value_t *v)
 {
     xmlNode *op = pcmk__xe_create(NULL, PCMK_XE_OP);
 
     crm_xml_add(op, PCMK_XA_TASK, PCMK__ATTRD_CMD_UPDATE);
     attrd_add_value_xml(op, a, v, false);
     attrd_send_message(NULL, op, false);
     pcmk__xml_free(op);
 }
 
 #define state_text(state) pcmk__s((state), "in unknown state")
 
 static void
 attrd_peer_change_cb(enum pcmk__node_update kind, pcmk__node_status_t *peer,
                      const void *data)
 {
     bool gone = false;
     bool is_remote = pcmk_is_set(peer->flags, pcmk__node_status_remote);
 
     switch (kind) {
         case pcmk__node_update_name:
             crm_debug("%s node %s is now %s",
                       (is_remote? "Remote" : "Cluster"),
                       peer->name, state_text(peer->state));
             break;
 
         case pcmk__node_update_processes:
             if (!pcmk_is_set(peer->processes, crm_get_cluster_proc())) {
                 gone = true;
             }
             crm_debug("Node %s is %s a peer",
                       peer->name, (gone? "no longer" : "now"));
             break;
 
         case pcmk__node_update_state:
             crm_debug("%s node %s is now %s (was %s)",
                       (is_remote? "Remote" : "Cluster"),
                       peer->name, state_text(peer->state), state_text(data));
-            if (pcmk__str_eq(peer->state, CRM_NODE_MEMBER, pcmk__str_casei)) {
+            if (pcmk__str_eq(peer->state, PCMK_VALUE_MEMBER, pcmk__str_none)) {
                 /* If we're the writer, send new peers a list of all attributes
                  * (unless it's a remote node, which doesn't run its own attrd)
                  */
                 if (attrd_election_won()
                     && !pcmk_is_set(peer->flags, pcmk__node_status_remote)) {
                     attrd_peer_sync(peer);
                 }
             } else {
                 // Remove all attribute values associated with lost nodes
                 attrd_peer_remove(peer->name, false, "loss");
                 gone = true;
             }
             break;
     }
 
     // Remove votes from cluster nodes that leave, in case election in progress
     if (gone && !is_remote) {
         attrd_remove_voter(peer);
         attrd_remove_peer_protocol_ver(peer->name);
         attrd_do_not_expect_from_peer(peer->name);
     }
 }
 
 static void
 record_peer_nodeid(attribute_value_t *v, const char *host)
 {
     pcmk__node_status_t *known_peer =
         pcmk__get_node(v->nodeid, host, NULL, pcmk__node_search_cluster_member);
 
     crm_trace("Learned %s has node id %s",
               known_peer->name, known_peer->xml_id);
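     /* If we're the writer, changed values (possibly including ones for this
      * node) can be written to the CIB now that the node's ID is known
      */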
     if (attrd_election_won()) {
         attrd_write_attributes(attrd_write_changed);
     }
 }
 
 #define readable_value(rv_v) pcmk__s((rv_v)->current, "(unset)")
 
 #define readable_peer(p)    \
     (((p) == NULL)? "all peers" : pcmk__s((p)->name, "unknown peer"))
 
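 /*!
  * \internal
  * \brief Apply a peer's update of one attribute value for one host
  *
  * \param[in,out] a       Attribute being updated
  * \param[in]     peer    Peer that sent the update
  * \param[in]     xml     Update XML
  * \param[in]     attr    Attribute name
  * \param[in]     value   New attribute value
  * \param[in]     host    Name of the host whose value is being updated
  * \param[in]     filter  Whether to broadcast a differing local value instead
  *                        of accepting \p value (used for sync responses)
  */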
 static void
 update_attr_on_host(attribute_t *a, const pcmk__node_status_t *peer,
                     const xmlNode *xml, const char *attr, const char *value,
                     const char *host, bool filter)
 {
     int is_remote = 0;
     bool changed = false;
     attribute_value_t *v = NULL;
 
     // Create entry for value if not already existing
     v = g_hash_table_lookup(a->values, host);
     if (v == NULL) {
         v = pcmk__assert_alloc(1, sizeof(attribute_value_t));
 
         v->nodename = pcmk__str_copy(host);
         g_hash_table_replace(a->values, v->nodename, v);
     }
 
     // If value is for a Pacemaker Remote node, remember that
     crm_element_value_int(xml, PCMK__XA_ATTR_IS_REMOTE, &is_remote);
     if (is_remote) {
         attrd_set_value_flags(v, attrd_value_remote);
         CRM_ASSERT(pcmk__cluster_lookup_remote_node(host) != NULL);
     }
 
     // Check whether the value changed
     changed = !pcmk__str_eq(v->current, value, pcmk__str_casei);
 
     if (changed && filter
         && pcmk__str_eq(host, attrd_cluster->priv->node_name,
                         pcmk__str_casei)) {
         /* Broadcast the local value for an attribute that differs from the
          * value provided in a peer's attribute synchronization response. This
          * ensures a node's values for itself take precedence and all peers are
          * kept in sync.
          */
         v = g_hash_table_lookup(a->values, attrd_cluster->priv->node_name);
         crm_notice("%s[%s]: local value '%s' takes priority over '%s' from %s",
                    attr, host, readable_value(v), value, peer->name);
         attrd_broadcast_value(a, v);
 
     } else if (changed) {
         crm_notice("Setting %s[%s]%s%s: %s -> %s "
                    QB_XS " from %s with %s write delay",
                    attr, host, a->set_type ? " in " : "",
                    pcmk__s(a->set_type, ""), readable_value(v),
                    pcmk__s(value, "(unset)"), peer->name,
                    (a->timeout_ms == 0)? "no" : pcmk__readable_interval(a->timeout_ms));
         pcmk__str_update(&v->current, value);
         attrd_set_attr_flags(a, attrd_attr_changed);
 
         if (pcmk__str_eq(host, attrd_cluster->priv->node_name, pcmk__str_casei)
             && pcmk__str_eq(attr, PCMK__NODE_ATTR_SHUTDOWN, pcmk__str_none)) {
 
             if (!pcmk__str_eq(value, "0", pcmk__str_null_matches)) {
                 attrd_set_requesting_shutdown();
 
             } else {
                 attrd_clear_requesting_shutdown();
             }
         }
 
         // Write out new value or start dampening timer
         if (a->timeout_ms && a->timer) {
             crm_trace("Delaying write of %s %s for dampening",
                       attr, pcmk__readable_interval(a->timeout_ms));
             mainloop_timer_start(a->timer);
         } else {
             attrd_write_or_elect_attribute(a);
         }
 
     } else {
         int is_force_write = 0;
 
         crm_element_value_int(xml, PCMK__XA_ATTRD_IS_FORCE_WRITE,
                               &is_force_write);
 
         if (is_force_write == 1 && a->timeout_ms && a->timer) {
             /* Save the forced write request and set the changed flag; the
              * actual attribute is written by the writer after the election.
              */
             crm_trace("%s[%s] from %s is unchanged (%s), forcing write",
                       attr, host, peer->name, pcmk__s(value, "unset"));
             attrd_set_attr_flags(a, attrd_attr_force_write);
         } else {
             crm_trace("%s[%s] from %s is unchanged (%s)",
                       attr, host, peer->name, pcmk__s(value, "unset"));
         }
     }
 
     // This lets us later detect local values that the peer doesn't know about
     attrd_set_value_flags(v, attrd_value_from_peer);
 
     /* If this is a cluster node whose node ID we are learning, remember it */
     if ((v->nodeid == 0) && !pcmk_is_set(v->flags, attrd_value_remote)
         && (crm_element_value_int(xml, PCMK__XA_ATTR_HOST_ID,
                                   (int*)&v->nodeid) == 0) && (v->nodeid > 0)) {
         record_peer_nodeid(v, host);
     }
 }
 
 static void
 attrd_peer_update_one(const pcmk__node_status_t *peer, xmlNode *xml,
                       bool filter)
 {
     attribute_t *a = NULL;
     const char *attr = crm_element_value(xml, PCMK__XA_ATTR_NAME);
     const char *value = crm_element_value(xml, PCMK__XA_ATTR_VALUE);
     const char *host = crm_element_value(xml, PCMK__XA_ATTR_HOST);
 
     if (attr == NULL) {
         crm_warn("Could not update attribute: peer did not specify name");
         return;
     }
 
     a = attrd_populate_attribute(xml, attr);
     if (a == NULL) {
         return;
     }
 
     if (host == NULL) {
         // If no host was specified, update all hosts
         GHashTableIter vIter;
 
         crm_debug("Setting %s for all hosts to %s", attr, value);
         pcmk__xe_remove_attr(xml, PCMK__XA_ATTR_HOST_ID);
         g_hash_table_iter_init(&vIter, a->values);
 
         while (g_hash_table_iter_next(&vIter, (gpointer *) & host, NULL)) {
             update_attr_on_host(a, peer, xml, attr, value, host, filter);
         }
 
     } else {
         // Update attribute value for the given host
         update_attr_on_host(a, peer, xml, attr, value, host, filter);
     }
 
     /* If this is a message from some attrd instance broadcasting its protocol
      * version, check to see if it's a new minimum version.
      */
     if (pcmk__str_eq(attr, CRM_ATTR_PROTOCOL, pcmk__str_none)) {
         attrd_update_minimum_protocol_ver(peer->name, value);
     }
 }
 
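 /*!
  * \internal
  * \brief Broadcast any local values that no peer update has mentioned
  *
  * After a sync response from a newly elected writer, any local value not
  * flagged as seen is unknown to the writer, so broadcast it to all peers.
  */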
 static void
 broadcast_unseen_local_values(void)
 {
     GHashTableIter aIter;
     GHashTableIter vIter;
     attribute_t *a = NULL;
     attribute_value_t *v = NULL;
     xmlNode *sync = NULL;
 
     g_hash_table_iter_init(&aIter, attributes);
     while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) {
 
         g_hash_table_iter_init(&vIter, a->values);
         while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) {
 
             if (!pcmk_is_set(v->flags, attrd_value_from_peer)
                 && pcmk__str_eq(v->nodename, attrd_cluster->priv->node_name,
                                 pcmk__str_casei)) {
                 crm_trace("* %s[%s]='%s' is local-only",
                           a->id, v->nodename, readable_value(v));
                 if (sync == NULL) {
                     sync = pcmk__xe_create(NULL, __func__);
                     crm_xml_add(sync, PCMK_XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE);
                 }
                 attrd_add_value_xml(sync, a, v, a->timeout_ms && a->timer);
             }
         }
     }
 
     if (sync != NULL) {
         crm_debug("Broadcasting local-only values");
         attrd_send_message(NULL, sync, false);
         pcmk__xml_free(sync);
     }
 }
 
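 /*!
  * \internal
  * \brief Connect to the cluster layer, setting CPG and status callbacks
  *
  * \return Legacy Pacemaker return code (pcmk_ok on success)
  */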
 int
 attrd_cluster_connect(void)
 {
     int rc = pcmk_rc_ok;
 
     attrd_cluster = pcmk_cluster_new();
 
     pcmk_cluster_set_destroy_fn(attrd_cluster, attrd_cpg_destroy);
     pcmk_cpg_set_deliver_fn(attrd_cluster, attrd_cpg_dispatch);
     pcmk_cpg_set_confchg_fn(attrd_cluster, pcmk__cpg_confchg_cb);
 
     pcmk__cluster_set_status_callback(&attrd_peer_change_cb);
 
     rc = pcmk_cluster_connect(attrd_cluster);
     rc = pcmk_rc2legacy(rc);
     if (rc != pcmk_ok) {
         crm_err("Cluster connection failed");
         return rc;
     }
     return pcmk_ok;
 }
 
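 /*!
  * \internal
  * \brief Handle a peer request to clear failures, by turning it into update
  *        messages that unset each matching failure attribute
  *
  * \param[in,out] request  Peer request specifying which failures to clear
  */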
 void
 attrd_peer_clear_failure(pcmk__request_t *request)
 {
     xmlNode *xml = request->xml;
     const char *rsc = crm_element_value(xml, PCMK__XA_ATTR_RESOURCE);
     const char *host = crm_element_value(xml, PCMK__XA_ATTR_HOST);
     const char *op = crm_element_value(xml, PCMK__XA_ATTR_CLEAR_OPERATION);
     const char *interval_spec = crm_element_value(xml,
                                                   PCMK__XA_ATTR_CLEAR_INTERVAL);
     guint interval_ms = 0U;
     char *attr = NULL;
     GHashTableIter iter;
     regex_t regex;
 
     pcmk__node_status_t *peer =
         pcmk__get_node(0, request->peer, NULL,
                        pcmk__node_search_cluster_member);
 
     pcmk_parse_interval_spec(interval_spec, &interval_ms);
 
     if (attrd_failure_regex(&regex, rsc, op, interval_ms) != pcmk_ok) {
         crm_info("Ignoring invalid request to clear failures for %s",
                  pcmk__s(rsc, "all resources"));
         return;
     }
 
     crm_xml_add(xml, PCMK_XA_TASK, PCMK__ATTRD_CMD_UPDATE);
 
     /* Make sure the value is unset, so the update removes the attribute */
     pcmk__xe_remove_attr(xml, PCMK__XA_ATTR_VALUE);
 
     g_hash_table_iter_init(&iter, attributes);
     while (g_hash_table_iter_next(&iter, (gpointer *) &attr, NULL)) {
         if (regexec(&regex, attr, 0, NULL, 0) == 0) {
             crm_trace("Matched %s when clearing %s",
                       attr, pcmk__s(rsc, "all resources"));
             crm_xml_add(xml, PCMK__XA_ATTR_NAME, attr);
             attrd_peer_update(peer, xml, host, false);
         }
     }
     regfree(&regex);
 }
 
 /*!
  * \internal
  * \brief Load attributes from a peer sync response
  *
  * \param[in]     peer      Peer that sent sync response
  * \param[in]     peer_won  Whether peer is the attribute writer
  * \param[in,out] xml       Request XML
  */
 void
 attrd_peer_sync_response(const pcmk__node_status_t *peer, bool peer_won,
                          xmlNode *xml)
 {
     crm_info("Processing " PCMK__ATTRD_CMD_SYNC_RESPONSE " from %s",
              peer->name);
 
     if (peer_won) {
         /* Initialize the "seen" flag for all attributes to cleared, so we can
          * detect attributes that the local node has but the writer doesn't.
          */
         attrd_clear_value_seen();
     }
 
     // Process each attribute update in the sync response
     for (xmlNode *child = pcmk__xe_first_child(xml, NULL, NULL, NULL);
          child != NULL; child = pcmk__xe_next(child)) {
 
         attrd_peer_update(peer, child,
                           crm_element_value(child, PCMK__XA_ATTR_HOST), true);
     }
 
     if (peer_won) {
         /* If any attributes are still not marked as seen, the writer doesn't
          * know about them, so send all peers an update with them.
          */
         broadcast_unseen_local_values();
     }
 }
 
 /*!
  * \internal
  * \brief Remove all attributes and optionally peer cache entries for a node
  *
  * \param[in] host     Name of node to purge
  * \param[in] uncache  If true, remove node from peer caches
  * \param[in] source   Who requested removal (only used for logging)
  */
 void
 attrd_peer_remove(const char *host, bool uncache, const char *source)
 {
     attribute_t *a = NULL;
     GHashTableIter aIter;
 
     CRM_CHECK(host != NULL, return);
     crm_notice("Removing all %s attributes for node %s "
                QB_XS " %s reaping node from cache",
                host, source, (uncache? "and" : "without"));
 
     g_hash_table_iter_init(&aIter, attributes);
     while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) {
         if (g_hash_table_remove(a->values, host)) {
             crm_debug("Removed %s[%s] for peer %s", a->id, host, source);
         }
     }
 
     if (uncache) {
         pcmk__purge_node_from_cache(host, 0);
     }
 }
 
 /*!
  * \internal
  * \brief Send all known attributes and values to a peer
  *
  * \param[in] peer  Peer to send sync to (if NULL, broadcast to all peers)
  */
 void
 attrd_peer_sync(pcmk__node_status_t *peer)
 {
     GHashTableIter aIter;
     GHashTableIter vIter;
 
     attribute_t *a = NULL;
     attribute_value_t *v = NULL;
     xmlNode *sync = pcmk__xe_create(NULL, __func__);
 
     crm_xml_add(sync, PCMK_XA_TASK, PCMK__ATTRD_CMD_SYNC_RESPONSE);
 
     g_hash_table_iter_init(&aIter, attributes);
     while (g_hash_table_iter_next(&aIter, NULL, (gpointer *) & a)) {
         g_hash_table_iter_init(&vIter, a->values);
         while (g_hash_table_iter_next(&vIter, NULL, (gpointer *) & v)) {
             crm_debug("Syncing %s[%s]='%s' to %s",
                       a->id, v->nodename, readable_value(v),
                       readable_peer(peer));
             attrd_add_value_xml(sync, a, v, false);
         }
     }
 
     crm_debug("Syncing values to %s", readable_peer(peer));
     attrd_send_message(peer, sync, false);
     pcmk__xml_free(sync);
 }
 
 void
 attrd_peer_update(const pcmk__node_status_t *peer, xmlNode *xml,
                   const char *host, bool filter)
 {
     bool handle_sync_point = false;
 
     CRM_CHECK((peer != NULL) && (xml != NULL), return);
     if (xml->children != NULL) {
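         /* A batched update has one <op> child per value update. Apply each,
          * copying to it any attributes of the parent element that it lacks.
          */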
         for (xmlNode *child = pcmk__xe_first_child(xml, PCMK_XE_OP, NULL, NULL);
              child != NULL; child = pcmk__xe_next_same(child)) {
 
             pcmk__xe_copy_attrs(child, xml, pcmk__xaf_no_overwrite);
             attrd_peer_update_one(peer, child, filter);
 
             if (attrd_request_has_sync_point(child)) {
                 handle_sync_point = true;
             }
         }
 
     } else {
         attrd_peer_update_one(peer, xml, filter);
 
         if (attrd_request_has_sync_point(xml)) {
             handle_sync_point = true;
         }
     }
 
     /* If the update XML specified that the client wanted to wait for a sync
      * point, process that now.
      */
     if (handle_sync_point) {
         crm_trace("Hit local sync point for attribute update");
         attrd_ack_waitlist_clients(attrd_sync_point_local, xml);
     }
 }
diff --git a/daemons/controld/controld_callbacks.c b/daemons/controld/controld_callbacks.c
index 9e3279010c..19319c06ff 100644
--- a/daemons/controld/controld_callbacks.c
+++ b/daemons/controld/controld_callbacks.c
@@ -1,392 +1,396 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <inttypes.h>           // PRIu32
 #include <stdbool.h>            // bool
 #include <stdio.h>              // NULL
 
 #include <sys/param.h>
 #include <string.h>
 
 #include <crm/crm.h>
 #include <crm/common/xml.h>
 #include <crm/cluster.h>
 #include <crm/cib.h>
 
 #include <pacemaker-controld.h>
 
 /* From join_dc... */
 extern gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source);
 
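 /*!
  * \internal
  * \brief Filter a cluster message, triggering an election on a duplicate DC
  *
  * \param[in,out] msg  Cluster message to filter and possibly route
  */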
 void
 crmd_ha_msg_filter(xmlNode * msg)
 {
     if (AM_I_DC) {
         const char *sys_from = crm_element_value(msg, PCMK__XA_CRM_SYS_FROM);
 
         if (pcmk__str_eq(sys_from, CRM_SYSTEM_DC, pcmk__str_casei)) {
             const char *from = crm_element_value(msg, PCMK__XA_SRC);
 
             if (!pcmk__str_eq(from, controld_globals.our_nodename,
                               pcmk__str_casei)) {
                 int level = LOG_INFO;
                 const char *op = crm_element_value(msg, PCMK__XA_CRM_TASK);
 
                 /* make sure the election happens NOW */
                 if (controld_globals.fsa_state != S_ELECTION) {
                     ha_msg_input_t new_input;
 
                     level = LOG_WARNING;
                     new_input.msg = msg;
                     register_fsa_error_adv(C_FSA_INTERNAL, I_ELECTION, NULL, &new_input,
                                            __func__);
                 }
 
                 do_crm_log(level, "Another DC detected: %s (op=%s)", from, op);
                 goto done;
             }
         }
 
     } else {
         const char *sys_to = crm_element_value(msg, PCMK__XA_CRM_SYS_TO);
 
         if (pcmk__str_eq(sys_to, CRM_SYSTEM_DC, pcmk__str_casei)) {
             return;
         }
     }
 
     /* crm_log_xml_trace(msg, "HA[inbound]"); */
     route_message(C_HA_MESSAGE, msg);
 
   done:
     controld_trigger_fsa();
 }
 
 /*!
  * \internal
  * \brief Check whether a node is online
  *
  * \param[in] node  Node to check
  *
  * \retval -1 if completely dead
  * \retval  0 if partially alive
  * \retval  1 if completely alive
  */
 static int
 node_alive(const pcmk__node_status_t *node)
 {
     if (pcmk_is_set(node->flags, pcmk__node_status_remote)) {
         // Pacemaker Remote nodes can't be partially alive
-        return pcmk__str_eq(node->state, CRM_NODE_MEMBER, pcmk__str_casei) ? 1: -1;
+        if (pcmk__str_eq(node->state, PCMK_VALUE_MEMBER, pcmk__str_none)) {
+            return 1;
+        }
+        return -1;
 
     } else if (pcmk__cluster_is_node_active(node)) {
         // Completely up cluster node: both cluster member and peer
         return 1;
 
     } else if (!pcmk_is_set(node->processes, crm_get_cluster_proc())
-               && !pcmk__str_eq(node->state, CRM_NODE_MEMBER, pcmk__str_casei)) {
+               && !pcmk__str_eq(node->state, PCMK_VALUE_MEMBER,
+                                pcmk__str_none)) {
         // Completely down cluster node: neither cluster member nor peer
         return -1;
     }
 
     // Partially up cluster node: only cluster member or only peer
     return 0;
 }
 
 #define state_text(state) ((state)? (const char *)(state) : "in unknown state")
 
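 /*!
  * \internal
  * \brief React to a change in a node's name, membership state, or processes
  *
  * \param[in]     type  Aspect of node status that changed
  * \param[in,out] node  Node whose status changed
  * \param[in]     data  Previous value of the changed aspect (depends on type)
  */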
 void
 peer_update_callback(enum pcmk__node_update type, pcmk__node_status_t *node,
                      const void *data)
 {
     uint32_t old = 0;
     bool appeared = FALSE;
     bool is_remote = pcmk_is_set(node->flags, pcmk__node_status_remote);
 
     controld_node_pending_timer(node);
 
     /* The controller waits to receive some information from the membership
      * layer before declaring itself operational. If this is being called for a
      * cluster node, indicate that we have it.
      */
     if (!is_remote) {
         controld_set_fsa_input_flags(R_PEER_DATA);
     }
 
     if ((type == pcmk__node_update_processes)
         && pcmk_is_set(node->processes, crm_get_cluster_proc())
         && !AM_I_DC
         && !is_remote) {
         /*
          * This is a hack until we can send to a nodeid and/or we fix node name lookups
          * These messages are ignored in crmd_ha_msg_filter()
          */
         xmlNode *query = create_request(CRM_OP_HELLO, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL);
 
         crm_debug("Sending hello to node %" PRIu32 " so that it learns our "
                   "node name",
                   node->cluster_layer_id);
         pcmk__cluster_send_message(node, pcmk__cluster_msg_controld, query);
 
         pcmk__xml_free(query);
     }
 
     if (node->name == NULL) {
         return;
     }
 
     switch (type) {
         case pcmk__node_update_name:
             /* If we've never seen the node, then it also won't be in the status section */
             crm_info("%s node %s is now %s",
                      (is_remote? "Remote" : "Cluster"),
                      node->name, state_text(node->state));
             return;
 
         case pcmk__node_update_state:
             /* This callback should not be called unless the state actually
              * changed, but here's a failsafe just in case.
              */
             CRM_CHECK(!pcmk__str_eq(data, node->state, pcmk__str_casei),
                       return);
 
             crm_info("%s node %s is now %s (was %s)",
                      (is_remote? "Remote" : "Cluster"),
                      node->name, state_text(node->state), state_text(data));
 
-            if (pcmk__str_eq(CRM_NODE_MEMBER, node->state, pcmk__str_casei)) {
+            if (pcmk__str_eq(PCMK_VALUE_MEMBER, node->state, pcmk__str_none)) {
                 appeared = TRUE;
                 if (!is_remote) {
                     remove_stonith_cleanup(node->name);
                 }
             } else {
                 controld_remove_failed_sync_node(node->name);
                 controld_remove_voter(node->name);
             }
 
             crmd_alert_node_event(node);
             break;
 
         case pcmk__node_update_processes:
             CRM_CHECK(data != NULL, return);
             old = *(const uint32_t *)data;
             appeared = pcmk_is_set(node->processes, crm_get_cluster_proc());
 
             {
                 const char *dc_s = controld_globals.dc_name;
 
                 if ((dc_s == NULL) && AM_I_DC) {
                     dc_s = PCMK_VALUE_TRUE;
                 }
 
                 crm_info("Node %s is %s a peer " QB_XS
                          " DC=%s old=%#07x new=%#07x",
                          node->name, (appeared? "now" : "no longer"),
                          pcmk__s(dc_s, "<none>"), old, node->processes);
             }
 
             if (!pcmk_is_set((node->processes ^ old), crm_get_cluster_proc())) {
                 /* Peer status did not change. This should not be possible,
                  * since we don't track process flags other than peer status.
                  */
                 crm_trace("Process flag %#7x did not change from %#7x to %#7x",
                           crm_get_cluster_proc(), old, node->processes);
                 return;
             }
 
             if (!appeared) {
                 node->peer_lost = time(NULL);
                 controld_remove_failed_sync_node(node->name);
                 controld_remove_voter(node->name);
             }
 
             if (!pcmk_is_set(controld_globals.fsa_input_register,
                              R_CIB_CONNECTED)) {
                 crm_trace("Ignoring peer status change because not connected to CIB");
                 return;
 
             } else if (controld_globals.fsa_state == S_STOPPING) {
                 crm_trace("Ignoring peer status change because stopping");
                 return;
             }
 
             if (!appeared
                 && pcmk__str_eq(node->name, controld_globals.our_nodename,
                                 pcmk__str_casei)) {
                 /* Did we get evicted? */
                 crm_notice("Our peer connection failed");
                 register_fsa_input(C_CRMD_STATUS_CALLBACK, I_ERROR, NULL);
 
             } else if (pcmk__str_eq(node->name, controld_globals.dc_name,
                                     pcmk__str_casei)
                        && !pcmk__cluster_is_node_active(node)) {
 
                 /* The DC has left, so delete its transient attributes and
                  * trigger a new election.
                  *
                  * A DC sends its shutdown request to all peers, who update the
                  * DC's expected state to down. This avoids fencing upon
                  * deletion of its transient attributes.
                  */
                 crm_notice("Our peer on the DC (%s) is dead",
                            controld_globals.dc_name);
 
                 register_fsa_input(C_CRMD_STATUS_CALLBACK, I_ELECTION, NULL);
                 controld_delete_node_state(node->name, controld_section_attrs,
                                            cib_none);
 
             } else if (AM_I_DC
                        || pcmk_is_set(controld_globals.flags, controld_dc_left)
                        || (controld_globals.dc_name == NULL)) {
                 /* This only needs to be done once, so normally the DC should do
                  * it. However, if there is no DC, every node must do it, since
                  * there is no other way to ensure at least one node does it.
                  */
                 if (appeared) {
                     te_trigger_stonith_history_sync(FALSE);
                 } else {
                     controld_delete_node_state(node->name,
                                                controld_section_attrs,
                                                cib_none);
                 }
             }
             break;
     }
 
     if (AM_I_DC) {
         xmlNode *update = NULL;
         int flags = node_update_peer;
         int alive = node_alive(node);
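         // Check whether a graph action already expects this node to be down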
         pcmk__graph_action_t *down = match_down_event(node->xml_id);
 
         crm_trace("Alive=%d, appeared=%d, down=%d",
                   alive, appeared, (down? down->id : -1));
 
         if (appeared && (alive > 0) && !is_remote) {
             register_fsa_input_before(C_FSA_INTERNAL, I_NODE_JOIN, NULL);
         }
 
         if (down) {
             const char *task = crm_element_value(down->xml, PCMK_XA_OPERATION);
 
             if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) {
                 const bool confirmed =
                     pcmk_is_set(down->flags, pcmk__graph_action_confirmed);
 
                 /* tengine_stonith_callback() confirms fence actions */
                 crm_trace("Updating CIB %s fencer reported fencing of %s complete",
                           (confirmed? "after" : "before"), node->name);
 
             } else if (!appeared && pcmk__str_eq(task, PCMK_ACTION_DO_SHUTDOWN,
                                                  pcmk__str_casei)) {
 
                 // Shutdown actions are immediately confirmed (i.e. no_wait)
                 if (!is_remote) {
                     flags |= node_update_join | node_update_expected;
                     crmd_peer_down(node, FALSE);
                     check_join_state(controld_globals.fsa_state, __func__);
                 }
                 if (alive >= 0) {
                     crm_info("%s of peer %s is in progress " QB_XS " action=%d",
                              task, node->name, down->id);
                 } else {
                     crm_notice("%s of peer %s is complete " QB_XS " action=%d",
                                task, node->name, down->id);
                     pcmk__update_graph(controld_globals.transition_graph, down);
                     trigger_graph();
                 }
 
             } else {
                 const char *liveness = "alive";
 
                 if (alive == 0) {
                     liveness = "partially alive";
 
                 } else if (alive < 0) {
                     liveness = "dead";
                 }
 
                 crm_trace("Node %s is %s, was expected to %s (op %d)",
                           node->name, liveness, task, down->id);
             }
 
         } else if (appeared == FALSE) {
             if ((controld_globals.transition_graph == NULL)
                 || (controld_globals.transition_graph->id == -1)) {
                 crm_info("Stonith/shutdown of node %s is unknown to the "
                          "current DC", node->name);
             } else {
                 crm_warn("Stonith/shutdown of node %s was not expected",
                          node->name);
             }
             if (!is_remote) {
                 crm_update_peer_join(__func__, node, controld_join_none);
                 check_join_state(controld_globals.fsa_state, __func__);
             }
             abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart,
                              "Node failure", NULL);
             fail_incompletable_actions(controld_globals.transition_graph,
                                        node->xml_id);
 
         } else {
             crm_trace("Node %s came up, was not expected to be down",
                       node->name);
         }
 
         if (is_remote) {
             /* A pacemaker_remote node won't have its cluster status updated
              * in the CIB by membership-layer callbacks, so do it here.
              */
             flags |= node_update_cluster;
 
             /* Trigger resource placement on newly integrated nodes */
             if (appeared) {
                 abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart,
                                  "Pacemaker Remote node integrated", NULL);
             }
         }
 
         if (!appeared && (type == pcmk__node_update_processes)
             && (node->when_member > 1)) {
             /* The node left CPG but is still a cluster member. Set its
              * membership time to 1 to record it in the cluster state as a
              * boolean, so we don't fence it due to
              * PCMK_OPT_NODE_PENDING_TIMEOUT.
              */
             node->when_member = 1;
             flags |= node_update_cluster;
             controld_node_pending_timer(node);
         }
 
         /* Update the CIB node state */
         update = create_node_state_update(node, flags, NULL, __func__);
         if (update == NULL) {
             crm_debug("Node state update not yet possible for %s", node->name);
         } else {
             fsa_cib_anon_update(PCMK_XE_STATUS, update);
         }
         pcmk__xml_free(update);
     }
 
     controld_trigger_fsa();
 }
 
 gboolean
 crm_fsa_trigger(gpointer user_data)
 {
     crm_trace("Invoked (queue len: %d)",
               g_list_length(controld_globals.fsa_message_queue));
     s_crmd_fsa(C_FSA_INTERNAL);
     crm_trace("Exited  (queue len: %d)",
               g_list_length(controld_globals.fsa_message_queue));
     return TRUE;
 }
diff --git a/daemons/controld/controld_membership.c b/daemons/controld/controld_membership.c
index 29ff8314ad..bdf8199ecf 100644
--- a/daemons/controld/controld_membership.c
+++ b/daemons/controld/controld_membership.c
@@ -1,467 +1,467 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 /* put these first so that uuid_t is defined without conflicts */
 #include <crm_internal.h>
 
 #include <string.h>
 
 #include <crm/crm.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 #include <crm/cluster/internal.h>
 
 #include <pacemaker-controld.h>
 
 void post_cache_update(int instance);
 
 extern gboolean check_join_state(enum crmd_fsa_state cur_state, const char *source);
 
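 /*!
  * \internal
  * \brief Mark an inactive peer as down, reacting if it was us or the DC
  *
  * \param[in]     key        Ignored (peer cache key)
  * \param[in,out] value      Peer cache entry (pcmk__node_status_t *)
  * \param[in]     user_data  Ignored
  */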
 static void
 reap_dead_nodes(gpointer key, gpointer value, gpointer user_data)
 {
     pcmk__node_status_t *node = value;
 
     if (pcmk__cluster_is_node_active(node)) {
         return;
     }
 
     crm_update_peer_join(__func__, node, controld_join_none);
 
     if ((node != NULL) && (node->name != NULL)) {
         if (pcmk__str_eq(controld_globals.our_nodename, node->name,
                          pcmk__str_casei)) {
             crm_err("We're not part of the cluster anymore");
             register_fsa_input(C_FSA_INTERNAL, I_ERROR, NULL);
 
         } else if (!AM_I_DC
                    && pcmk__str_eq(node->name, controld_globals.dc_name,
                                    pcmk__str_casei)) {
             crm_warn("Our DC node (%s) left the cluster", node->name);
             register_fsa_input(C_FSA_INTERNAL, I_ELECTION, NULL);
         }
     }
 
     if ((controld_globals.fsa_state == S_INTEGRATION)
         || (controld_globals.fsa_state == S_FINALIZE_JOIN)) {
         check_join_state(controld_globals.fsa_state, __func__);
     }
     if ((node != NULL) && (node->xml_id != NULL)) {
         fail_incompletable_actions(controld_globals.transition_graph,
                                    node->xml_id);
     }
 }
 
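 /*!
  * \internal
  * \brief Update controller state after a membership event
  *
  * \param[in] instance  Membership event number from the cluster layer
  */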
 void
 post_cache_update(int instance)
 {
     xmlNode *no_op = NULL;
 
     crm_peer_seq = instance;
     crm_debug("Updated cache after membership event %d.", instance);
 
     g_hash_table_foreach(crm_peer_cache, reap_dead_nodes, NULL);
     controld_set_fsa_input_flags(R_MEMBERSHIP);
 
     if (AM_I_DC) {
         populate_cib_nodes(node_update_quick | node_update_cluster | node_update_peer |
                            node_update_expected, __func__);
     }
 
     /*
      * If we lost nodes, we should re-check the election status
      * Safe to call outside of an election
      */
     controld_set_fsa_action_flags(A_ELECTION_CHECK);
     controld_trigger_fsa();
 
     /* Membership changed, remind everyone we're here.
      * This will aid detection of duplicate DCs
      */
     no_op = create_request(CRM_OP_NOOP, NULL, NULL, CRM_SYSTEM_CRMD,
                            AM_I_DC ? CRM_SYSTEM_DC : CRM_SYSTEM_CRMD, NULL);
     pcmk__cluster_send_message(NULL, pcmk__cluster_msg_controld, no_op);
     pcmk__xml_free(no_op);
 }
 
 static void
 crmd_node_update_complete(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     fsa_data_t *msg_data = NULL;
 
     if (rc == pcmk_ok) {
         crm_trace("Node update %d complete", call_id);
 
     } else if (call_id < pcmk_ok) {
         crm_err("Node update failed: %s (%d)", pcmk_strerror(call_id), call_id);
         crm_log_xml_debug(msg, "failed");
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
 
     } else {
         crm_err("Node update %d failed: %s (%d)", call_id, pcmk_strerror(rc), rc);
         crm_log_xml_debug(msg, "failed");
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
     }
 }
 
 /*!
  * \internal
  * \brief Create an XML node state tag with updates
  *
  * \param[in,out] node    Node whose state will be used for update
  * \param[in]     flags   Bitmask of node_update_flags indicating what to update
  * \param[in,out] parent  XML node to contain update (or NULL)
  * \param[in]     source  Who requested the update (only used for logging)
  *
  * \return Pointer to created node state tag
  */
 xmlNode *
 create_node_state_update(pcmk__node_status_t *node, int flags,
                          xmlNode *parent, const char *source)
 {
     const char *value = NULL;
     xmlNode *node_state;
 
     if (!node->state) {
         crm_info("Node update for %s cancelled: no state, not seen yet",
                  node->name);
         return NULL;
     }
 
     node_state = pcmk__xe_create(parent, PCMK__XE_NODE_STATE);
 
     if (pcmk_is_set(node->flags, pcmk__node_status_remote)) {
         pcmk__xe_set_bool_attr(node_state, PCMK_XA_REMOTE_NODE, true);
     }
 
     if (crm_xml_add(node_state, PCMK_XA_ID,
                     pcmk__cluster_node_uuid(node)) == NULL) {
         crm_info("Node update for %s cancelled: no ID", node->name);
         pcmk__xml_free(node_state);
         return NULL;
     }
 
     crm_xml_add(node_state, PCMK_XA_UNAME, node->name);
 
     if ((flags & node_update_cluster) && node->state) {
         if (compare_version(controld_globals.dc_version, "3.18.0") >= 0) {
             // A value 0 means the node is not a cluster member.
             crm_xml_add_ll(node_state, PCMK__XA_IN_CCM, node->when_member);
 
         } else {
             pcmk__xe_set_bool_attr(node_state, PCMK__XA_IN_CCM,
-                                   pcmk__str_eq(node->state, CRM_NODE_MEMBER,
-                                                pcmk__str_casei));
+                                   pcmk__str_eq(node->state, PCMK_VALUE_MEMBER,
+                                                pcmk__str_none));
         }
     }
 
     if (!pcmk_is_set(node->flags, pcmk__node_status_remote)) {
         if (flags & node_update_peer) {
             if (compare_version(controld_globals.dc_version, "3.18.0") >= 0) {
                 // A value 0 means the peer is offline in CPG.
                 crm_xml_add_ll(node_state, PCMK_XA_CRMD, node->when_online);
 
             } else {
                 // @COMPAT DCs < 2.1.7 use online/offline rather than timestamp
                 value = PCMK_VALUE_OFFLINE;
                 if (pcmk_is_set(node->processes, crm_get_cluster_proc())) {
                     value = PCMK_VALUE_ONLINE;
                 }
                 crm_xml_add(node_state, PCMK_XA_CRMD, value);
             }
         }
 
         if (flags & node_update_join) {
             if (controld_get_join_phase(node) <= controld_join_none) {
                 value = CRMD_JOINSTATE_DOWN;
             } else {
                 value = CRMD_JOINSTATE_MEMBER;
             }
             crm_xml_add(node_state, PCMK__XA_JOIN, value);
         }
 
         if (flags & node_update_expected) {
             crm_xml_add(node_state, PCMK_XA_EXPECTED, node->expected);
         }
     }
 
     crm_xml_add(node_state, PCMK_XA_CRM_DEBUG_ORIGIN, source);
 
     return node_state;
 }
 
 static void
 remove_conflicting_node_callback(xmlNode * msg, int call_id, int rc,
                                  xmlNode * output, void *user_data)
 {
     char *node_uuid = user_data;
 
     do_crm_log_unlikely(rc == 0 ? LOG_DEBUG : LOG_NOTICE,
                         "Deletion of the unknown conflicting node \"%s\": %s (rc=%d)",
                         node_uuid, pcmk_strerror(rc), rc);
 }
 
 static void
 search_conflicting_node_callback(xmlNode * msg, int call_id, int rc,
                                  xmlNode * output, void *user_data)
 {
     char *new_node_uuid = user_data;
     xmlNode *node_xml = NULL;
 
     if (rc != pcmk_ok) {
         if (rc != -ENXIO) {
             crm_notice("Searching conflicting nodes for %s failed: %s (%d)",
                        new_node_uuid, pcmk_strerror(rc), rc);
         }
         return;
 
     } else if (output == NULL) {
         return;
     }
 
     if (pcmk__xe_is(output, PCMK_XE_NODE)) {
         node_xml = output;
 
     } else {
         node_xml = pcmk__xe_first_child(output, PCMK_XE_NODE, NULL, NULL);
     }
 
     for (; node_xml != NULL; node_xml = pcmk__xe_next_same(node_xml)) {
         const char *node_uuid = NULL;
         const char *node_uname = NULL;
         GHashTableIter iter;
         pcmk__node_status_t *node = NULL;
         gboolean known = FALSE;
 
         node_uuid = crm_element_value(node_xml, PCMK_XA_ID);
         node_uname = crm_element_value(node_xml, PCMK_XA_UNAME);
 
         if (node_uuid == NULL || node_uname == NULL) {
             continue;
         }
 
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if ((node != NULL)
                 && pcmk__str_eq(node->xml_id, node_uuid, pcmk__str_casei)
                 && pcmk__str_eq(node->name, node_uname, pcmk__str_casei)) {
 
                 known = TRUE;
                 break;
             }
         }
 
         if (known == FALSE) {
             cib_t *cib_conn = controld_globals.cib_conn;
             int delete_call_id = 0;
             xmlNode *node_state_xml = NULL;
 
             crm_notice("Deleting unknown node %s/%s which has conflicting uname with %s",
                        node_uuid, node_uname, new_node_uuid);
 
             delete_call_id = cib_conn->cmds->remove(cib_conn, PCMK_XE_NODES,
                                                     node_xml, cib_none);
             fsa_register_cib_callback(delete_call_id, pcmk__str_copy(node_uuid),
                                       remove_conflicting_node_callback);
 
             node_state_xml = pcmk__xe_create(NULL, PCMK__XE_NODE_STATE);
             crm_xml_add(node_state_xml, PCMK_XA_ID, node_uuid);
             crm_xml_add(node_state_xml, PCMK_XA_UNAME, node_uname);
 
             delete_call_id = cib_conn->cmds->remove(cib_conn, PCMK_XE_STATUS,
                                                     node_state_xml, cib_none);
             fsa_register_cib_callback(delete_call_id, pcmk__str_copy(node_uuid),
                                       remove_conflicting_node_callback);
             pcmk__xml_free(node_state_xml);
         }
     }
 }
 
 static void
 node_list_update_callback(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     fsa_data_t *msg_data = NULL;
 
     if (call_id < pcmk_ok) {
         crm_err("Node list update failed: %s (%d)", pcmk_strerror(call_id), call_id);
         crm_log_xml_debug(msg, "update:failed");
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
 
     } else if (rc < pcmk_ok) {
         crm_err("Node update %d failed: %s (%d)", call_id, pcmk_strerror(rc), rc);
         crm_log_xml_debug(msg, "update:failed");
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
     }
 }
 
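 /*!
  * \internal
  * \brief Update the CIB <nodes> section (and node states, if DC) from
  *        membership data
  *
  * \param[in] flags   Group of enum node_update_flags
  * \param[in] source  Caller's function name (for logging)
  */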
 void
 populate_cib_nodes(enum node_update_flags flags, const char *source)
 {
     cib_t *cib_conn = controld_globals.cib_conn;
 
     int call_id = 0;
     gboolean from_hashtable = TRUE;
     xmlNode *node_list = pcmk__xe_create(NULL, PCMK_XE_NODES);
 
 #if SUPPORT_COROSYNC
     if (!pcmk_is_set(flags, node_update_quick)
         && (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync)) {
 
         from_hashtable = pcmk__corosync_add_nodes(node_list);
     }
 #endif
 
     if (from_hashtable) {
         GHashTableIter iter;
         pcmk__node_status_t *node = NULL;
         GString *xpath = NULL;
 
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             xmlNode *new_node = NULL;
 
             if ((node->xml_id != NULL) && (node->name != NULL)) {
                 crm_trace("Creating node entry for %s/%s",
                           node->name, node->xml_id);
                 if (xpath == NULL) {
                     xpath = g_string_sized_new(512);
                 } else {
                     g_string_truncate(xpath, 0);
                 }
 
                 /* We need both to be valid */
                 new_node = pcmk__xe_create(node_list, PCMK_XE_NODE);
                 crm_xml_add(new_node, PCMK_XA_ID, node->xml_id);
                 crm_xml_add(new_node, PCMK_XA_UNAME, node->name);
 
                 /* Search and remove unknown nodes with the conflicting uname from CIB */
                 pcmk__g_strcat(xpath,
                                "/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION
                                "/" PCMK_XE_NODES "/" PCMK_XE_NODE
                                "[@" PCMK_XA_UNAME "='", node->name, "']"
                                "[@" PCMK_XA_ID "!='", node->xml_id, "']", NULL);
 
                 call_id = cib_conn->cmds->query(cib_conn,
                                                 (const char *) xpath->str, NULL,
                                                 cib_xpath);
                 fsa_register_cib_callback(call_id, pcmk__str_copy(node->xml_id),
                                           search_conflicting_node_callback);
             }
         }
 
         if (xpath != NULL) {
             g_string_free(xpath, TRUE);
         }
     }
 
     crm_trace("Populating <nodes> section from %s", from_hashtable ? "hashtable" : "cluster");
 
     if ((controld_update_cib(PCMK_XE_NODES, node_list, cib_none,
                              node_list_update_callback) == pcmk_rc_ok)
          && (crm_peer_cache != NULL) && AM_I_DC) {
         /*
          * There is no need to update the local CIB with our values if
          * we've not seen valid membership data
          */
         GHashTableIter iter;
         pcmk__node_status_t *node = NULL;
 
         pcmk__xml_free(node_list);
         node_list = pcmk__xe_create(NULL, PCMK_XE_STATUS);
 
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             create_node_state_update(node, flags, node_list, source);
         }
 
         if (crm_remote_peer_cache) {
             g_hash_table_iter_init(&iter, crm_remote_peer_cache);
             while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
                 create_node_state_update(node, flags, node_list, source);
             }
         }
 
         controld_update_cib(PCMK_XE_STATUS, node_list, cib_none,
                             crmd_node_update_complete);
     }
     pcmk__xml_free(node_list);
 }
 
 static void
 cib_quorum_update_complete(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     fsa_data_t *msg_data = NULL;
 
     if (rc == pcmk_ok) {
         crm_trace("Quorum update %d complete", call_id);
 
     } else {
         crm_err("Quorum update %d failed: %s (%d)", call_id, pcmk_strerror(rc), rc);
         crm_log_xml_debug(msg, "failed");
         register_fsa_error(C_FSA_INTERNAL, I_ERROR, NULL);
     }
 }
 
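 /*!
  * \internal
  * \brief Update the controller's quorum state, updating the CIB if we're DC
  *
  * \param[in] quorum        Whether the cluster now has quorum
  * \param[in] force_update  If TRUE, update the CIB even if quorum is unchanged
  */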
 void
 crm_update_quorum(gboolean quorum, gboolean force_update)
 {
     bool has_quorum = pcmk_is_set(controld_globals.flags, controld_has_quorum);
 
     if (quorum) {
         controld_set_global_flags(controld_ever_had_quorum);
 
     } else if (pcmk_all_flags_set(controld_globals.flags,
                                   controld_ever_had_quorum
                                   |controld_no_quorum_suicide)) {
         pcmk__panic(__func__);
     }
 
     if (AM_I_DC
         && ((has_quorum && !quorum) || (!has_quorum && quorum)
             || force_update)) {
         xmlNode *update = NULL;
 
         update = pcmk__xe_create(NULL, PCMK_XE_CIB);
         crm_xml_add_int(update, PCMK_XA_HAVE_QUORUM, quorum);
         crm_xml_add(update, PCMK_XA_DC_UUID, controld_globals.our_uuid);
 
         crm_debug("Updating quorum status to %s", pcmk__btoa(quorum));
         controld_update_cib(PCMK_XE_CIB, update, cib_none,
                             cib_quorum_update_complete);
         pcmk__xml_free(update);
 
         /* Quorum changes usually cause a new transition via other activity:
          * quorum gained via a node joining will abort via the node join,
          * and quorum lost via a node leaving will usually abort via resource
          * activity and/or fencing.
          *
          * However, it is possible that nothing else causes a transition (e.g.
          * someone forces quorum via corosync-cmapctl, or quorum is lost due to
          * a node in standby shutting down cleanly), so here ensure a new
          * transition is triggered.
          */
         if (quorum) {
             /* If quorum was gained, abort after a short delay, in case multiple
              * nodes are joining around the same time, so the one that brings us
              * to quorum doesn't cause all the remaining ones to be fenced.
              */
             abort_after_delay(PCMK_SCORE_INFINITY, pcmk__graph_restart,
                               "Quorum gained", 5000);
         } else {
             abort_transition(PCMK_SCORE_INFINITY, pcmk__graph_restart,
                              "Quorum lost", NULL);
         }
     }
 
     if (quorum) {
         controld_set_global_flags(controld_has_quorum);
     } else {
         controld_clear_global_flags(controld_has_quorum);
     }
 }
diff --git a/daemons/controld/controld_messages.c b/daemons/controld/controld_messages.c
index ab6da9befc..41f8f47bce 100644
--- a/daemons/controld/controld_messages.c
+++ b/daemons/controld/controld_messages.c
@@ -1,1361 +1,1361 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <string.h>
 #include <time.h>
 
 #include <crm/crm.h>
 #include <crm/common/xml.h>
 #include <crm/cluster/internal.h>
 #include <crm/cib.h>
 #include <crm/common/ipc_internal.h>
 
 #include <pacemaker-controld.h>
 
 static enum crmd_fsa_input handle_message(xmlNode *msg,
                                           enum crmd_fsa_cause cause);
 static void handle_response(xmlNode *stored_msg);
 static enum crmd_fsa_input handle_request(xmlNode *stored_msg,
                                           enum crmd_fsa_cause cause);
 static enum crmd_fsa_input handle_shutdown_request(xmlNode *stored_msg);
 static void send_msg_via_ipc(xmlNode * msg, const char *sys);
 
 /* debug only, can wrap all it likes */
 static int last_data_id = 0;
 
 void
 register_fsa_error_adv(enum crmd_fsa_cause cause, enum crmd_fsa_input input,
                        fsa_data_t * cur_data, void *new_data, const char *raised_from)
 {
     /* save the current actions if any */
     if (controld_globals.fsa_actions != A_NOTHING) {
         register_fsa_input_adv(cur_data ? cur_data->fsa_cause : C_FSA_INTERNAL,
                                I_NULL, cur_data ? cur_data->data : NULL,
                                controld_globals.fsa_actions, TRUE, __func__);
     }
 
     /* reset the action list */
     crm_info("Resetting the current action list");
     fsa_dump_actions(controld_globals.fsa_actions, "Drop");
     controld_globals.fsa_actions = A_NOTHING;
 
     /* register the error */
     register_fsa_input_adv(cause, input, new_data, A_NOTHING, TRUE, raised_from);
 }
 
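 /*!
  * \internal
  * \brief Add an input, with optional actions, to the FSA message queue
  *
  * \param[in] cause         What generated the input
  * \param[in] input         FSA input to add
  * \param[in] data          Input-specific data (copied according to \p cause)
  * \param[in] with_actions  FSA actions to register along with the input
  * \param[in] prepend       If TRUE, add the input to the front of the queue
  * \param[in] raised_from   Caller's function name (for logging)
  */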
 void
 register_fsa_input_adv(enum crmd_fsa_cause cause, enum crmd_fsa_input input,
                        void *data, uint64_t with_actions,
                        gboolean prepend, const char *raised_from)
 {
     unsigned old_len = g_list_length(controld_globals.fsa_message_queue);
     fsa_data_t *fsa_data = NULL;
 
     if (raised_from == NULL) {
         raised_from = "<unknown>";
     }
 
     if (input == I_NULL && with_actions == A_NOTHING /* && data == NULL */ ) {
         /* no point doing anything */
         crm_err("Cannot add entry to queue: no input and no action");
         return;
     }
 
     if (input == I_WAIT_FOR_EVENT) {
         controld_set_global_flags(controld_fsa_is_stalled);
         crm_debug("Stalling the FSA pending further input: source=%s cause=%s data=%p queue=%d",
                   raised_from, fsa_cause2string(cause), data, old_len);
 
         if (old_len > 0) {
             fsa_dump_queue(LOG_TRACE);
             prepend = FALSE;
         }
 
         if (data == NULL) {
             controld_set_fsa_action_flags(with_actions);
             fsa_dump_actions(with_actions, "Restored");
             return;
         }
 
         /* Store everything in the new event and reset
          * controld_globals.fsa_actions
          */
         with_actions |= controld_globals.fsa_actions;
         controld_globals.fsa_actions = A_NOTHING;
     }
 
     last_data_id++;
     crm_trace("%s %s FSA input %d (%s) due to %s, %s data",
               raised_from, (prepend? "prepended" : "appended"), last_data_id,
               fsa_input2string(input), fsa_cause2string(cause),
               (data? "with" : "without"));
 
     fsa_data = pcmk__assert_alloc(1, sizeof(fsa_data_t));
     fsa_data->id = last_data_id;
     fsa_data->fsa_input = input;
     fsa_data->fsa_cause = cause;
     fsa_data->origin = raised_from;
     fsa_data->data = NULL;
     fsa_data->data_type = fsa_dt_none;
     fsa_data->actions = with_actions;
 
     if (with_actions != A_NOTHING) {
         crm_trace("Adding actions %.16llx to input",
                   (unsigned long long) with_actions);
     }
 
     if (data != NULL) {
         switch (cause) {
             case C_FSA_INTERNAL:
             case C_CRMD_STATUS_CALLBACK:
             case C_IPC_MESSAGE:
             case C_HA_MESSAGE:
                 CRM_CHECK(((ha_msg_input_t *) data)->msg != NULL,
                           crm_err("Bogus data from %s", raised_from));
                 crm_trace("Copying %s data from %s as cluster message data",
                           fsa_cause2string(cause), raised_from);
                 fsa_data->data = copy_ha_msg_input(data);
                 fsa_data->data_type = fsa_dt_ha_msg;
                 break;
 
             case C_LRM_OP_CALLBACK:
                 crm_trace("Copying %s data from %s as lrmd_event_data_t",
                           fsa_cause2string(cause), raised_from);
                 fsa_data->data = lrmd_copy_event((lrmd_event_data_t *) data);
                 fsa_data->data_type = fsa_dt_lrm;
                 break;
 
             case C_TIMER_POPPED:
             case C_SHUTDOWN:
             case C_UNKNOWN:
             case C_STARTUP:
                 crm_crit("Copying %s data (from %s) is not yet implemented",
                          fsa_cause2string(cause), raised_from);
                 crmd_exit(CRM_EX_SOFTWARE);
                 break;
         }
     }
 
     /* Queue the input; it will be freed by delete_fsa_input() after processing */
     if (prepend) {
         controld_globals.fsa_message_queue
             = g_list_prepend(controld_globals.fsa_message_queue, fsa_data);
     } else {
         controld_globals.fsa_message_queue
             = g_list_append(controld_globals.fsa_message_queue, fsa_data);
     }
 
     crm_trace("FSA message queue length is %d",
               g_list_length(controld_globals.fsa_message_queue));
 
     /* fsa_dump_queue(LOG_TRACE); */
 
     if (old_len == g_list_length(controld_globals.fsa_message_queue)) {
         crm_err("Couldn't add message to the queue");
     }
 
     if (input != I_WAIT_FOR_EVENT) {
         controld_trigger_fsa();
     }
 }
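 
 /* Illustrative sketch (hypothetical names, not the real macros): the
  * register_fsa_input() and register_fsa_input_later() conveniences used in
  * this file presumably expand to register_fsa_input_adv() along these lines,
  * with "later" appending to the queue rather than prepending.
  */
 #define example_register_fsa_input(cause, input, data) \
     register_fsa_input_adv(cause, input, data, A_NOTHING, TRUE, __func__)
 #define example_register_fsa_input_later(cause, input, data) \
     register_fsa_input_adv(cause, input, data, A_NOTHING, FALSE, __func__)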
 
 void
 fsa_dump_queue(int log_level)
 {
     int offset = 0;
 
     for (GList *iter = controld_globals.fsa_message_queue; iter != NULL;
          iter = iter->next) {
         fsa_data_t *data = (fsa_data_t *) iter->data;
 
         do_crm_log_unlikely(log_level,
                             "queue[%d.%d]: input %s raised by %s(%p.%d)\t(cause=%s)",
                             offset++, data->id, fsa_input2string(data->fsa_input),
                             data->origin, data->data, data->data_type,
                             fsa_cause2string(data->fsa_cause));
     }
 }
 
 ha_msg_input_t *
 copy_ha_msg_input(ha_msg_input_t * orig)
 {
     xmlNode *wrapper = NULL;
 
     ha_msg_input_t *copy = pcmk__assert_alloc(1, sizeof(ha_msg_input_t));
 
     copy->msg = (orig != NULL)? pcmk__xml_copy(NULL, orig->msg) : NULL;
 
     wrapper = pcmk__xe_first_child(copy->msg, PCMK__XE_CRM_XML, NULL, NULL);
     copy->xml = pcmk__xe_first_child(wrapper, NULL, NULL, NULL);
     return copy;
 }
 
 void
 delete_fsa_input(fsa_data_t * fsa_data)
 {
     lrmd_event_data_t *op = NULL;
     xmlNode *foo = NULL;
 
     if (fsa_data == NULL) {
         return;
     }
     crm_trace("About to free %s data", fsa_cause2string(fsa_data->fsa_cause));
 
     if (fsa_data->data != NULL) {
         switch (fsa_data->data_type) {
             case fsa_dt_ha_msg:
                 delete_ha_msg_input(fsa_data->data);
                 break;
 
             case fsa_dt_xml:
                 foo = fsa_data->data;
                 pcmk__xml_free(foo);
                 break;
 
             case fsa_dt_lrm:
                 op = (lrmd_event_data_t *) fsa_data->data;
                 lrmd_free_event(op);
                 break;
 
             case fsa_dt_none:
                 if (fsa_data->data != NULL) {
                     crm_err("Don't know how to free %s data from %s",
                             fsa_cause2string(fsa_data->fsa_cause), fsa_data->origin);
                     crmd_exit(CRM_EX_SOFTWARE);
                 }
                 break;
         }
         crm_trace("%s data freed", fsa_cause2string(fsa_data->fsa_cause));
     }
 
     free(fsa_data);
 }
 
 /* returns the next message */
 fsa_data_t *
 get_message(void)
 {
     fsa_data_t *message
         = (fsa_data_t *) controld_globals.fsa_message_queue->data;
 
     controld_globals.fsa_message_queue
         = g_list_remove(controld_globals.fsa_message_queue, message);
     crm_trace("Processing input %d", message->id);
     return message;
 }
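 
 /* Illustrative sketch (assumption, not part of this change): the FSA loop
  * elsewhere in the controller presumably pairs get_message() with
  * delete_fsa_input() roughly like this.
  */
 static void
 example_drain_queue(void)
 {
     while (controld_globals.fsa_message_queue != NULL) {
         fsa_data_t *fsa_data = get_message();
 
         /* ... dispatch on fsa_data->fsa_input here ... */
         delete_fsa_input(fsa_data);
     }
 }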
 
 void *
 fsa_typed_data_adv(fsa_data_t * fsa_data, enum fsa_data_type a_type, const char *caller)
 {
     void *ret_val = NULL;
 
     if (fsa_data == NULL) {
         crm_err("%s: No FSA data available", caller);
 
     } else if (fsa_data->data == NULL) {
         crm_err("%s: No message data available. Origin: %s", caller, fsa_data->origin);
 
     } else if (fsa_data->data_type != a_type) {
         crm_crit("%s: Message data was the wrong type! %d vs. requested=%d.  Origin: %s",
                  caller, fsa_data->data_type, a_type, fsa_data->origin);
         CRM_ASSERT(fsa_data->data_type == a_type);
     } else {
         ret_val = fsa_data->data;
     }
 
     return ret_val;
 }
 
 /*	A_MSG_ROUTE	*/
 void
 do_msg_route(long long action,
              enum crmd_fsa_cause cause,
              enum crmd_fsa_state cur_state,
              enum crmd_fsa_input current_input, fsa_data_t * msg_data)
 {
     ha_msg_input_t *input = fsa_typed_data(fsa_dt_ha_msg);
 
     route_message(msg_data->fsa_cause, input->msg);
 }
 
 void
 route_message(enum crmd_fsa_cause cause, xmlNode * input)
 {
     ha_msg_input_t fsa_input;
     enum crmd_fsa_input result = I_NULL;
 
     fsa_input.msg = input;
     CRM_CHECK(cause == C_IPC_MESSAGE || cause == C_HA_MESSAGE, return);
 
     /* try passing the buck first */
     if (relay_message(input, cause == C_IPC_MESSAGE)) {
         return;
     }
 
     /* handle locally */
     result = handle_message(input, cause);
 
     /* done or process later? */
     switch (result) {
         case I_NULL:
         case I_CIB_OP:
         case I_ROUTER:
         case I_NODE_JOIN:
         case I_JOIN_REQUEST:
         case I_JOIN_RESULT:
             break;
         default:
             /* Deferring local processing of the message */
             register_fsa_input_later(cause, result, &fsa_input);
             return;
     }
 
     if (result != I_NULL) {
         /* add to the front of the queue */
         register_fsa_input(cause, result, &fsa_input);
     }
 }
 
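 /*!
  * \internal
  * \brief Route a controller message, relaying it to a peer if appropriate
  *
  * \param[in] msg                  Message XML
  * \param[in] originated_locally   Whether message was created on local node
  *
  * \return TRUE if the message was fully handled here, or FALSE if the caller
  *         still needs to process it locally
  */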
 gboolean
 relay_message(xmlNode * msg, gboolean originated_locally)
 {
     enum pcmk__cluster_msg dest = pcmk__cluster_msg_unknown;
     bool is_for_dc = false;
     bool is_for_dcib = false;
     bool is_for_te = false;
     bool is_for_crm = false;
     bool is_for_cib = false;
     bool is_local = false;
     bool broadcast = false;
     const char *host_to = NULL;
     const char *sys_to = NULL;
     const char *sys_from = NULL;
     const char *type = NULL;
     const char *task = NULL;
     const char *ref = NULL;
     pcmk__node_status_t *node_to = NULL;
 
     CRM_CHECK(msg != NULL, return TRUE);
 
     host_to = crm_element_value(msg, PCMK__XA_CRM_HOST_TO);
     sys_to = crm_element_value(msg, PCMK__XA_CRM_SYS_TO);
     sys_from = crm_element_value(msg, PCMK__XA_CRM_SYS_FROM);
     type = crm_element_value(msg, PCMK__XA_T);
     task = crm_element_value(msg, PCMK__XA_CRM_TASK);
     ref = crm_element_value(msg, PCMK_XA_REFERENCE);
 
     broadcast = pcmk__str_empty(host_to);
 
     if (ref == NULL) {
         ref = "without reference ID";
     }
 
     if (pcmk__str_eq(task, CRM_OP_HELLO, pcmk__str_casei)) {
         crm_trace("Received hello %s from %s (no processing needed)",
                   ref, pcmk__s(sys_from, "unidentified source"));
         crm_log_xml_trace(msg, "hello");
         return TRUE;
     }
 
     // Require message type (set by create_request())
     if (!pcmk__str_eq(type, PCMK__VALUE_CRMD, pcmk__str_none)) {
         crm_warn("Ignoring invalid message %s with type '%s' "
                  "(not '" PCMK__VALUE_CRMD "')",
                  ref, pcmk__s(type, ""));
         crm_log_xml_trace(msg, "ignored");
         return TRUE;
     }
 
     // Require a destination subsystem (also set by create_request())
     if (sys_to == NULL) {
         crm_warn("Ignoring invalid message %s with no " PCMK__XA_CRM_SYS_TO,
                  ref);
         crm_log_xml_trace(msg, "ignored");
         return TRUE;
     }
 
     // Get the message type appropriate to the destination subsystem
     if (pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync) {
         dest = pcmk__cluster_parse_msg_type(sys_to);
         if (dest == pcmk__cluster_msg_unknown) {
             /* Unrecognized value, use a sane default
              *
              * @TODO Maybe we should bail instead
              */
             dest = pcmk__cluster_msg_controld;
         }
     }
 
     is_for_dc = (strcasecmp(CRM_SYSTEM_DC, sys_to) == 0);
     is_for_dcib = (strcasecmp(CRM_SYSTEM_DCIB, sys_to) == 0);
     is_for_te = (strcasecmp(CRM_SYSTEM_TENGINE, sys_to) == 0);
     is_for_cib = (strcasecmp(CRM_SYSTEM_CIB, sys_to) == 0);
     is_for_crm = (strcasecmp(CRM_SYSTEM_CRMD, sys_to) == 0);
 
     // Check whether message should be processed locally
     is_local = false;
     if (broadcast) {
         if (is_for_dc || is_for_te) {
             is_local = false;
 
         } else if (is_for_crm) {
             if (pcmk__strcase_any_of(task, CRM_OP_NODE_INFO,
                                      PCMK__CONTROLD_CMD_NODES, NULL)) {
                 /* Node info requests do not specify a host, which is normally
                  * treated as "all hosts", because the whole point is that the
                  * client may not know the local node name. Always handle these
                  * requests locally.
                  */
                 is_local = true;
             } else {
                 is_local = !originated_locally;
             }
 
         } else {
             is_local = true;
         }
 
     } else if (pcmk__str_eq(controld_globals.our_nodename, host_to,
                             pcmk__str_casei)) {
         is_local = true;
 
     } else if (is_for_crm && pcmk__str_eq(task, CRM_OP_LRM_DELETE, pcmk__str_casei)) {
         xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_CRM_XML, NULL,
                                                 NULL);
         xmlNode *msg_data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL);
         const char *mode = crm_element_value(msg_data, PCMK__XA_MODE);
 
         if (pcmk__str_eq(mode, PCMK__VALUE_CIB, pcmk__str_none)) {
             // Local delete of an offline node's resource history
             is_local = true;
         }
     }
 
     // Check whether message should be relayed
 
     if (is_for_dc || is_for_dcib || is_for_te) {
         if (AM_I_DC) {
             if (is_for_te) {
                 crm_trace("Route message %s locally as transition request",
                           ref);
                 crm_log_xml_trace(msg, sys_to);
                 send_msg_via_ipc(msg, sys_to);
                 return TRUE; // No further processing of message is needed
             }
             crm_trace("Route message %s locally as DC request", ref);
             return FALSE; // More to be done by caller
         }
 
         if (originated_locally
             && !pcmk__strcase_any_of(sys_from, CRM_SYSTEM_PENGINE,
                                      CRM_SYSTEM_TENGINE, NULL)) {
             crm_trace("Relay message %s to DC (via %s)",
                       ref, pcmk__s(host_to, "broadcast"));
             crm_log_xml_trace(msg, "relayed");
             if (!broadcast) {
                 node_to = pcmk__get_node(0, host_to, NULL,
                                          pcmk__node_search_cluster_member);
             }
             pcmk__cluster_send_message(node_to, dest, msg);
             return TRUE;
         }
 
         /* Transition engine and scheduler messages are sent only to the DC on
          * the same node. If we are no longer the DC, discard this message.
          */
         crm_trace("Ignoring message %s because we are no longer DC", ref);
         crm_log_xml_trace(msg, "ignored");
         return TRUE; // No further processing of message is needed
     }
 
     if (is_local) {
         if (is_for_crm || is_for_cib) {
             crm_trace("Route message %s locally as controller request", ref);
             return FALSE; // More to be done by caller
         }
         crm_trace("Relay message %s locally to %s", ref, sys_to);
         crm_log_xml_trace(msg, "IPC-relay");
         send_msg_via_ipc(msg, sys_to);
         return TRUE;
     }
 
     if (!broadcast) {
         node_to = pcmk__search_node_caches(0, host_to,
                                            pcmk__node_search_cluster_member);
         if (node_to == NULL) {
             crm_warn("Ignoring message %s because node %s is unknown",
                      ref, host_to);
             crm_log_xml_trace(msg, "ignored");
             return TRUE;
         }
     }
 
     crm_trace("Relay message %s to %s",
               ref, pcmk__s(host_to, "all peers"));
     crm_log_xml_trace(msg, "relayed");
     pcmk__cluster_send_message(node_to, dest, msg);
     return TRUE;
 }
 
 // Return true if field contains a non-negative integer
 static bool
 authorize_version(xmlNode *message_data, const char *field,
                   const char *client_name, const char *ref, const char *uuid)
 {
     const char *version = crm_element_value(message_data, field);
     long long version_num;
 
     if ((pcmk__scan_ll(version, &version_num, -1LL) != pcmk_rc_ok)
         || (version_num < 0LL)) {
 
         crm_warn("Rejected IPC hello from %s: '%s' is not a valid protocol %s "
                  QB_XS " ref=%s uuid=%s",
                  client_name, ((version == NULL)? "" : version),
                  field, (ref? ref : "none"), uuid);
         return false;
     }
     return true;
 }
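 
 /* Illustrative sketch (hypothetical helper, not part of this change): message
  * data for a client hello that would pass the checks above could be built
  * with the same XML helpers used in this file, along these lines. The element
  * name and attribute values are made up for illustration.
  */
 static xmlNode *
 example_hello_data(void)
 {
     xmlNode *data = pcmk__xe_create(NULL, "hello");
 
     crm_xml_add(data, PCMK__XA_CLIENT_NAME, "crmadmin");
     crm_xml_add(data, PCMK__XA_MAJOR_VERSION, "3");
     crm_xml_add(data, PCMK__XA_MINOR_VERSION, "0");
     return data;
 }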
 
 /*!
  * \internal
  * \brief Check whether a client IPC message is acceptable
  *
  * If a given client IPC message is a hello, "authorize" it by ensuring it has
  * valid information such as a protocol version, and return false indicating
  * that nothing further needs to be done with the message. If the message is not
  * a hello, just return true to indicate it needs further processing.
  *
  * \param[in]     client_msg     XML of IPC message
  * \param[in,out] curr_client    If IPC is not proxied, client that sent message
  * \param[in]     proxy_session  If IPC is proxied, the session ID
  *
  * \return true if message needs further processing, false if it doesn't
  */
 bool
 controld_authorize_ipc_message(const xmlNode *client_msg, pcmk__client_t *curr_client,
                                const char *proxy_session)
 {
     xmlNode *wrapper = NULL;
     xmlNode *message_data = NULL;
     const char *client_name = NULL;
     const char *op = crm_element_value(client_msg, PCMK__XA_CRM_TASK);
     const char *ref = crm_element_value(client_msg, PCMK_XA_REFERENCE);
     const char *uuid = (curr_client? curr_client->id : proxy_session);
 
     if (uuid == NULL) {
         crm_warn("IPC message from client rejected: No client identifier "
                  QB_XS " ref=%s", (ref? ref : "none"));
         goto rejected;
     }
 
     if (!pcmk__str_eq(CRM_OP_HELLO, op, pcmk__str_casei)) {
         // Only hello messages need to be authorized
         return true;
     }
 
     wrapper = pcmk__xe_first_child(client_msg, PCMK__XE_CRM_XML, NULL, NULL);
     message_data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL);
 
     client_name = crm_element_value(message_data, PCMK__XA_CLIENT_NAME);
     if (pcmk__str_empty(client_name)) {
         crm_warn("IPC hello from client rejected: No client name"
                  QB_XS " ref=%s uuid=%s", (ref? ref : "none"), uuid);
         goto rejected;
     }
     if (!authorize_version(message_data, PCMK__XA_MAJOR_VERSION, client_name,
                            ref, uuid)) {
         goto rejected;
     }
     if (!authorize_version(message_data, PCMK__XA_MINOR_VERSION, client_name,
                            ref, uuid)) {
         goto rejected;
     }
 
     crm_trace("Validated IPC hello from client %s", client_name);
     crm_log_xml_trace(client_msg, "hello");
     if (curr_client) {
         curr_client->userdata = pcmk__str_copy(client_name);
     }
     controld_trigger_fsa();
     return false;
 
 rejected:
     crm_log_xml_trace(client_msg, "rejected");
     if (curr_client) {
         qb_ipcs_disconnect(curr_client->ipcs);
     }
     return false;
 }
 
 static enum crmd_fsa_input
 handle_message(xmlNode *msg, enum crmd_fsa_cause cause)
 {
     const char *type = NULL;
 
     CRM_CHECK(msg != NULL, return I_NULL);
 
     type = crm_element_value(msg, PCMK__XA_SUBT);
     if (pcmk__str_eq(type, PCMK__VALUE_REQUEST, pcmk__str_none)) {
         return handle_request(msg, cause);
     }
 
     if (pcmk__str_eq(type, PCMK__VALUE_RESPONSE, pcmk__str_none)) {
         handle_response(msg);
         return I_NULL;
     }
 
     crm_warn("Ignoring message with unknown " PCMK__XA_SUBT " '%s'",
              pcmk__s(type, ""));
     crm_log_xml_trace(msg, "bad");
     return I_NULL;
 }
 
 static enum crmd_fsa_input
 handle_failcount_op(xmlNode * stored_msg)
 {
     const char *rsc = NULL;
     const char *uname = NULL;
     const char *op = NULL;
     char *interval_spec = NULL;
     guint interval_ms = 0;
     gboolean is_remote_node = FALSE;
 
     xmlNode *wrapper = pcmk__xe_first_child(stored_msg, PCMK__XE_CRM_XML, NULL,
                                             NULL);
     xmlNode *xml_op = pcmk__xe_first_child(wrapper, NULL, NULL, NULL);
 
     if (xml_op) {
         xmlNode *xml_rsc = pcmk__xe_first_child(xml_op, PCMK_XE_PRIMITIVE, NULL,
                                                 NULL);
         xmlNode *xml_attrs = pcmk__xe_first_child(xml_op, PCMK__XE_ATTRIBUTES,
                                                   NULL, NULL);
 
         if (xml_rsc) {
             rsc = pcmk__xe_id(xml_rsc);
         }
         if (xml_attrs) {
             op = crm_element_value(xml_attrs,
                                    CRM_META "_" PCMK__META_CLEAR_FAILURE_OP);
             crm_element_value_ms(xml_attrs,
                                  CRM_META "_" PCMK__META_CLEAR_FAILURE_INTERVAL,
                                  &interval_ms);
         }
     }
     uname = crm_element_value(xml_op, PCMK__META_ON_NODE);
 
     if ((rsc == NULL) || (uname == NULL)) {
         crm_log_xml_warn(stored_msg, "invalid failcount op");
         return I_NULL;
     }
 
     if (crm_element_value(xml_op, PCMK__XA_ROUTER_NODE)) {
         is_remote_node = TRUE;
     }
 
     crm_debug("Clearing failures for %s-interval %s on %s "
               "from attribute manager, CIB, and executor state",
               pcmk__readable_interval(interval_ms), rsc, uname);
 
     if (interval_ms) {
         interval_spec = crm_strdup_printf("%ums", interval_ms);
     }
     update_attrd_clear_failures(uname, rsc, op, interval_spec, is_remote_node);
     free(interval_spec);
 
     controld_cib_delete_last_failure(rsc, uname, op, interval_ms);
 
     lrm_clear_last_failure(rsc, uname, op, interval_ms);
 
     return I_NULL;
 }
 
 static enum crmd_fsa_input
 handle_lrm_delete(xmlNode *stored_msg)
 {
     const char *mode = NULL;
     xmlNode *wrapper = pcmk__xe_first_child(stored_msg, PCMK__XE_CRM_XML, NULL,
                                             NULL);
     xmlNode *msg_data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL);
 
     CRM_CHECK(msg_data != NULL, return I_NULL);
 
     /* CRM_OP_LRM_DELETE has two distinct modes. The default behavior is to
      * relay the operation to the affected node, which will unregister the
      * resource from the local executor, clear the resource's history from the
      * CIB, and do some bookkeeping in the controller.
      *
      * However, if the affected node is offline, the client will specify
      * mode=PCMK__VALUE_CIB which means the controller receiving the operation
      * should clear the resource's history from the CIB and nothing else. This
      * is used to clear shutdown locks.
      */
     mode = crm_element_value(msg_data, PCMK__XA_MODE);
     if (!pcmk__str_eq(mode, PCMK__VALUE_CIB, pcmk__str_none)) {
         // Relay to affected node
         crm_xml_add(stored_msg, PCMK__XA_CRM_SYS_TO, CRM_SYSTEM_LRMD);
         return I_ROUTER;
 
     } else {
         // Delete CIB history locally (compare with do_lrm_delete())
         const char *from_sys = NULL;
         const char *user_name = NULL;
         const char *rsc_id = NULL;
         const char *node = NULL;
         xmlNode *rsc_xml = NULL;
         int rc = pcmk_rc_ok;
 
         rsc_xml = pcmk__xe_first_child(msg_data, PCMK_XE_PRIMITIVE, NULL, NULL);
         CRM_CHECK(rsc_xml != NULL, return I_NULL);
 
         rsc_id = pcmk__xe_id(rsc_xml);
         from_sys = crm_element_value(stored_msg, PCMK__XA_CRM_SYS_FROM);
         node = crm_element_value(msg_data, PCMK__META_ON_NODE);
         user_name = pcmk__update_acl_user(stored_msg, PCMK__XA_CRM_USER, NULL);
         crm_debug("Handling " CRM_OP_LRM_DELETE " for %s on %s locally%s%s "
                   "(clearing CIB resource history only)", rsc_id, node,
                   (user_name? " for user " : ""), (user_name? user_name : ""));
         rc = controld_delete_resource_history(rsc_id, node, user_name,
                                               cib_dryrun|cib_sync_call);
         if (rc == pcmk_rc_ok) {
             rc = controld_delete_resource_history(rsc_id, node, user_name,
                                                   crmd_cib_smart_opt());
         }
 
         /* Notify client. Also notify tengine if mode=PCMK__VALUE_CIB and
          * op=CRM_OP_LRM_DELETE.
          */
         if (from_sys) {
             lrmd_event_data_t *op = NULL;
             const char *from_host = crm_element_value(stored_msg, PCMK__XA_SRC);
             const char *transition;
 
             if (strcmp(from_sys, CRM_SYSTEM_TENGINE)) {
                 transition = crm_element_value(msg_data,
                                                PCMK__XA_TRANSITION_KEY);
             } else {
                 transition = crm_element_value(stored_msg,
                                                PCMK__XA_TRANSITION_KEY);
             }
 
             crm_info("Notifying %s on %s that %s was%s deleted",
                      from_sys, (from_host? from_host : "local node"), rsc_id,
                      ((rc == pcmk_rc_ok)? "" : " not"));
             op = lrmd_new_event(rsc_id, PCMK_ACTION_DELETE, 0);
             op->type = lrmd_event_exec_complete;
             op->user_data = pcmk__str_copy(pcmk__s(transition, FAKE_TE_ID));
             op->params = pcmk__strkey_table(free, free);
             pcmk__insert_dup(op->params, PCMK_XA_CRM_FEATURE_SET,
                              CRM_FEATURE_SET);
             controld_rc2event(op, rc);
             controld_ack_event_directly(from_host, from_sys, NULL, op, rsc_id);
             lrmd_free_event(op);
             controld_trigger_delete_refresh(from_sys, rsc_id);
         }
         return I_NULL;
     }
 }
 
 /*!
  * \brief Handle a CRM_OP_REMOTE_STATE message by updating remote peer cache
  *
  * \param[in] msg  Message XML
  *
  * \return Next FSA input
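  *
  * \note Peers build these messages with broadcast_remote_state_message(),
  *       defined later in this file.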
  */
 static enum crmd_fsa_input
 handle_remote_state(const xmlNode *msg)
 {
     const char *conn_host = NULL;
     const char *remote_uname = pcmk__xe_id(msg);
     pcmk__node_status_t *remote_peer;
     bool remote_is_up = false;
     int rc = pcmk_rc_ok;
 
     rc = pcmk__xe_get_bool_attr(msg, PCMK__XA_IN_CCM, &remote_is_up);
 
     CRM_CHECK(remote_uname && rc == pcmk_rc_ok, return I_NULL);
 
     remote_peer = pcmk__cluster_lookup_remote_node(remote_uname);
     CRM_CHECK(remote_peer, return I_NULL);
 
     pcmk__update_peer_state(__func__, remote_peer,
-                            remote_is_up ? CRM_NODE_MEMBER : CRM_NODE_LOST,
+                            remote_is_up ? PCMK_VALUE_MEMBER : CRM_NODE_LOST,
                             0);
 
     conn_host = crm_element_value(msg, PCMK__XA_CONNECTION_HOST);
     if (conn_host) {
         pcmk__str_update(&remote_peer->conn_host, conn_host);
     } else if (remote_peer->conn_host) {
         free(remote_peer->conn_host);
         remote_peer->conn_host = NULL;
     }
 
     return I_NULL;
 }
 
 /*!
  * \brief Handle a CRM_OP_PING message
  *
  * \param[in] msg  Message XML
  *
  * \return Next FSA input
  */
 static enum crmd_fsa_input
 handle_ping(const xmlNode *msg)
 {
     const char *value = NULL;
     xmlNode *ping = NULL;
     xmlNode *reply = NULL;
 
     // Build reply
 
     ping = pcmk__xe_create(NULL, PCMK__XE_PING_RESPONSE);
     value = crm_element_value(msg, PCMK__XA_CRM_SYS_TO);
     crm_xml_add(ping, PCMK__XA_CRM_SUBSYSTEM, value);
 
     // Add controller state
     value = fsa_state2string(controld_globals.fsa_state);
     crm_xml_add(ping, PCMK__XA_CRMD_STATE, value);
     crm_notice("Current ping state: %s", value); // CTS needs this
 
     // Add controller health
     // @TODO maybe do some checks to determine meaningful status
     crm_xml_add(ping, PCMK_XA_RESULT, "ok");
 
     // Send reply
     reply = create_reply(msg, ping);
     pcmk__xml_free(ping);
     if (reply != NULL) {
         (void) relay_message(reply, TRUE);
         pcmk__xml_free(reply);
     }
 
     // Nothing further to do
     return I_NULL;
 }
 
 /*!
  * \brief Handle a PCMK__CONTROLD_CMD_NODES message
  *
  * \param[in] request  Message XML
  *
  * \return Next FSA input
  */
 static enum crmd_fsa_input
 handle_node_list(const xmlNode *request)
 {
     GHashTableIter iter;
     pcmk__node_status_t *node = NULL;
     xmlNode *reply = NULL;
     xmlNode *reply_data = NULL;
 
     // Create message data for reply
     reply_data = pcmk__xe_create(NULL, PCMK_XE_NODES);
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
         xmlNode *xml = pcmk__xe_create(reply_data, PCMK_XE_NODE);
 
         crm_xml_add_ll(xml, PCMK_XA_ID,
                        (long long) node->cluster_layer_id); // uint32_t
         crm_xml_add(xml, PCMK_XA_UNAME, node->name);
         crm_xml_add(xml, PCMK__XA_IN_CCM, node->state);
     }
 
     // Create and send reply
     reply = create_reply(request, reply_data);
     pcmk__xml_free(reply_data);
     if (reply) {
         (void) relay_message(reply, TRUE);
         pcmk__xml_free(reply);
     }
 
     // Nothing further to do
     return I_NULL;
 }
 
 /*!
  * \brief Handle a CRM_OP_NODE_INFO request
  *
  * \param[in] msg  Message XML
  *
  * \return Next FSA input
  */
 static enum crmd_fsa_input
 handle_node_info_request(const xmlNode *msg)
 {
     const char *value = NULL;
     pcmk__node_status_t *node = NULL;
     int node_id = 0;
     xmlNode *reply = NULL;
     xmlNode *reply_data = NULL;
 
     // Build reply
 
     reply_data = pcmk__xe_create(NULL, PCMK_XE_NODE);
     crm_xml_add(reply_data, PCMK__XA_CRM_SUBSYSTEM, CRM_SYSTEM_CRMD);
 
     // Add whether current partition has quorum
     pcmk__xe_set_bool_attr(reply_data, PCMK_XA_HAVE_QUORUM,
                            pcmk_is_set(controld_globals.flags,
                                        controld_has_quorum));
 
     // Check whether client requested node info by ID and/or name
     crm_element_value_int(msg, PCMK_XA_ID, &node_id);
     if (node_id < 0) {
         node_id = 0;
     }
     value = crm_element_value(msg, PCMK_XA_UNAME);
 
     // Default to local node if none given
     if ((node_id == 0) && (value == NULL)) {
         value = controld_globals.our_nodename;
     }
 
     node = pcmk__search_node_caches(node_id, value, pcmk__node_search_any);
     if (node) {
         crm_xml_add(reply_data, PCMK_XA_ID, node->xml_id);
         crm_xml_add(reply_data, PCMK_XA_UNAME, node->name);
         crm_xml_add(reply_data, PCMK_XA_CRMD, node->state);
         pcmk__xe_set_bool_attr(reply_data, PCMK_XA_REMOTE_NODE,
                                pcmk_is_set(node->flags,
                                            pcmk__node_status_remote));
     }
 
     // Send reply
     reply = create_reply(msg, reply_data);
     pcmk__xml_free(reply_data);
     if (reply != NULL) {
         (void) relay_message(reply, TRUE);
         pcmk__xml_free(reply);
     }
 
     // Nothing further to do
     return I_NULL;
 }
 
 static void
 verify_feature_set(xmlNode *msg)
 {
     const char *dc_version = crm_element_value(msg, PCMK_XA_CRM_FEATURE_SET);
 
     if (dc_version == NULL) {
         /* All we really know is that the DC feature set is older than 3.1.0,
          * but that's also all that really matters.
          */
         dc_version = "3.0.14";
     }
 
     if (feature_set_compatible(dc_version, CRM_FEATURE_SET)) {
         crm_trace("Local feature set (%s) is compatible with DC's (%s)",
                   CRM_FEATURE_SET, dc_version);
     } else {
         crm_err("Local feature set (%s) is incompatible with DC's (%s)",
                 CRM_FEATURE_SET, dc_version);
 
         // Nothing is likely to improve without administrator involvement
         controld_set_fsa_input_flags(R_STAYDOWN);
         crmd_exit(CRM_EX_FATAL);
     }
 }
 
 // DC gets own shutdown all-clear
 static enum crmd_fsa_input
 handle_shutdown_self_ack(xmlNode *stored_msg)
 {
     const char *host_from = crm_element_value(stored_msg, PCMK__XA_SRC);
 
     if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) {
         // The expected case -- we initiated own shutdown sequence
         crm_info("Shutting down controller");
         return I_STOP;
     }
 
     if (pcmk__str_eq(host_from, controld_globals.dc_name, pcmk__str_casei)) {
         // Must be a logic error -- DC confirming its own unrequested shutdown
         crm_err("Shutting down controller immediately due to "
                 "unexpected shutdown confirmation");
         return I_TERMINATE;
     }
 
     if (controld_globals.fsa_state != S_STOPPING) {
         // Shouldn't happen -- non-DC confirming unrequested shutdown
         crm_err("Starting new DC election because %s is "
                 "confirming shutdown we did not request",
                 (host_from? host_from : "another node"));
         return I_ELECTION;
     }
 
     // Shouldn't happen, but we are already stopping anyway
     crm_debug("Ignoring unexpected shutdown confirmation from %s",
               (host_from? host_from : "another node"));
     return I_NULL;
 }
 
 // Non-DC gets shutdown all-clear from DC
 static enum crmd_fsa_input
 handle_shutdown_ack(xmlNode *stored_msg)
 {
     const char *host_from = crm_element_value(stored_msg, PCMK__XA_SRC);
 
     if (host_from == NULL) {
         crm_warn("Ignoring shutdown request without origin specified");
         return I_NULL;
     }
 
     if (pcmk__str_eq(host_from, controld_globals.dc_name,
                      pcmk__str_null_matches|pcmk__str_casei)) {
 
         if (pcmk_is_set(controld_globals.fsa_input_register, R_SHUTDOWN)) {
             crm_info("Shutting down controller after confirmation from %s",
                      host_from);
         } else {
             crm_err("Shutting down controller after unexpected "
                     "shutdown request from %s", host_from);
             controld_set_fsa_input_flags(R_STAYDOWN);
         }
         return I_STOP;
     }
 
     crm_warn("Ignoring shutdown request from %s because DC is %s",
              host_from, controld_globals.dc_name);
     return I_NULL;
 }
 
 static enum crmd_fsa_input
 handle_request(xmlNode *stored_msg, enum crmd_fsa_cause cause)
 {
     xmlNode *msg = NULL;
     const char *op = crm_element_value(stored_msg, PCMK__XA_CRM_TASK);
 
     /* Optimize this for the DC - it has the most to do */
 
     crm_log_xml_trace(stored_msg, "request");
     if (op == NULL) {
         crm_warn("Ignoring request without " PCMK__XA_CRM_TASK);
         return I_NULL;
     }
 
     if (strcmp(op, CRM_OP_SHUTDOWN_REQ) == 0) {
         const char *from = crm_element_value(stored_msg, PCMK__XA_SRC);
         pcmk__node_status_t *node =
             pcmk__search_node_caches(0, from, pcmk__node_search_cluster_member);
 
         pcmk__update_peer_expected(__func__, node, CRMD_JOINSTATE_DOWN);
         if(AM_I_DC == FALSE) {
             return I_NULL; /* Done */
         }
     }
 
     /*========== DC-Only Actions ==========*/
     if (AM_I_DC) {
         if (strcmp(op, CRM_OP_JOIN_ANNOUNCE) == 0) {
             return I_NODE_JOIN;
 
         } else if (strcmp(op, CRM_OP_JOIN_REQUEST) == 0) {
             return I_JOIN_REQUEST;
 
         } else if (strcmp(op, CRM_OP_JOIN_CONFIRM) == 0) {
             return I_JOIN_RESULT;
 
         } else if (strcmp(op, CRM_OP_SHUTDOWN) == 0) {
             return handle_shutdown_self_ack(stored_msg);
 
         } else if (strcmp(op, CRM_OP_SHUTDOWN_REQ) == 0) {
             // Another controller wants to shut down its node
             return handle_shutdown_request(stored_msg);
         }
     }
 
     /*========== common actions ==========*/
     if (strcmp(op, CRM_OP_NOVOTE) == 0) {
         ha_msg_input_t fsa_input;
 
         fsa_input.msg = stored_msg;
         register_fsa_input_adv(C_HA_MESSAGE, I_NULL, &fsa_input,
                                A_ELECTION_COUNT | A_ELECTION_CHECK, FALSE,
                                __func__);
 
     } else if (strcmp(op, CRM_OP_REMOTE_STATE) == 0) {
         /* a remote connection host is letting us know the node state */
         return handle_remote_state(stored_msg);
 
     } else if (strcmp(op, CRM_OP_THROTTLE) == 0) {
         throttle_update(stored_msg);
         if (AM_I_DC && (controld_globals.transition_graph != NULL)
             && !controld_globals.transition_graph->complete) {
 
             crm_debug("The throttle changed. Trigger a graph.");
             trigger_graph();
         }
         return I_NULL;
 
     } else if (strcmp(op, CRM_OP_CLEAR_FAILCOUNT) == 0) {
         return handle_failcount_op(stored_msg);
 
     } else if (strcmp(op, CRM_OP_VOTE) == 0) {
         /* count the vote and decide what to do after that */
         ha_msg_input_t fsa_input;
 
         fsa_input.msg = stored_msg;
         register_fsa_input_adv(C_HA_MESSAGE, I_NULL, &fsa_input,
                                A_ELECTION_COUNT | A_ELECTION_CHECK, FALSE,
                                __func__);
 
         /* Sometimes we _must_ go into S_ELECTION */
         if (controld_globals.fsa_state == S_HALT) {
             crm_debug("Forcing an election from S_HALT");
             return I_ELECTION;
         }
 
     } else if (strcmp(op, CRM_OP_JOIN_OFFER) == 0) {
         verify_feature_set(stored_msg);
         crm_debug("Raising I_JOIN_OFFER: join-%s",
                   crm_element_value(stored_msg, PCMK__XA_JOIN_ID));
         return I_JOIN_OFFER;
 
     } else if (strcmp(op, CRM_OP_JOIN_ACKNAK) == 0) {
         crm_debug("Raising I_JOIN_RESULT: join-%s",
                   crm_element_value(stored_msg, PCMK__XA_JOIN_ID));
         return I_JOIN_RESULT;
 
     } else if (strcmp(op, CRM_OP_LRM_DELETE) == 0) {
         return handle_lrm_delete(stored_msg);
 
     } else if ((strcmp(op, CRM_OP_LRM_FAIL) == 0)
                || (strcmp(op, CRM_OP_LRM_REFRESH) == 0) // @COMPAT
                || (strcmp(op, CRM_OP_REPROBE) == 0)) {
 
         crm_xml_add(stored_msg, PCMK__XA_CRM_SYS_TO, CRM_SYSTEM_LRMD);
         return I_ROUTER;
 
     } else if (strcmp(op, CRM_OP_NOOP) == 0) {
         return I_NULL;
 
     } else if (strcmp(op, CRM_OP_PING) == 0) {
         return handle_ping(stored_msg);
 
     } else if (strcmp(op, CRM_OP_NODE_INFO) == 0) {
         return handle_node_info_request(stored_msg);
 
     } else if (strcmp(op, CRM_OP_RM_NODE_CACHE) == 0) {
         int id = 0;
         const char *name = NULL;
 
         crm_element_value_int(stored_msg, PCMK_XA_ID, &id);
         name = crm_element_value(stored_msg, PCMK_XA_UNAME);
 
         if(cause == C_IPC_MESSAGE) {
             msg = create_request(CRM_OP_RM_NODE_CACHE, NULL, NULL, CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL);
             if (!pcmk__cluster_send_message(NULL, pcmk__cluster_msg_controld,
                                             msg)) {
                 crm_err("Could not instruct peers to remove references to node %s/%u", name, id);
             } else {
                 crm_notice("Instructing peers to remove references to node %s/%u", name, id);
             }
             pcmk__xml_free(msg);
 
         } else {
             pcmk__cluster_forget_cluster_node(id, name);
 
             /* If we're forgetting this node, also forget any failures to fence
              * it, so we don't carry that over to any node added later with the
              * same name.
              */
             st_fail_count_reset(name);
         }
 
     } else if (strcmp(op, CRM_OP_MAINTENANCE_NODES) == 0) {
         xmlNode *wrapper = pcmk__xe_first_child(stored_msg, PCMK__XE_CRM_XML,
                                                 NULL, NULL);
         xmlNode *xml = pcmk__xe_first_child(wrapper, NULL, NULL, NULL);
 
         remote_ra_process_maintenance_nodes(xml);
 
     } else if (strcmp(op, PCMK__CONTROLD_CMD_NODES) == 0) {
         return handle_node_list(stored_msg);
 
         /*========== (NOT_DC)-Only Actions ==========*/
     } else if (!AM_I_DC) {
 
         if (strcmp(op, CRM_OP_SHUTDOWN) == 0) {
             return handle_shutdown_ack(stored_msg);
         }
 
     } else {
         crm_err("Unexpected request (%s) sent to %s", op, AM_I_DC ? "the DC" : "non-DC node");
         crm_log_xml_err(stored_msg, "Unexpected");
     }
 
     return I_NULL;
 }
 
 static void
 handle_response(xmlNode *stored_msg)
 {
     const char *op = crm_element_value(stored_msg, PCMK__XA_CRM_TASK);
 
     crm_log_xml_trace(stored_msg, "reply");
     if (op == NULL) {
         crm_warn("Ignoring reply without " PCMK__XA_CRM_TASK);
 
     } else if (AM_I_DC && strcmp(op, CRM_OP_PECALC) == 0) {
         // Check whether the scheduler's answer has been superseded by a subsequent request
         const char *msg_ref = crm_element_value(stored_msg, PCMK_XA_REFERENCE);
 
         if (msg_ref == NULL) {
             crm_err("%s - Ignoring calculation with no reference", op);
 
         } else if (pcmk__str_eq(msg_ref, controld_globals.fsa_pe_ref,
                                 pcmk__str_none)) {
             ha_msg_input_t fsa_input;
 
             controld_stop_sched_timer();
             fsa_input.msg = stored_msg;
             register_fsa_input_later(C_IPC_MESSAGE, I_PE_SUCCESS, &fsa_input);
 
         } else {
             crm_info("%s calculation %s is obsolete", op, msg_ref);
         }
 
     } else if (strcmp(op, CRM_OP_VOTE) == 0
                || strcmp(op, CRM_OP_SHUTDOWN_REQ) == 0 || strcmp(op, CRM_OP_SHUTDOWN) == 0) {
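         // Nothing to do for these: they are expected and safely ignored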
 
     } else {
         const char *host_from = crm_element_value(stored_msg, PCMK__XA_SRC);
 
         crm_err("Unexpected response (op=%s, src=%s) sent to the %s",
                 op, host_from, AM_I_DC ? "DC" : "controller");
     }
 }
 
 static enum crmd_fsa_input
 handle_shutdown_request(xmlNode * stored_msg)
 {
     /* handle here to avoid potential version issues
      *   where the shutdown message/procedure may have
      *   been changed in later versions.
      *
      * This way the DC is always in control of the shutdown
      */
 
     char *now_s = NULL;
     const char *host_from = crm_element_value(stored_msg, PCMK__XA_SRC);
 
     if (host_from == NULL) {
         /* No source specified, so this is our own request: we are the DC shutting down */
         host_from = controld_globals.our_nodename;
     }
 
     crm_info("Creating shutdown request for %s (state=%s)", host_from,
              fsa_state2string(controld_globals.fsa_state));
     crm_log_xml_trace(stored_msg, "message");
 
     now_s = pcmk__ttoa(time(NULL));
     update_attrd(host_from, PCMK__NODE_ATTR_SHUTDOWN, now_s, NULL, FALSE);
     free(now_s);
 
     /* will be picked up by the TE as long as it's running */
     return I_NULL;
 }
 
 static void
 send_msg_via_ipc(xmlNode * msg, const char *sys)
 {
     pcmk__client_t *client_channel = NULL;
 
     CRM_CHECK(sys != NULL, return);
 
     client_channel = pcmk__find_client_by_id(sys);
 
     if (crm_element_value(msg, PCMK__XA_SRC) == NULL) {
         crm_xml_add(msg, PCMK__XA_SRC, controld_globals.our_nodename);
     }
 
     if (client_channel != NULL) {
         /* Transient clients such as crmadmin */
         pcmk__ipc_send_xml(client_channel, 0, msg, crm_ipc_server_event);
 
     } else if (pcmk__str_eq(sys, CRM_SYSTEM_TENGINE, pcmk__str_none)) {
         xmlNode *wrapper = pcmk__xe_first_child(msg, PCMK__XE_CRM_XML, NULL,
                                                 NULL);
         xmlNode *data = pcmk__xe_first_child(wrapper, NULL, NULL, NULL);
 
         process_te_message(msg, data);
 
     } else if (pcmk__str_eq(sys, CRM_SYSTEM_LRMD, pcmk__str_none)) {
         fsa_data_t fsa_data;
         ha_msg_input_t fsa_input;
         xmlNode *wrapper = NULL;
 
         fsa_input.msg = msg;
 
         wrapper = pcmk__xe_first_child(msg, PCMK__XE_CRM_XML, NULL, NULL);
         fsa_input.xml = pcmk__xe_first_child(wrapper, NULL, NULL, NULL);
 
         fsa_data.id = 0;
         fsa_data.actions = 0;
         fsa_data.data = &fsa_input;
         fsa_data.fsa_input = I_MESSAGE;
         fsa_data.fsa_cause = C_IPC_MESSAGE;
         fsa_data.origin = __func__;
         fsa_data.data_type = fsa_dt_ha_msg;
 
         do_lrm_invoke(A_LRM_INVOKE, C_IPC_MESSAGE, controld_globals.fsa_state,
                       I_MESSAGE, &fsa_data);
 
     } else if (crmd_is_proxy_session(sys)) {
         crmd_proxy_send(sys, msg);
 
     } else {
         crm_info("Received invalid request: unknown subsystem '%s'", sys);
     }
 }
 
 void
 delete_ha_msg_input(ha_msg_input_t * orig)
 {
     if (orig == NULL) {
         return;
     }
     pcmk__xml_free(orig->msg);
     free(orig);
 }
 
 /*!
  * \internal
  * \brief Notify the cluster of a remote node state change
  *
  * \param[in] node_name  Node's name
  * \param[in] node_up    true if node is up, false if down
  */
 void
 broadcast_remote_state_message(const char *node_name, bool node_up)
 {
     xmlNode *msg = create_request(CRM_OP_REMOTE_STATE, NULL, NULL,
                                   CRM_SYSTEM_CRMD, CRM_SYSTEM_CRMD, NULL);
 
     crm_info("Notifying cluster of Pacemaker Remote node %s %s",
              node_name, node_up? "coming up" : "going down");
 
     crm_xml_add(msg, PCMK_XA_ID, node_name);
     pcmk__xe_set_bool_attr(msg, PCMK__XA_IN_CCM, node_up);
 
     if (node_up) {
         crm_xml_add(msg, PCMK__XA_CONNECTION_HOST,
                     controld_globals.our_nodename);
     }
 
     pcmk__cluster_send_message(NULL, pcmk__cluster_msg_controld, msg);
     pcmk__xml_free(msg);
 }
 
diff --git a/daemons/controld/controld_remote_ra.c b/daemons/controld/controld_remote_ra.c
index 9a483a06e4..e8fa229b99 100644
--- a/daemons/controld/controld_remote_ra.c
+++ b/daemons/controld/controld_remote_ra.c
@@ -1,1474 +1,1474 @@
 /*
  * Copyright 2013-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/crm.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 #include <crm/lrmd.h>
 #include <crm/lrmd_internal.h>
 #include <crm/services.h>
 
 #include <pacemaker-controld.h>
 
 #define REMOTE_LRMD_RA "remote"
 
 /* The max start timeout before cmd retry */
 #define MAX_START_TIMEOUT_MS 10000
 
 #define cmd_set_flags(cmd, flags_to_set) do { \
     (cmd)->status = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, \
                                        "Remote command", (cmd)->rsc_id, (cmd)->status, \
                                        (flags_to_set), #flags_to_set); \
         } while (0)
 
 #define cmd_clear_flags(cmd, flags_to_clear) do { \
     (cmd)->status = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, \
                                          "Remote command", (cmd)->rsc_id, (cmd)->status, \
                                          (flags_to_clear), #flags_to_clear); \
         } while (0)
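 
 /* Usage sketch (assumption): cmd_set_flags(cmd, cmd_cancel) would mark a
  * command as cancelled and cmd_clear_flags(cmd, cmd_cancel) would clear the
  * mark, logging the change at trace level either way.
  */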
 
 enum remote_cmd_status {
     cmd_reported_success    = (1 << 0),
     cmd_cancel              = (1 << 1),
 };
 
 typedef struct remote_ra_cmd_s {
     /*! the local node the cmd is issued from */
     char *owner;
     /*! the remote node the cmd is executed on */
     char *rsc_id;
     /*! the action to execute */
     char *action;
     /*! some string the client wants us to give it back */
     char *userdata;
     /*! start delay in ms */
     int start_delay;
     /*! timer id used for start delay. */
     int delay_id;
     /*! timeout in ms for cmd */
     int timeout;
     int remaining_timeout;
     /*! recurring interval in ms */
     guint interval_ms;
     /*! interval timer id */
     int interval_id;
     int monitor_timeout_id;
     int takeover_timeout_id;
     /*! action parameters */
     lrmd_key_value_t *params;
     pcmk__action_result_t result;
     int call_id;
     time_t start_time;
     uint32_t status;
 } remote_ra_cmd_t;
 
 #define lrm_remote_set_flags(lrm_state, flags_to_set) do { \
     lrm_state_t *lrm = (lrm_state); \
     remote_ra_data_t *ra = lrm->remote_ra_data; \
     ra->status = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE, "Remote", \
                                     lrm->node_name, ra->status, \
                                     (flags_to_set), #flags_to_set); \
         } while (0)
 
 #define lrm_remote_clear_flags(lrm_state, flags_to_clear) do { \
     lrm_state_t *lrm = (lrm_state); \
     remote_ra_data_t *ra = lrm->remote_ra_data; \
     ra->status = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE, "Remote", \
                                       lrm->node_name, ra->status, \
                                       (flags_to_clear), #flags_to_clear); \
         } while (0)
 
 enum remote_status {
     expect_takeover     = (1 << 0),
     takeover_complete   = (1 << 1),
     remote_active       = (1 << 2),
     /* Maintenance mode is difficult to determine from the controller's context,
      * so we have it signalled back with the transition from the scheduler.
      */
     remote_in_maint     = (1 << 3),
     /* The same applies to whether we are controlling a guest node or a
      * remote node. Fortunately the transition already carries a
      * meta-attribute with this information, and since the situation doesn't
      * change over time, we can record it at resource start for later use
      * when the attributes aren't at hand.
      */
     controlling_guest   = (1 << 4),
 };
 
 typedef struct remote_ra_data_s {
     crm_trigger_t *work;
     remote_ra_cmd_t *cur_cmd;
     GList *cmds;
     GList *recurring_cmds;
     uint32_t status;
 } remote_ra_data_t;
 
 static int handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms);
 static void handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd);
 static GList *fail_all_monitor_cmds(GList * list);
 
 static void
 free_cmd(gpointer user_data)
 {
     remote_ra_cmd_t *cmd = user_data;
 
     if (!cmd) {
         return;
     }
     if (cmd->delay_id) {
         g_source_remove(cmd->delay_id);
     }
     if (cmd->interval_id) {
         g_source_remove(cmd->interval_id);
     }
     if (cmd->monitor_timeout_id) {
         g_source_remove(cmd->monitor_timeout_id);
     }
     if (cmd->takeover_timeout_id) {
         g_source_remove(cmd->takeover_timeout_id);
     }
     free(cmd->owner);
     free(cmd->rsc_id);
     free(cmd->action);
     free(cmd->userdata);
     pcmk__reset_result(&(cmd->result));
     lrmd_key_value_freeall(cmd->params);
     free(cmd);
 }
 
 static int
 generate_callid(void)
 {
     static int remote_ra_callid = 0;
 
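     /* Call IDs must remain positive, so wrap back to 1 on integer overflow */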
     remote_ra_callid++;
     if (remote_ra_callid <= 0) {
         remote_ra_callid = 1;
     }
 
     return remote_ra_callid;
 }
 
 static gboolean
 recurring_helper(gpointer data)
 {
     remote_ra_cmd_t *cmd = data;
     lrm_state_t *connection_rsc = NULL;
 
     cmd->interval_id = 0;
     connection_rsc = lrm_state_find(cmd->rsc_id);
     if (connection_rsc && connection_rsc->remote_ra_data) {
         remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
 
         ra_data->recurring_cmds = g_list_remove(ra_data->recurring_cmds, cmd);
 
         ra_data->cmds = g_list_append(ra_data->cmds, cmd);
         mainloop_set_trigger(ra_data->work);
     }
     return FALSE;
 }
 
 static gboolean
 start_delay_helper(gpointer data)
 {
     remote_ra_cmd_t *cmd = data;
     lrm_state_t *connection_rsc = NULL;
 
     cmd->delay_id = 0;
     connection_rsc = lrm_state_find(cmd->rsc_id);
     if (connection_rsc && connection_rsc->remote_ra_data) {
         remote_ra_data_t *ra_data = connection_rsc->remote_ra_data;
 
         mainloop_set_trigger(ra_data->work);
     }
     return FALSE;
 }
 
 static bool
 should_purge_attributes(pcmk__node_status_t *node)
 {
     bool purge = true;
     pcmk__node_status_t *conn_node = NULL;
     lrm_state_t *connection_rsc = NULL;
 
     if (!node->conn_host) {
         return purge;
     }
 
     /* Get the node that was hosting the remote connection resource from the
      * peer cache.  That's the one we really care about here.
      */
     conn_node = pcmk__get_node(0, node->conn_host, NULL,
                                pcmk__node_search_cluster_member);
     if (conn_node == NULL) {
         return purge;
     }
 
     /* Check the uptime of connection_rsc.  If it hasn't been running long
      * enough, leave purge=true.  "Long enough" means it started running
      * earlier than the timestamp when we noticed it went away in the first
      * place.
      */
     connection_rsc = lrm_state_find(node->name);
 
     if (connection_rsc != NULL) {
         lrmd_t *lrm = connection_rsc->conn;
         time_t uptime = lrmd__uptime(lrm);
         time_t now = time(NULL);
 
         /* Add 20s of fuzziness to give corosync a while to notice the remote
          * host is gone.  On various error conditions (failure to get uptime,
          * peer_lost isn't set) we default to purging.
          */
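         /* For example, a connection started 5s before the peer was lost
          * satisfies uptime + 20 >= now - conn_node->peer_lost, so we skip
          * the purge; one started well after the loss fails the test, and
          * the attributes are purged.
          */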
         if (uptime > 0 &&
             conn_node->peer_lost > 0 &&
             uptime + 20 >= now - conn_node->peer_lost) {
             purge = false;
         }
     }
 
     return purge;
 }
 
 static enum controld_section_e
 section_to_delete(bool purge)
 {
     if (pcmk_is_set(controld_globals.flags, controld_shutdown_lock_enabled)) {
         if (purge) {
             return controld_section_all_unlocked;
         } else {
             return controld_section_lrm_unlocked;
         }
     } else {
         if (purge) {
             return controld_section_all;
         } else {
             return controld_section_lrm;
         }
     }
 }
 
 static void
 purge_remote_node_attrs(int call_opt, pcmk__node_status_t *node)
 {
     bool purge = should_purge_attributes(node);
     enum controld_section_e section = section_to_delete(purge);
 
     /* Purge node from attrd's memory */
     if (purge) {
         update_attrd_remote_node_removed(node->name, NULL);
     }
 
     controld_delete_node_state(node->name, section, call_opt);
 }
 
 /*!
  * \internal
  * \brief Handle cluster communication related to pacemaker_remote node joining
  *
  * \param[in] node_name  Name of newly integrated pacemaker_remote node
  */
 static void
 remote_node_up(const char *node_name)
 {
     int call_opt;
     xmlNode *update, *state;
     pcmk__node_status_t *node = NULL;
     lrm_state_t *connection_rsc = NULL;
 
     CRM_CHECK(node_name != NULL, return);
     crm_info("Announcing Pacemaker Remote node %s", node_name);
 
     call_opt = crmd_cib_smart_opt();
 
     /* Delete node's CRM_OP_PROBED attribute. Deleting any attribute ensures
      * that the attribute manager learns the node is remote. Deletion of this
      * specific attribute is a holdover from when it had special meaning.
      *
      * @COMPAT Find another way to tell attrd that the node is remote, without
      * risking deletion or overwrite of an arbitrary attribute. Then work on
      * deprecating CRM_OP_PROBED.
      */
     update_attrd(node_name, CRM_OP_PROBED, NULL, NULL, TRUE);
 
     /* Ensure node is in the remote peer cache with member status */
     node = pcmk__cluster_lookup_remote_node(node_name);
     CRM_CHECK(node != NULL, return);
 
     purge_remote_node_attrs(call_opt, node);
-    pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0);
+    pcmk__update_peer_state(__func__, node, PCMK_VALUE_MEMBER, 0);
 
     /* Apply any start state that we were given from the environment on the
      * remote node.
      */
     connection_rsc = lrm_state_find(node->name);
 
     if (connection_rsc != NULL) {
         lrmd_t *lrm = connection_rsc->conn;
         const char *start_state = lrmd__node_start_state(lrm);
 
         if (start_state) {
             set_join_state(start_state, node->name, node->xml_id, true);
         }
     }
 
     /* pacemaker_remote nodes don't participate in the membership layer,
      * so cluster nodes don't automatically get notified when they come and go.
      * We send a cluster message to the DC, and update the CIB node state entry,
      * so the DC will get it sooner (via message) or later (via CIB refresh),
      * and any other interested parties can query the CIB.
      */
     broadcast_remote_state_message(node_name, true);
 
     update = pcmk__xe_create(NULL, PCMK_XE_STATUS);
     state = create_node_state_update(node, node_update_cluster, update,
                                      __func__);
 
     /* Clear the PCMK__XA_NODE_FENCED flag in the node state. If the node ever
      * needs to be fenced, this flag will allow various actions to determine
      * whether the fencing has happened yet.
      */
     crm_xml_add(state, PCMK__XA_NODE_FENCED, "0");
 
     /* TODO: If the remote connection drops, and this (async) CIB update either
      * failed or has not yet completed, later actions could mistakenly think the
      * node has already been fenced (if the PCMK__XA_NODE_FENCED attribute was
      * previously set, because it won't have been cleared). This could prevent
      * actual fencing or allow recurring monitor failures to be cleared too
      * soon. Ideally, we wouldn't rely on the CIB for the fenced status.
      */
     controld_update_cib(PCMK_XE_STATUS, update, call_opt, NULL);
     pcmk__xml_free(update);
 }
 
 enum down_opts {
     DOWN_KEEP_LRM,
     DOWN_ERASE_LRM
 };
 
 /*!
  * \internal
  * \brief Handle cluster communication related to pacemaker_remote node leaving
  *
  * \param[in] node_name  Name of lost node
  * \param[in] opts       Whether to keep or erase LRM history
  */
 static void
 remote_node_down(const char *node_name, const enum down_opts opts)
 {
     xmlNode *update;
     int call_opt = crmd_cib_smart_opt();
     pcmk__node_status_t *node = NULL;
 
     /* Purge node from attrd's memory */
     update_attrd_remote_node_removed(node_name, NULL);
 
     /* Normally, only node attributes should be erased, and the resource history
      * should be kept until the node comes back up. However, after a successful
      * fence, we want to clear the history as well, so we don't think resources
      * are still running on the node.
      */
     if (opts == DOWN_ERASE_LRM) {
         controld_delete_node_state(node_name, controld_section_all, call_opt);
     } else {
         controld_delete_node_state(node_name, controld_section_attrs, call_opt);
     }
 
     /* Ensure node is in the remote peer cache with lost state */
     node = pcmk__cluster_lookup_remote_node(node_name);
     CRM_CHECK(node != NULL, return);
     pcmk__update_peer_state(__func__, node, CRM_NODE_LOST, 0);
 
     /* Notify DC */
     broadcast_remote_state_message(node_name, false);
 
     /* Update CIB node state */
     update = pcmk__xe_create(NULL, PCMK_XE_STATUS);
     create_node_state_update(node, node_update_cluster, update, __func__);
     controld_update_cib(PCMK_XE_STATUS, update, call_opt, NULL);
     pcmk__xml_free(update);
 }
 
 /*!
  * \internal
  * \brief Handle effects of a remote RA command on node state
  *
  * \param[in] cmd  Completed remote RA command
  */
 static void
 check_remote_node_state(const remote_ra_cmd_t *cmd)
 {
     /* Only successful actions can change node state */
     if (!pcmk__result_ok(&(cmd->result))) {
         return;
     }
 
     if (pcmk__str_eq(cmd->action, PCMK_ACTION_START, pcmk__str_casei)) {
         remote_node_up(cmd->rsc_id);
 
     } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_MIGRATE_FROM,
                             pcmk__str_casei)) {
         /* After a successful migration, we don't need to do remote_node_up()
          * because the DC already knows the node is up, and we don't want to
          * clear LRM history etc. We do need to add the remote node to this
          * host's remote peer cache, because (unless it happens to be DC)
          * it hasn't been tracking the remote node, and other code relies on
          * the cache to distinguish remote nodes from unseen cluster nodes.
          */
         pcmk__node_status_t *node =
             pcmk__cluster_lookup_remote_node(cmd->rsc_id);
 
         CRM_CHECK(node != NULL, return);
-        pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, 0);
+        pcmk__update_peer_state(__func__, node, PCMK_VALUE_MEMBER, 0);
 
     } else if (pcmk__str_eq(cmd->action, PCMK_ACTION_STOP, pcmk__str_casei)) {
         lrm_state_t *lrm_state = lrm_state_find(cmd->rsc_id);
         remote_ra_data_t *ra_data = lrm_state? lrm_state->remote_ra_data : NULL;
 
         if (ra_data) {
             if (!pcmk_is_set(ra_data->status, takeover_complete)) {
                 /* Stop means down if we didn't successfully migrate elsewhere */
                 remote_node_down(cmd->rsc_id, DOWN_KEEP_LRM);
             } else if (AM_I_DC == FALSE) {
                 /* Only the connection host and DC track node state,
                  * so if the connection migrated elsewhere and we aren't DC,
                  * un-cache the node, so we don't have stale info
                  */
                 pcmk__cluster_forget_remote_node(cmd->rsc_id);
             }
         }
     }
 
     /* We don't do anything for successful monitors, which is correct for
      * routine recurring monitors, and for monitors on nodes where the
      * connection isn't supposed to be (the cluster will stop the connection in
      * that case). However, if the initial probe finds the connection already
      * active on the node where we want it, we probably should do
      * remote_node_up(). Unfortunately, we can't distinguish that case here.
      * Given that connections have to be initiated by the cluster, the chance of
      * that should be close to zero.
      */
 }
 
 static void
 report_remote_ra_result(remote_ra_cmd_t * cmd)
 {
     lrmd_event_data_t op = { 0, };
 
     check_remote_node_state(cmd);
 
     op.type = lrmd_event_exec_complete;
     op.rsc_id = cmd->rsc_id;
     op.op_type = cmd->action;
     op.user_data = cmd->userdata;
     op.timeout = cmd->timeout;
     op.interval_ms = cmd->interval_ms;
     op.t_run = cmd->start_time;
     op.t_rcchange = cmd->start_time;
 
     lrmd__set_result(&op, cmd->result.exit_status, cmd->result.execution_status,
                      cmd->result.exit_reason);
 
     if (pcmk_is_set(cmd->status, cmd_reported_success) && !pcmk__result_ok(&(cmd->result))) {
         op.t_rcchange = time(NULL);
         /* This edge case is unlikely to ever occur, but if it did, a failure
          * would not be processed correctly. It is possible only because we can
          * detect that a connection resource's TCP connection has failed at any
          * moment after start has completed, while the actual recurring
          * operation is just a connectivity ping.
          *
          * Basically, we are not guaranteed that the first successful monitor
          * op and a subsequent failed monitor op will have different
          * timestamps, so we have to make it look like they occurred at
          * separate times. */
         if (op.t_rcchange == op.t_run) {
             op.t_rcchange++;
         }
     }
 
     if (cmd->params) {
         lrmd_key_value_t *tmp;
 
         op.params = pcmk__strkey_table(free, free);
         for (tmp = cmd->params; tmp; tmp = tmp->next) {
             pcmk__insert_dup(op.params, tmp->key, tmp->value);
         }
 
     }
     op.call_id = cmd->call_id;
     op.remote_nodename = cmd->owner;
 
     lrm_op_callback(&op);
 
     if (op.params) {
         g_hash_table_destroy(op.params);
     }
     lrmd__reset_result(&op);
 }
 
 static void
 update_remaining_timeout(remote_ra_cmd_t * cmd)
 {
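     /* cmd->timeout is in milliseconds, while start_time is in whole seconds,
      * so convert the timeout to seconds before subtracting the elapsed time,
      * then convert back. For example, a 20000ms timeout with 5s elapsed
      * leaves (20 - 5) * 1000 = 15000ms. Sub-second elapsed time is lost to
      * truncation.
      */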
     cmd->remaining_timeout = ((cmd->timeout / 1000) - (time(NULL) - cmd->start_time)) * 1000;
 }
 
 static gboolean
 retry_start_cmd_cb(gpointer data)
 {
     lrm_state_t *lrm_state = data;
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     remote_ra_cmd_t *cmd = NULL;
     int rc = ETIME;
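     // Default to an error so that a timed-out retry is reported below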
 
     if (!ra_data || !ra_data->cur_cmd) {
         return FALSE;
     }
     cmd = ra_data->cur_cmd;
     if (!pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START,
                               PCMK_ACTION_MIGRATE_FROM, NULL)) {
         return FALSE;
     }
     update_remaining_timeout(cmd);
 
     if (cmd->remaining_timeout > 0) {
         rc = handle_remote_ra_start(lrm_state, cmd, cmd->remaining_timeout);
     } else {
         pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                          PCMK_EXEC_TIMEOUT,
                          "Not enough time remains to retry remote connection");
     }
 
     if (rc != pcmk_rc_ok) {
         report_remote_ra_result(cmd);
 
         if (ra_data->cmds) {
             mainloop_set_trigger(ra_data->work);
         }
         ra_data->cur_cmd = NULL;
         free_cmd(cmd);
     } else {
         /* wait for connection event */
     }
 
     return FALSE;
 }
 
 
 static gboolean
 connection_takeover_timeout_cb(gpointer data)
 {
     lrm_state_t *lrm_state = NULL;
     remote_ra_cmd_t *cmd = data;
 
     crm_info("Takeover event timed out for node %s", cmd->rsc_id);
     cmd->takeover_timeout_id = 0;
 
     lrm_state = lrm_state_find(cmd->rsc_id);
 
     handle_remote_ra_stop(lrm_state, cmd);
     free_cmd(cmd);
 
     return FALSE;
 }
 
 static gboolean
 monitor_timeout_cb(gpointer data)
 {
     lrm_state_t *lrm_state = NULL;
     remote_ra_cmd_t *cmd = data;
 
     lrm_state = lrm_state_find(cmd->rsc_id);
 
     crm_info("Timed out waiting for remote poke response from %s%s",
              cmd->rsc_id, (lrm_state? "" : " (no LRM state)"));
     cmd->monitor_timeout_id = 0;
     pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR, PCMK_EXEC_TIMEOUT,
                      "Remote executor did not respond");
 
     if (lrm_state && lrm_state->remote_ra_data) {
         remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
         if (ra_data->cur_cmd == cmd) {
             ra_data->cur_cmd = NULL;
         }
         if (ra_data->cmds) {
             mainloop_set_trigger(ra_data->work);
         }
     }
 
     report_remote_ra_result(cmd);
     free_cmd(cmd);
 
     if (lrm_state) {
         lrm_state_disconnect(lrm_state);
     }
     return FALSE;
 }
 
 static void
 synthesize_lrmd_success(lrm_state_t *lrm_state, const char *rsc_id, const char *op_type)
 {
     lrmd_event_data_t op = { 0, };
 
     if (lrm_state == NULL) {
         /* If no lrm_state was given, assume the local node */
         lrm_state = lrm_state_find(controld_globals.our_nodename);
     }
     CRM_ASSERT(lrm_state != NULL);
 
     op.type = lrmd_event_exec_complete;
     op.rsc_id = rsc_id;
     op.op_type = op_type;
     op.t_run = time(NULL);
     op.t_rcchange = op.t_run;
     op.call_id = generate_callid();
     lrmd__set_result(&op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
     process_lrm_event(lrm_state, &op, NULL, NULL);
 }
 
 void
 remote_lrm_op_callback(lrmd_event_data_t * op)
 {
     gboolean cmd_handled = FALSE;
     lrm_state_t *lrm_state = NULL;
     remote_ra_data_t *ra_data = NULL;
     remote_ra_cmd_t *cmd = NULL;
 
     crm_debug("Processing '%s%s%s' event on remote connection to %s: %s "
               "(%d) status=%s (%d)",
               (op->op_type? op->op_type : ""), (op->op_type? " " : ""),
               lrmd_event_type2str(op->type), op->remote_nodename,
               services_ocf_exitcode_str(op->rc), op->rc,
               pcmk_exec_status_str(op->op_status), op->op_status);
 
     lrm_state = lrm_state_find(op->remote_nodename);
     if (!lrm_state || !lrm_state->remote_ra_data) {
         crm_debug("No state information found for remote connection event");
         return;
     }
     ra_data = lrm_state->remote_ra_data;
 
     if (op->type == lrmd_event_new_client) {
         // Another client has connected to the remote daemon
 
         if (pcmk_is_set(ra_data->status, expect_takeover)) {
             // Great, we knew this was coming
             lrm_remote_clear_flags(lrm_state, expect_takeover);
             lrm_remote_set_flags(lrm_state, takeover_complete);
 
         } else {
             crm_err("Disconnecting from Pacemaker Remote node %s due to "
                     "unexpected client takeover", op->remote_nodename);
             /* In this case, lrmd_tls_connection_destroy() will be called under
              * the control of the mainloop. Do not free lrm_state->conn yet; it
              * will be freed in the following stop action.
              */
             lrm_state_disconnect_only(lrm_state);
         }
         return;
     }
 
     /* Pass all execution-complete events up to the normal handler */
     if (op->type == lrmd_event_exec_complete) {
         if (pcmk_is_set(ra_data->status, takeover_complete)) {
             crm_debug("Ignoring event because this connection has been taken over by another node");
         } else {
             lrm_op_callback(op);
         }
         return;
     }
 
     if ((op->type == lrmd_event_disconnect) && (ra_data->cur_cmd == NULL)) {
 
         if (!pcmk_is_set(ra_data->status, remote_active)) {
             crm_debug("Disconnection from Pacemaker Remote node %s complete",
                       lrm_state->node_name);
 
         } else if (!remote_ra_is_in_maintenance(lrm_state)) {
             crm_err("Lost connection to Pacemaker Remote node %s",
                     lrm_state->node_name);
             ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
             ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
 
         } else {
             crm_notice("Unmanaged Pacemaker Remote node %s disconnected",
                        lrm_state->node_name);
             /* Do roughly what a 'stop' on the remote resource would do */
             handle_remote_ra_stop(lrm_state, NULL);
             remote_node_down(lrm_state->node_name, DOWN_KEEP_LRM);
             /* Now fake the reply of a successful 'stop' */
             synthesize_lrmd_success(NULL, lrm_state->node_name,
                                     PCMK_ACTION_STOP);
         }
         return;
     }
 
     if (!ra_data->cur_cmd) {
         crm_debug("No in-flight command to match this event");
         return;
     }
 
     cmd = ra_data->cur_cmd;
 
     /* Start and migrate_from actions complete only after the connection
      * comes back up. */
     if ((op->type == lrmd_event_connect)
         && pcmk__strcase_any_of(cmd->action, PCMK_ACTION_START,
                                 PCMK_ACTION_MIGRATE_FROM, NULL)) {
         if (op->connection_rc < 0) {
             update_remaining_timeout(cmd);
 
             if ((op->connection_rc == -ENOKEY)
                 || (op->connection_rc == -EKEYREJECTED)) {
                 // Hard error, don't retry
                 pcmk__set_result(&(cmd->result), PCMK_OCF_INVALID_PARAM,
                                  PCMK_EXEC_ERROR,
                                  pcmk_strerror(op->connection_rc));
 
             } else if (cmd->remaining_timeout > 3000) {
                 crm_trace("rescheduling start, remaining timeout %d", cmd->remaining_timeout);
                 g_timeout_add(1000, retry_start_cmd_cb, lrm_state);
                 return;
 
             } else {
                 crm_trace("can't reschedule start, remaining timeout too small %d",
                           cmd->remaining_timeout);
                 pcmk__format_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                                     PCMK_EXEC_TIMEOUT,
                                     "%s without enough time to retry",
                                     pcmk_strerror(op->connection_rc));
             }
 
         } else {
             lrm_state_reset_tables(lrm_state, TRUE);
             pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
             lrm_remote_set_flags(lrm_state, remote_active);
         }
 
         crm_debug("Remote connection event matched %s action", cmd->action);
         report_remote_ra_result(cmd);
         cmd_handled = TRUE;
 
     } else if ((op->type == lrmd_event_poke)
                && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
                                pcmk__str_casei)) {
 
         if (cmd->monitor_timeout_id) {
             g_source_remove(cmd->monitor_timeout_id);
             cmd->monitor_timeout_id = 0;
         }
 
         /* Only report success the first time; after that, only worry about
          * failures. For this function, getting the poke response back is
          * always a success. Pokes fail only if the send fails or the response
          * times out. */
         if (!pcmk_is_set(cmd->status, cmd_reported_success)) {
             pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
             report_remote_ra_result(cmd);
             cmd_set_flags(cmd, cmd_reported_success);
         }
 
         crm_debug("Remote poke event matched %s action", cmd->action);
 
         /* On success, keep rescheduling the monitor if an interval is set */
         if (cmd->interval_ms && !pcmk_is_set(cmd->status, cmd_cancel)) {
             ra_data->recurring_cmds = g_list_append(ra_data->recurring_cmds, cmd);
             cmd->interval_id = g_timeout_add(cmd->interval_ms,
                                              recurring_helper, cmd);
             cmd = NULL;         /* prevent free */
         }
         cmd_handled = TRUE;
 
     } else if ((op->type == lrmd_event_disconnect)
                && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
                                pcmk__str_casei)) {
         if (pcmk_is_set(ra_data->status, remote_active) &&
             !pcmk_is_set(cmd->status, cmd_cancel)) {
             pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                              PCMK_EXEC_ERROR,
                              "Remote connection unexpectedly dropped "
                              "during monitor");
             report_remote_ra_result(cmd);
             crm_err("Remote connection to %s unexpectedly dropped during monitor",
                     lrm_state->node_name);
         }
         cmd_handled = TRUE;
 
     } else if ((op->type == lrmd_event_new_client)
                && pcmk__str_eq(cmd->action, PCMK_ACTION_STOP,
                                pcmk__str_casei)) {
 
         handle_remote_ra_stop(lrm_state, cmd);
         cmd_handled = TRUE;
 
     } else {
         crm_debug("Event did not match %s action", ra_data->cur_cmd->action);
     }
 
     if (cmd_handled) {
         ra_data->cur_cmd = NULL;
         if (ra_data->cmds) {
             mainloop_set_trigger(ra_data->work);
         }
         free_cmd(cmd);
     }
 }
 
 static void
 handle_remote_ra_stop(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd)
 {
     remote_ra_data_t *ra_data = NULL;
 
     CRM_ASSERT(lrm_state);
     ra_data = lrm_state->remote_ra_data;
 
     if (!pcmk_is_set(ra_data->status, takeover_complete)) {
         /* Delete pending ops whenever the remote connection is intentionally
          * stopped */
         g_hash_table_remove_all(lrm_state->active_ops);
     } else {
         /* We no longer hold the history if this connection has been migrated;
         * however, we keep the metadata cache for future use */
         lrm_state_reset_tables(lrm_state, FALSE);
     }
 
     lrm_remote_clear_flags(lrm_state, remote_active);
     lrm_state_disconnect(lrm_state);
 
     if (ra_data->cmds) {
         g_list_free_full(ra_data->cmds, free_cmd);
     }
     if (ra_data->recurring_cmds) {
         g_list_free_full(ra_data->recurring_cmds, free_cmd);
     }
     ra_data->cmds = NULL;
     ra_data->recurring_cmds = NULL;
     ra_data->cur_cmd = NULL;
 
     if (cmd) {
         pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
         report_remote_ra_result(cmd);
     }
 }
 
 // \return Standard Pacemaker return code
 static int
 handle_remote_ra_start(lrm_state_t * lrm_state, remote_ra_cmd_t * cmd, int timeout_ms)
 {
     const char *server = NULL;
     lrmd_key_value_t *tmp = NULL;
     int port = 0;
     int timeout_used = timeout_ms > MAX_START_TIMEOUT_MS ? MAX_START_TIMEOUT_MS : timeout_ms;
     int rc = pcmk_rc_ok;
 
     for (tmp = cmd->params; tmp; tmp = tmp->next) {
         if (pcmk__strcase_any_of(tmp->key,
                                  PCMK_REMOTE_RA_ADDR, PCMK_REMOTE_RA_SERVER,
                                  NULL)) {
             server = tmp->value;
 
         } else if (pcmk__str_eq(tmp->key, PCMK_REMOTE_RA_PORT,
                                 pcmk__str_none)) {
             port = atoi(tmp->value);
 
         } else if (pcmk__str_eq(tmp->key, CRM_META "_" PCMK__META_CONTAINER,
                                 pcmk__str_none)) {
             lrm_remote_set_flags(lrm_state, controlling_guest);
         }
     }
 
     rc = controld_connect_remote_executor(lrm_state, server, port,
                                           timeout_used);
     if (rc != pcmk_rc_ok) {
         pcmk__format_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                             PCMK_EXEC_ERROR,
                             "Could not connect to Pacemaker Remote node %s: %s",
                             lrm_state->node_name, pcmk_rc_str(rc));
     }
     return rc;
 }
 
 static gboolean
 handle_remote_ra_exec(gpointer user_data)
 {
     int rc = 0;
     lrm_state_t *lrm_state = user_data;
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     remote_ra_cmd_t *cmd;
     GList *first = NULL;
 
     if (ra_data->cur_cmd) {
         /* still waiting on previous cmd */
         return TRUE;
     }
 
     while (ra_data->cmds) {
         first = ra_data->cmds;
         cmd = first->data;
         if (cmd->delay_id) {
             /* still waiting for start delay timer to trip */
             return TRUE;
         }
 
         ra_data->cmds = g_list_remove_link(ra_data->cmds, first);
         g_list_free_1(first);
 
         if (pcmk__str_any_of(cmd->action, PCMK_ACTION_START,
                              PCMK_ACTION_MIGRATE_FROM, NULL)) {
             lrm_remote_clear_flags(lrm_state, expect_takeover | takeover_complete);
             if (handle_remote_ra_start(lrm_state, cmd,
                                        cmd->timeout) == pcmk_rc_ok) {
                 /* Take care of this later, when we get the async connection result */
                 crm_debug("Initiated async remote connection, %s action will complete after connect event",
                           cmd->action);
                 ra_data->cur_cmd = cmd;
                 return TRUE;
             }
             report_remote_ra_result(cmd);
 
         } else if (!strcmp(cmd->action, PCMK_ACTION_MONITOR)) {
 
             if (lrm_state_is_connected(lrm_state) == TRUE) {
                 rc = lrm_state_poke_connection(lrm_state);
                 if (rc < 0) {
                     pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                                      PCMK_EXEC_ERROR, pcmk_strerror(rc));
                 }
             } else {
                 rc = -1;
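                 // Any nonzero rc skips the poke wait and reports this result below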
                 pcmk__set_result(&(cmd->result), PCMK_OCF_NOT_RUNNING,
                                  PCMK_EXEC_DONE, "Remote connection inactive");
             }
 
             if (rc == 0) {
                 crm_debug("Poked Pacemaker Remote at node %s, waiting for async response",
                           cmd->rsc_id);
                 ra_data->cur_cmd = cmd;
                 cmd->monitor_timeout_id = g_timeout_add(cmd->timeout, monitor_timeout_cb, cmd);
                 return TRUE;
             }
             report_remote_ra_result(cmd);
 
         } else if (!strcmp(cmd->action, PCMK_ACTION_STOP)) {
 
             if (pcmk_is_set(ra_data->status, expect_takeover)) {
                 /* Briefly wait on stop for the takeover event to occur. If the
                  * takeover event does not occur during the wait period, that's
                  * fine; it just means that the remote node's lrm_status section
                  * will be cleared, requiring all resources running on the
                  * remote node to be explicitly re-detected via probe actions.
                  * If the takeover does occur successfully, we can leave the
                  * status section intact. */
                 cmd->takeover_timeout_id = g_timeout_add((cmd->timeout/2), connection_takeover_timeout_cb, cmd);
                 ra_data->cur_cmd = cmd;
                 return TRUE;
             }
 
             handle_remote_ra_stop(lrm_state, cmd);
 
         } else if (strcmp(cmd->action, PCMK_ACTION_MIGRATE_TO) == 0) {
             lrm_remote_clear_flags(lrm_state, takeover_complete);
             lrm_remote_set_flags(lrm_state, expect_takeover);
             pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
             report_remote_ra_result(cmd);
 
         } else if (pcmk__str_any_of(cmd->action, PCMK_ACTION_RELOAD,
                                     PCMK_ACTION_RELOAD_AGENT, NULL))  {
             /* Currently the only reloadable parameter is
              * PCMK_REMOTE_RA_RECONNECT_INTERVAL, which is only used by the
              * scheduler via the CIB, so reloads are a no-op.
              *
              * @COMPAT DC <2.1.0: We only need to check for "reload" in case
              * we're in a rolling upgrade with a DC scheduling "reload" instead
              * of "reload-agent". An OCF 1.1 "reload" would be a no-op anyway,
              * so this would work for that purpose as well.
              */
             pcmk__set_result(&(cmd->result), PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
             report_remote_ra_result(cmd);
         }
 
         free_cmd(cmd);
     }
 
     return TRUE;
 }
 
 static void
 remote_ra_data_init(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = NULL;
 
     if (lrm_state->remote_ra_data) {
         return;
     }
 
     ra_data = pcmk__assert_alloc(1, sizeof(remote_ra_data_t));
     ra_data->work = mainloop_add_trigger(G_PRIORITY_HIGH, handle_remote_ra_exec, lrm_state);
     lrm_state->remote_ra_data = ra_data;
 }
 
 void
 remote_ra_cleanup(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
     if (!ra_data) {
         return;
     }
 
     if (ra_data->cmds) {
         g_list_free_full(ra_data->cmds, free_cmd);
     }
 
     if (ra_data->recurring_cmds) {
         g_list_free_full(ra_data->recurring_cmds, free_cmd);
     }
     mainloop_destroy_trigger(ra_data->work);
     free(ra_data);
     lrm_state->remote_ra_data = NULL;
 }
 
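 /*!
  * \brief Check whether an agent specification or resource ID is the remote
  *        connection resource agent
  *
  * \param[in] agent     Resource agent name (if NULL, check only \p id)
  * \param[in] provider  Resource agent provider (if NULL, check only \p id)
  * \param[in] id        Resource ID (if NULL, check only \p agent and
  *                      \p provider)
  *
  * \return TRUE if \p agent and \p provider name the remote connection agent,
  *         or if \p id matches a known remote connection other than the local
  *         node, otherwise FALSE
  */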
 gboolean
 is_remote_lrmd_ra(const char *agent, const char *provider, const char *id)
 {
     if (agent && provider && !strcmp(agent, REMOTE_LRMD_RA) && !strcmp(provider, "pacemaker")) {
         return TRUE;
     }
     if ((id != NULL) && (lrm_state_find(id) != NULL)
         && !pcmk__str_eq(id, controld_globals.our_nodename, pcmk__str_casei)) {
         return TRUE;
     }
 
     return FALSE;
 }
 
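 /*!
  * \brief Get resource information for a remote connection resource
  *
  * \param[in] lrm_state  Executor state object (unused here)
  * \param[in] rsc_id     ID of connection resource to look up
  *
  * \return Newly allocated resource information if \p rsc_id is a known remote
  *         connection, otherwise NULL
  * \note The caller is responsible for freeing the result.
  */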
 lrmd_rsc_info_t *
 remote_ra_get_rsc_info(lrm_state_t * lrm_state, const char *rsc_id)
 {
     lrmd_rsc_info_t *info = NULL;
 
     if (lrm_state_find(rsc_id) != NULL) {
         info = pcmk__assert_alloc(1, sizeof(lrmd_rsc_info_t));
 
         info->id = pcmk__str_copy(rsc_id);
         info->type = pcmk__str_copy(REMOTE_LRMD_RA);
         info->standard = pcmk__str_copy(PCMK_RESOURCE_CLASS_OCF);
         info->provider = pcmk__str_copy("pacemaker");
     }
 
     return info;
 }
 
 static gboolean
 is_remote_ra_supported_action(const char *action)
 {
     return pcmk__str_any_of(action,
                             PCMK_ACTION_START,
                             PCMK_ACTION_STOP,
                             PCMK_ACTION_MONITOR,
                             PCMK_ACTION_MIGRATE_TO,
                             PCMK_ACTION_MIGRATE_FROM,
                             PCMK_ACTION_RELOAD_AGENT,
                             PCMK_ACTION_RELOAD,
                             NULL);
 }
 
 static GList *
 fail_all_monitor_cmds(GList * list)
 {
     GList *rm_list = NULL;
     remote_ra_cmd_t *cmd = NULL;
     GList *gIter = NULL;
 
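     // First pass: collect all recurring monitor commands from the list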
     for (gIter = list; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms > 0)
             && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
                             pcmk__str_casei)) {
             rm_list = g_list_append(rm_list, cmd);
         }
     }
 
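     // Second pass: fail each collected command and remove it from the list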
     for (gIter = rm_list; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
 
         pcmk__set_result(&(cmd->result), PCMK_OCF_UNKNOWN_ERROR,
                          PCMK_EXEC_ERROR, "Lost connection to remote executor");
         crm_trace("Pre-emptively failing %s %s (interval=%u, %s)",
                   cmd->action, cmd->rsc_id, cmd->interval_ms, cmd->userdata);
         report_remote_ra_result(cmd);
 
         list = g_list_remove(list, cmd);
         free_cmd(cmd);
     }
 
     /* frees only the list data, not the cmds */
     g_list_free(rm_list);
     return list;
 }
 
 static GList *
 remove_cmd(GList * list, const char *action, guint interval_ms)
 {
     remote_ra_cmd_t *cmd = NULL;
     GList *gIter = NULL;
 
     for (gIter = list; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms == interval_ms)
             && pcmk__str_eq(cmd->action, action, pcmk__str_casei)) {
             break;
         }
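         // Not a match, so reset cmd (it remains NULL if the list is exhausted)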
         cmd = NULL;
     }
     if (cmd) {
         list = g_list_remove(list, cmd);
         free_cmd(cmd);
     }
     return list;
 }
 
 int
 remote_ra_cancel(lrm_state_t *lrm_state, const char *rsc_id,
                  const char *action, guint interval_ms)
 {
     lrm_state_t *connection_rsc = NULL;
     remote_ra_data_t *ra_data = NULL;
 
     connection_rsc = lrm_state_find(rsc_id);
     if (!connection_rsc || !connection_rsc->remote_ra_data) {
         return -EINVAL;
     }
 
     ra_data = connection_rsc->remote_ra_data;
     ra_data->cmds = remove_cmd(ra_data->cmds, action, interval_ms);
     ra_data->recurring_cmds = remove_cmd(ra_data->recurring_cmds, action,
                                          interval_ms);
     if (ra_data->cur_cmd &&
         (ra_data->cur_cmd->interval_ms == interval_ms) &&
         (pcmk__str_eq(ra_data->cur_cmd->action, action, pcmk__str_casei))) {
 
         cmd_set_flags(ra_data->cur_cmd, cmd_cancel);
     }
 
     return 0;
 }
 
 static remote_ra_cmd_t *
 handle_dup_monitor(remote_ra_data_t *ra_data, guint interval_ms,
                    const char *userdata)
 {
     GList *gIter = NULL;
     remote_ra_cmd_t *cmd = NULL;
 
     /* There are three places where a potential duplicate monitor operation
      * could exist:
      * 1. the recurring_cmds list, where the op is waiting for its next interval
      * 2. the cmds list, where the op is queued to be executed immediately
      * 3. cur_cmd, which means the monitor op is in flight right now
      */
     if (interval_ms == 0) {
         return NULL;
     }
 
     if (ra_data->cur_cmd &&
         !pcmk_is_set(ra_data->cur_cmd->status, cmd_cancel) &&
         (ra_data->cur_cmd->interval_ms == interval_ms)
         && pcmk__str_eq(ra_data->cur_cmd->action, PCMK_ACTION_MONITOR,
                         pcmk__str_casei)) {
 
         cmd = ra_data->cur_cmd;
         goto handle_dup;
     }
 
     for (gIter = ra_data->recurring_cmds; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms == interval_ms)
             && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
                             pcmk__str_casei)) {
             goto handle_dup;
         }
     }
 
     for (gIter = ra_data->cmds; gIter != NULL; gIter = gIter->next) {
         cmd = gIter->data;
         if ((cmd->interval_ms == interval_ms)
             && pcmk__str_eq(cmd->action, PCMK_ACTION_MONITOR,
                             pcmk__str_casei)) {
             goto handle_dup;
         }
     }
 
     return NULL;
 
 handle_dup:
 
     crm_trace("merging duplicate monitor cmd " PCMK__OP_FMT,
               cmd->rsc_id, PCMK_ACTION_MONITOR, interval_ms);
 
     /* update the userdata */
     if (userdata) {
        free(cmd->userdata);
        cmd->userdata = pcmk__str_copy(userdata);
     }
 
     /* if we've already reported success, generate a new call id */
     if (pcmk_is_set(cmd->status, cmd_reported_success)) {
         cmd->start_time = time(NULL);
         cmd->call_id = generate_callid();
         cmd_clear_flags(cmd, cmd_reported_success);
     }
 
     /* If we have an interval_id set, we are in the process of waiting for this
      * cmd's next interval. Instead of waiting, cancel the timer and execute
      * the action immediately */
     if (cmd->interval_id) {
         g_source_remove(cmd->interval_id);
         cmd->interval_id = 0;
         recurring_helper(cmd);
     }
 
     return cmd;
 }
 
 /*!
  * \internal
  * \brief Execute an action using the (internal) ocf:pacemaker:remote agent
  *
  * \param[in]     lrm_state      Executor state object for remote connection
  * \param[in]     rsc_id         Connection resource ID
  * \param[in]     action         Action to execute
  * \param[in]     userdata       String to copy and pass to execution callback
  * \param[in]     interval_ms    Action interval (in milliseconds)
  * \param[in]     timeout_ms     Action timeout (in milliseconds)
  * \param[in]     start_delay_ms Delay (in milliseconds) before executing action
  * \param[in,out] params         Connection resource parameters
  * \param[out]    call_id        Where to store call ID on success
  *
  * \return Standard Pacemaker return code
  * \note This takes ownership of \p params, which should not be used or freed
  *       after calling this function.
  */
 int
 controld_execute_remote_agent(const lrm_state_t *lrm_state, const char *rsc_id,
                               const char *action, const char *userdata,
                               guint interval_ms, int timeout_ms,
                               int start_delay_ms, lrmd_key_value_t *params,
                               int *call_id)
 {
     lrm_state_t *connection_rsc = NULL;
     remote_ra_cmd_t *cmd = NULL;
     remote_ra_data_t *ra_data = NULL;
 
     *call_id = 0;
 
     CRM_CHECK((lrm_state != NULL) && (rsc_id != NULL) && (action != NULL)
               && (userdata != NULL) && (call_id != NULL),
               lrmd_key_value_freeall(params); return EINVAL);
 
     if (!is_remote_ra_supported_action(action)) {
         lrmd_key_value_freeall(params);
         return EOPNOTSUPP;
     }
 
     connection_rsc = lrm_state_find(rsc_id);
     if (connection_rsc == NULL) {
         lrmd_key_value_freeall(params);
         return ENOTCONN;
     }
 
     remote_ra_data_init(connection_rsc);
     ra_data = connection_rsc->remote_ra_data;
 
     cmd = handle_dup_monitor(ra_data, interval_ms, userdata);
     if (cmd) {
         *call_id = cmd->call_id;
         lrmd_key_value_freeall(params);
         return pcmk_rc_ok;
     }
 
     cmd = pcmk__assert_alloc(1, sizeof(remote_ra_cmd_t));
 
     cmd->owner = pcmk__str_copy(lrm_state->node_name);
     cmd->rsc_id = pcmk__str_copy(rsc_id);
     cmd->action = pcmk__str_copy(action);
     cmd->userdata = pcmk__str_copy(userdata);
     cmd->interval_ms = interval_ms;
     cmd->timeout = timeout_ms;
     cmd->start_delay = start_delay_ms;
     cmd->params = params;
     cmd->start_time = time(NULL);
 
     cmd->call_id = generate_callid();
 
     if (cmd->start_delay) {
         cmd->delay_id = g_timeout_add(cmd->start_delay, start_delay_helper, cmd);
     }
 
     ra_data->cmds = g_list_append(ra_data->cmds, cmd);
     mainloop_set_trigger(ra_data->work);
 
     *call_id = cmd->call_id;
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Immediately fail all monitors of a remote node, if proxied here
  *
  * \param[in] node_name  Name of pacemaker_remote node
  */
 void
 remote_ra_fail(const char *node_name)
 {
     lrm_state_t *lrm_state = lrm_state_find(node_name);
 
     if (lrm_state && lrm_state_is_connected(lrm_state)) {
         remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
 
         crm_info("Failing monitors on Pacemaker Remote node %s", node_name);
         ra_data->recurring_cmds = fail_all_monitor_cmds(ra_data->recurring_cmds);
         ra_data->cmds = fail_all_monitor_cmds(ra_data->cmds);
     }
 }
 
 /* A guest node fencing implied by host fencing looks like:
  *
  *  <pseudo_event id="103" operation="stonith" operation_key="stonith-lxc1-off"
  *                on_node="lxc1" on_node_uuid="lxc1">
  *     <attributes CRM_meta_on_node="lxc1" CRM_meta_on_node_uuid="lxc1"
  *                 CRM_meta_stonith_action="off" crm_feature_set="3.0.12"/>
  *     <downed>
  *       <node id="lxc1"/>
  *     </downed>
  *  </pseudo_event>
  */
 #define XPATH_PSEUDO_FENCE "/" PCMK__XE_PSEUDO_EVENT \
     "[@" PCMK_XA_OPERATION "='stonith']/" PCMK__XE_DOWNED "/" PCMK_XE_NODE
 
 /*!
  * \internal
  * \brief Check a pseudo-action for Pacemaker Remote node side effects
  *
  * \param[in,out] xml  XML of pseudo-action to check
  */
 void
 remote_ra_process_pseudo(xmlNode *xml)
 {
     xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_FENCE);
 
     if (numXpathResults(search) == 1) {
         xmlNode *result = getXpathResult(search, 0);
 
         /* Normally, we handle the necessary side effects of a guest node stop
          * action when reporting the remote agent's result. However, if the stop
          * is implied due to fencing, it will be a fencing pseudo-event, and
          * there won't be a result to report. Handle that case here.
          *
          * This will result in a duplicate call to remote_node_down() if the
          * guest stop was real instead of implied, but that shouldn't hurt.
          *
          * There is still one corner case that isn't handled: if a guest node
          * isn't running any resources when its host is fenced, it will appear
          * to be cleanly stopped, so there will be no pseudo-fence, and our
          * peer cache state will be incorrect unless and until the guest is
          * recovered.
          */
         if (result) {
             const char *remote = pcmk__xe_id(result);
 
             if (remote) {
                 remote_node_down(remote, DOWN_ERASE_LRM);
             }
         }
     }
     freeXpathObject(search);
 }
 
 static void
 remote_ra_maintenance(lrm_state_t * lrm_state, gboolean maintenance)
 {
     xmlNode *update, *state;
     int call_opt;
     pcmk__node_status_t *node = NULL;
 
     call_opt = crmd_cib_smart_opt();
     node = pcmk__cluster_lookup_remote_node(lrm_state->node_name);
     CRM_CHECK(node != NULL, return);
     update = pcmk__xe_create(NULL, PCMK_XE_STATUS);
     state = create_node_state_update(node, node_update_none, update,
                                      __func__);
     crm_xml_add(state, PCMK__XA_NODE_IN_MAINTENANCE, (maintenance? "1" : "0"));
     if (controld_update_cib(PCMK_XE_STATUS, update, call_opt,
                             NULL) == pcmk_rc_ok) {
         /* TODO: still not 100% sure that async update will succeed ... */
         if (maintenance) {
             lrm_remote_set_flags(lrm_state, remote_in_maint);
         } else {
             lrm_remote_clear_flags(lrm_state, remote_in_maint);
         }
     }
     pcmk__xml_free(update);
 }
 
 #define XPATH_PSEUDO_MAINTENANCE "//" PCMK__XE_PSEUDO_EVENT         \
     "[@" PCMK_XA_OPERATION "='" PCMK_ACTION_MAINTENANCE_NODES "']/" \
     PCMK__XE_MAINTENANCE
 
 /*!
  * \internal
  * \brief Check a pseudo-action holding updates for maintenance state
  *
  * \param[in,out] xml  XML of pseudo-action to check
  */
 void
 remote_ra_process_maintenance_nodes(xmlNode *xml)
 {
     xmlXPathObjectPtr search = xpath_search(xml, XPATH_PSEUDO_MAINTENANCE);
 
     if (numXpathResults(search) == 1) {
         xmlNode *node;
         int cnt = 0, cnt_remote = 0;
 
         for (node = pcmk__xe_first_child(getXpathResult(search, 0),
                                          PCMK_XE_NODE, NULL, NULL);
              node != NULL; node = pcmk__xe_next_same(node)) {
 
             lrm_state_t *lrm_state = lrm_state_find(pcmk__xe_id(node));
 
             cnt++;
             if (lrm_state && lrm_state->remote_ra_data &&
                 pcmk_is_set(((remote_ra_data_t *) lrm_state->remote_ra_data)->status, remote_active)) {
 
                 const char *in_maint_s = NULL;
                 int in_maint;
 
                 cnt_remote++;
                 in_maint_s = crm_element_value(node,
                                                PCMK__XA_NODE_IN_MAINTENANCE);
                 pcmk__scan_min_int(in_maint_s, &in_maint, 0);
                 remote_ra_maintenance(lrm_state, in_maint);
             }
         }
         crm_trace("Action holds %d nodes (%d remotes found) adjusting "
                   PCMK_OPT_MAINTENANCE_MODE,
                   cnt, cnt_remote);
     }
     freeXpathObject(search);
 }
 
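 // \return TRUE if the remote node is in maintenance mode, otherwise FALSE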
 gboolean
 remote_ra_is_in_maintenance(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     return pcmk_is_set(ra_data->status, remote_in_maint);
 }
 
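 // \return TRUE if this remote connection manages a guest node, otherwise FALSE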
 gboolean
 remote_ra_controlling_guest(lrm_state_t * lrm_state)
 {
     remote_ra_data_t *ra_data = lrm_state->remote_ra_data;
     return pcmk_is_set(ra_data->status, controlling_guest);
 }
diff --git a/include/crm/cluster.h b/include/crm/cluster.h
index 5810005537..e1bb210a25 100644
--- a/include/crm/cluster.h
+++ b/include/crm/cluster.h
@@ -1,113 +1,109 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__CRM_CLUSTER__H
 #  define PCMK__CRM_CLUSTER__H
 
 #  include <stdint.h>           // uint32_t, uint64_t
 #  include <glib.h>             // gboolean, GHashTable
 #  include <libxml/tree.h>      // xmlNode
 #  include <crm/common/xml.h>
 #  include <crm/common/util.h>
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 #if SUPPORT_COROSYNC
 #include <corosync/cpg.h>       // cpg_callbacks_t
 #endif
 
 // @COMPAT Make this internal when we can break API backward compatibility
 //! \deprecated Do not use (public access will be removed in a future release)
 extern GHashTable *crm_peer_cache;
 
 // @COMPAT Make this internal when we can break API backward compatibility
 //! \deprecated Do not use (public access will be removed in a future release)
 extern GHashTable *crm_remote_peer_cache;
 
 // @COMPAT Make this internal when we can break API backward compatibility
 //! \deprecated Do not use (public access will be removed in a future release)
 extern unsigned long long crm_peer_seq;
 
 // @COMPAT Make this internal when we can break API backward compatibility
 //! \deprecated Do not use (public access will be removed in a future release)
 #define CRM_NODE_LOST      "lost"
 
-// @COMPAT Make this internal when we can break API backward compatibility
-//! \deprecated Do not use (public access will be removed in a future release)
-#define CRM_NODE_MEMBER    "member"
-
 //! \internal Do not use
 typedef struct pcmk__cluster_private pcmk__cluster_private_t;
 
 // Implementation of pcmk_cluster_t
 // @COMPAT Make contents internal when we can break API backward compatibility
 //!@{
 //! \deprecated Do not use (public access will be removed in a future release)
 struct pcmk__cluster {
     /* @COMPAT Once all members are moved to pcmk__cluster_private_t, we can
      * make that the pcmk_cluster_t implementation and drop this struct
      * altogether, leaving pcmk_cluster_t as an opaque public type.
      */
     //! \internal Do not use
     pcmk__cluster_private_t *priv;
 
     // NOTE: sbd (as of at least 1.5.2) uses this
     //! \deprecated Call pcmk_cluster_set_destroy_fn() to set this
     void (*destroy) (gpointer);
 
 #if SUPPORT_COROSYNC
     // NOTE: sbd (as of at least 1.5.2) uses this
     /*!
      * \deprecated Call pcmk_cpg_set_deliver_fn() and pcmk_cpg_set_confchg_fn()
      *             to set these
      */
     cpg_callbacks_t cpg;
 #endif  // SUPPORT_COROSYNC
 };
 //!@}
 
 //! Connection to a cluster layer
 typedef struct pcmk__cluster pcmk_cluster_t;
 
 int pcmk_cluster_connect(pcmk_cluster_t *cluster);
 int pcmk_cluster_disconnect(pcmk_cluster_t *cluster);
 
 pcmk_cluster_t *pcmk_cluster_new(void);
 void pcmk_cluster_free(pcmk_cluster_t *cluster);
 
 int pcmk_cluster_set_destroy_fn(pcmk_cluster_t *cluster, void (*fn)(gpointer));
 #if SUPPORT_COROSYNC
 int pcmk_cpg_set_deliver_fn(pcmk_cluster_t *cluster, cpg_deliver_fn_t fn);
 int pcmk_cpg_set_confchg_fn(pcmk_cluster_t *cluster, cpg_confchg_fn_t fn);
 #endif  // SUPPORT_COROSYNC
 
 /*!
  * \enum pcmk_cluster_layer
  * \brief Types of cluster layer
  */
 enum pcmk_cluster_layer {
     pcmk_cluster_layer_unknown  = 1,    //!< Unknown cluster layer
     pcmk_cluster_layer_invalid  = 2,    //!< Invalid cluster layer
     pcmk_cluster_layer_corosync = 32,   //!< Corosync Cluster Engine
 };
 
 enum pcmk_cluster_layer pcmk_get_cluster_layer(void);
 const char *pcmk_cluster_layer_text(enum pcmk_cluster_layer layer);
 
 #ifdef __cplusplus
 }
 #endif
 
 #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
 #include <crm/cluster/compat.h>
 #endif
 
 #endif
diff --git a/lib/cluster/corosync.c b/lib/cluster/corosync.c
index 1c4a4df62a..5cfa0f6dd6 100644
--- a/lib/cluster/corosync.c
+++ b/lib/cluster/corosync.c
@@ -1,816 +1,816 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <arpa/inet.h>
 #include <inttypes.h>                   // PRIu64, etc.
 #include <netdb.h>
 #include <netinet/in.h>
 #include <stdbool.h>
 #include <sys/socket.h>
 #include <sys/utsname.h>
 
 #include <bzlib.h>
 #include <corosync/cfg.h>
 #include <corosync/cmap.h>
 #include <corosync/corodefs.h>
 #include <corosync/corotypes.h>
 #include <corosync/hdb.h>
 #include <corosync/quorum.h>
 #include <qb/qbipcc.h>
 #include <qb/qbutil.h>
 
 #include <crm/cluster/internal.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipc_internal.h>    // PCMK__SPECIAL_PID
 #include <crm/common/mainloop.h>
 #include <crm/common/xml.h>
 
 #include "crmcluster_private.h"
 
 static quorum_handle_t pcmk_quorum_handle = 0;
 
 static gboolean (*quorum_app_callback)(unsigned long long seq,
                                        gboolean quorate) = NULL;
 
 /*!
  * \internal
  * \brief Get the Corosync UUID associated with a Pacemaker node
  *
  * \param[in] node  Pacemaker node
  *
  * \return Newly allocated string with node's Corosync UUID, or NULL if unknown
  * \note It is the caller's responsibility to free the result with free().
  */
 char *
 pcmk__corosync_uuid(const pcmk__node_status_t *node)
 {
     CRM_ASSERT(pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync);
 
     if (node != NULL) {
         if (node->cluster_layer_id > 0) {
             return crm_strdup_printf("%" PRIu32, node->cluster_layer_id);
         } else {
             crm_info("Node %s is not yet known by Corosync", node->name);
         }
     }
     return NULL;
 }
 
 static bool
 node_name_is_valid(const char *key, const char *name)
 {
     int octet;
 
     if (name == NULL) {
         crm_trace("%s is empty", key);
         return false;
 
     } else if (sscanf(name, "%d.%d.%d.%d", &octet, &octet, &octet, &octet) == 4) {
         crm_trace("%s contains an IPv4 address (%s), ignoring", key, name);
         return false;
 
     } else if (strstr(name, ":") != NULL) {
         crm_trace("%s contains an IPv6 address (%s), ignoring", key, name);
         return false;
     }
     crm_trace("'%s: %s' is valid", key, name);
     return true;
 }
 
 /*!
  * \internal
  * \brief Get Corosync node name corresponding to a node ID
  *
  * \param[in] cmap_handle  Connection to Corosync CMAP
  * \param[in] nodeid       Node ID to check
  *
  * \return Newly allocated string with name or (if no name) IP address
  *         associated with first address assigned to a Corosync node ID (or NULL
  *         if unknown)
  * \note It is the caller's responsibility to free the result with free().
  */
 char *
 pcmk__corosync_name(uint64_t /*cmap_handle_t */ cmap_handle, uint32_t nodeid)
 {
     // Originally based on corosync-quorumtool.c:node_name()
 
     int lpc = 0;
     cs_error_t rc = CS_OK;
     int retries = 0;
     char *name = NULL;
     cmap_handle_t local_handle = 0;
     int fd = -1;
     uid_t found_uid = 0;
     gid_t found_gid = 0;
     pid_t found_pid = 0;
     int rv;
 
     if (nodeid == 0) {
         nodeid = pcmk__cpg_local_nodeid(0);
     }
 
     if (cmap_handle == 0 && local_handle == 0) {
         retries = 0;
         crm_trace("Initializing CMAP connection");
         do {
             rc = pcmk__init_cmap(&local_handle);
             if (rc != CS_OK) {
                 retries++;
                 crm_debug("API connection setup failed: %s.  Retrying in %ds", cs_strerror(rc),
                           retries);
                 sleep(retries);
             }
 
         } while (retries < 5 && rc != CS_OK);
 
         if (rc != CS_OK) {
             crm_warn("Could not connect to Cluster Configuration Database API, error %s",
                      cs_strerror(rc));
             local_handle = 0;
         }
     }
 
     if (cmap_handle == 0) {
         cmap_handle = local_handle;
 
         rc = cmap_fd_get(cmap_handle, &fd);
         if (rc != CS_OK) {
             crm_err("Could not obtain the CMAP API connection: %s (%d)",
                     cs_strerror(rc), rc);
             goto bail;
         }
 
         /* CMAP provider run as root (in given user namespace, anyway)? */
         if (!(rv = crm_ipc_is_authentic_process(fd, (uid_t) 0,(gid_t) 0, &found_pid,
                                                 &found_uid, &found_gid))) {
             crm_err("CMAP provider is not authentic:"
                     " process %lld (uid: %lld, gid: %lld)",
                     (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
                     (long long) found_uid, (long long) found_gid);
             goto bail;
         } else if (rv < 0) {
             crm_err("Could not verify authenticity of CMAP provider: %s (%d)",
                     strerror(-rv), -rv);
             goto bail;
         }
     }
 
     while (name == NULL && cmap_handle != 0) {
         uint32_t id = 0;
         char *key = NULL;
 
         key = crm_strdup_printf("nodelist.node.%d.nodeid", lpc);
         rc = cmap_get_uint32(cmap_handle, key, &id);
         crm_trace("Checking %u vs %u from %s", nodeid, id, key);
         free(key);
 
         if (rc != CS_OK) {
             break;
         }
 
         if (nodeid == id) {
             crm_trace("Searching for node name for %u in nodelist.node.%d %s",
                       nodeid, lpc, pcmk__s(name, "<null>"));
             if (name == NULL) {
                 key = crm_strdup_printf("nodelist.node.%d.name", lpc);
                 cmap_get_string(cmap_handle, key, &name);
                 crm_trace("%s = %s", key, pcmk__s(name, "<null>"));
                 free(key);
             }
             if (name == NULL) {
                 key = crm_strdup_printf("nodelist.node.%d.ring0_addr", lpc);
                 cmap_get_string(cmap_handle, key, &name);
                 crm_trace("%s = %s", key, pcmk__s(name, "<null>"));
 
                 if (!node_name_is_valid(key, name)) {
                     free(name);
                     name = NULL;
                 }
                 free(key);
             }
             break;
         }
 
         lpc++;
     }
 
 bail:
     if (local_handle) {
         cmap_finalize(local_handle);
     }
 
     if (name == NULL) {
         crm_info("Unable to get node name for nodeid %u", nodeid);
     }
     return name;
 }
 
 /*!
  * \internal
  * \brief Disconnect from Corosync cluster
  *
  * \param[in,out] cluster  Cluster object to disconnect
  */
 void
 pcmk__corosync_disconnect(pcmk_cluster_t *cluster)
 {
     pcmk__cpg_disconnect(cluster);
 
     if (pcmk_quorum_handle != 0) {
         quorum_finalize(pcmk_quorum_handle);
         pcmk_quorum_handle = 0;
     }
     crm_notice("Disconnected from Corosync");
 }
 
 /*!
  * \internal
  * \brief Dispatch function for quorum connection file descriptor
  *
  * \param[in] user_data  Ignored
  *
  * \return 0 on success, -1 on error (per mainloop_io_t interface)
  */
 static int
 quorum_dispatch_cb(gpointer user_data)
 {
     int rc = quorum_dispatch(pcmk_quorum_handle, CS_DISPATCH_ALL);
 
     if (rc < 0) {
         crm_err("Connection to the Quorum API failed: %d", rc);
         quorum_finalize(pcmk_quorum_handle);
         pcmk_quorum_handle = 0;
         return -1;
     }
     return 0;
 }
 
 /*!
  * \internal
  * \brief Notification callback for Corosync quorum connection
  *
  * \param[in] handle             Corosync quorum connection
  * \param[in] quorate            Whether cluster is quorate
  * \param[in] ring_id            Corosync ring ID
  * \param[in] view_list_entries  Number of entries in \p view_list
  * \param[in] view_list          Corosync node IDs in membership
  */
 static void
 quorum_notification_cb(quorum_handle_t handle, uint32_t quorate,
                        uint64_t ring_id, uint32_t view_list_entries,
                        uint32_t *view_list)
 {
     int i;
     GHashTableIter iter;
     pcmk__node_status_t *node = NULL;
     static gboolean init_phase = TRUE;
 
     bool is_quorate = (quorate != 0);
     bool was_quorate = pcmk__cluster_has_quorum();
 
     if (is_quorate && !was_quorate) {
         crm_notice("Quorum acquired " QB_XS " membership=%" PRIu64
                    " members=%" PRIu32,
                    ring_id, view_list_entries);
         pcmk__cluster_set_quorum(true);
 
     } else if (!is_quorate && was_quorate) {
         crm_warn("Quorum lost " QB_XS " membership=%" PRIu64 " members=%" PRIu32,
                  ring_id, view_list_entries);
         pcmk__cluster_set_quorum(false);
 
     } else {
         crm_info("Quorum %s " QB_XS " membership=%" PRIu64 " members=%" PRIu32,
                  (is_quorate? "retained" : "still lost"), ring_id,
                  view_list_entries);
     }
 
     if (view_list_entries == 0 && init_phase) {
         crm_info("Corosync membership is still forming, ignoring");
         return;
     }
 
     init_phase = FALSE;
 
     /* Reset membership_id for all cached nodes so we can tell which ones aren't
      * in the view list */
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
         node->membership_id = 0;
     }
 
     /* Update the peer cache for each node in view list */
     for (i = 0; i < view_list_entries; i++) {
         uint32_t id = view_list[i];
 
         crm_debug("Member[%d] %u ", i, id);
 
         /* Get this node's peer cache entry (adding one if not already there) */
         node = pcmk__get_node(id, NULL, NULL, pcmk__node_search_cluster_member);
         if (node->name == NULL) {
             char *name = pcmk__corosync_name(0, id);
 
             crm_info("Obtaining name for new node %u", id);
             node = pcmk__get_node(id, name, NULL,
                                   pcmk__node_search_cluster_member);
             free(name);
         }
 
         // Update the node state (including updating membership_id to ring_id)
-        pcmk__update_peer_state(__func__, node, CRM_NODE_MEMBER, ring_id);
+        pcmk__update_peer_state(__func__, node, PCMK_VALUE_MEMBER, ring_id);
     }
 
     /* Remove any peer cache entries we didn't update */
     pcmk__reap_unseen_nodes(ring_id);
 
     if (quorum_app_callback) {
         quorum_app_callback(ring_id, is_quorate);
     }
 }
 
 /*!
  * \internal
  * \brief Connect to Corosync quorum service
  *
  * \param[in] dispatch   Connection dispatch callback
  * \param[in] destroy    Connection destroy callback
  */
 void
 pcmk__corosync_quorum_connect(gboolean (*dispatch)(unsigned long long,
                                                    gboolean),
                               void (*destroy)(gpointer))
 {
     cs_error_t rc;
     int fd = 0;
     int quorate = 0;
     uint32_t quorum_type = 0;
     struct mainloop_fd_callbacks quorum_fd_callbacks;
     uid_t found_uid = 0;
     gid_t found_gid = 0;
     pid_t found_pid = 0;
     int rv;
 
     quorum_fd_callbacks.dispatch = quorum_dispatch_cb;
     quorum_fd_callbacks.destroy = destroy;
 
     crm_debug("Configuring Pacemaker to obtain quorum from Corosync");
 
     {
 #if 0
         // New way but not supported by all Corosync 2 versions
         quorum_model_v0_data_t quorum_model_data = {
             .model = QUORUM_MODEL_V0,
             .quorum_notify_fn = quorum_notification_cb,
         };
 
         rc = quorum_model_initialize(&pcmk_quorum_handle, QUORUM_MODEL_V0,
                                      (quorum_model_data_t *) &quorum_model_data,
                                      &quorum_type, NULL);
 #else
         quorum_callbacks_t quorum_callbacks = {
             .quorum_notify_fn = quorum_notification_cb,
         };
 
         rc = quorum_initialize(&pcmk_quorum_handle, &quorum_callbacks,
                                &quorum_type);
 #endif
     }
 
     if (rc != CS_OK) {
         crm_err("Could not connect to the Quorum API: %s (%d)",
                 cs_strerror(rc), rc);
         goto bail;
 
     } else if (quorum_type != QUORUM_SET) {
         crm_err("Corosync quorum is not configured");
         goto bail;
     }
 
     rc = quorum_fd_get(pcmk_quorum_handle, &fd);
     if (rc != CS_OK) {
         crm_err("Could not obtain the Quorum API connection: %s (%d)",
                 cs_strerror(rc), rc);
         goto bail;
     }
 
     /* Quorum provider run as root (in given user namespace, anyway)? */
     if (!(rv = crm_ipc_is_authentic_process(fd, (uid_t) 0,(gid_t) 0, &found_pid,
                                             &found_uid, &found_gid))) {
         crm_err("Quorum provider is not authentic:"
                 " process %lld (uid: %lld, gid: %lld)",
                 (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
                 (long long) found_uid, (long long) found_gid);
         rc = CS_ERR_ACCESS;
         goto bail;
     } else if (rv < 0) {
         crm_err("Could not verify authenticity of Quorum provider: %s (%d)",
                 strerror(-rv), -rv);
         rc = CS_ERR_ACCESS;
         goto bail;
     }
 
     rc = quorum_getquorate(pcmk_quorum_handle, &quorate);
     if (rc != CS_OK) {
         crm_err("Could not obtain the current Quorum API state: %d", rc);
         goto bail;
     }
 
     if (quorate) {
         crm_notice("Quorum acquired");
     } else {
         crm_warn("No quorum");
     }
     quorum_app_callback = dispatch;
     pcmk__cluster_set_quorum(quorate != 0);
 
     rc = quorum_trackstart(pcmk_quorum_handle,
                            CS_TRACK_CHANGES | CS_TRACK_CURRENT);
     if (rc != CS_OK) {
         crm_err("Could not setup Quorum API notifications: %d", rc);
         goto bail;
     }
 
     mainloop_add_fd("quorum", G_PRIORITY_HIGH, fd, dispatch, &quorum_fd_callbacks);
 
     pcmk__corosync_add_nodes(NULL);
 
   bail:
     if (rc != CS_OK) {
         quorum_finalize(pcmk_quorum_handle);
     }
 }
 
 /*!
  * \internal
  * \brief Connect to Corosync cluster layer
  *
  * \param[in,out] cluster  Initialized cluster object to connect
  *
  * \return Standard Pacemaker return code
  */
 int
 pcmk__corosync_connect(pcmk_cluster_t *cluster)
 {
     const enum pcmk_cluster_layer cluster_layer = pcmk_get_cluster_layer();
     const char *cluster_layer_s = pcmk_cluster_layer_text(cluster_layer);
     int rc = pcmk_rc_ok;
 
     pcmk__cluster_init_node_caches();
 
     if (cluster_layer != pcmk_cluster_layer_corosync) {
         crm_err("Invalid cluster layer: %s " QB_XS " cluster_layer=%d",
                 cluster_layer_s, cluster_layer);
         return EINVAL;
     }
 
     rc = pcmk__cpg_connect(cluster);
     if (rc != pcmk_rc_ok) {
         // Error message was logged by pcmk__cpg_connect()
         return rc;
     }
     crm_info("Connection to %s established", cluster_layer_s);
 
     cluster->priv->node_id = pcmk__cpg_local_nodeid(0);
     if (cluster->priv->node_id == 0) {
         crm_err("Could not determine local node ID");
         return ENXIO;
     }
 
     cluster->priv->node_name = pcmk__cluster_node_name(0);
     if (cluster->priv->node_name == NULL) {
         crm_err("Could not determine local node name");
         return ENXIO;
     }
 
     // Ensure local node always exists in peer cache
     pcmk__get_node(cluster->priv->node_id, cluster->priv->node_name, NULL,
                    pcmk__node_search_cluster_member);
 
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Check whether a Corosync cluster is active
  *
  * \return \c true if Corosync is found active, or \c false otherwise
  */
 bool
 pcmk__corosync_is_active(void)
 {
     cmap_handle_t handle;
     int rc = pcmk__init_cmap(&handle);
 
     if (rc == CS_OK) {
         cmap_finalize(handle);
         return true;
     }
 
     crm_info("Failed to initialize the cmap API: %s (%d)",
              pcmk__cs_err_str(rc), rc);
     return false;
 }
 
 /*!
  * \internal
  * \brief Check whether a Corosync cluster peer is active
  *
  * \param[in] node  Node to check
  *
  * \return \c true if \p node is an active Corosync peer, or \c false otherwise
  */
 bool
 pcmk__corosync_is_peer_active(const pcmk__node_status_t *node)
 {
     if (node == NULL) {
         crm_trace("Corosync peer inactive: NULL");
         return false;
     }
-    if (!pcmk__str_eq(node->state, CRM_NODE_MEMBER, pcmk__str_none)) {
+    if (!pcmk__str_eq(node->state, PCMK_VALUE_MEMBER, pcmk__str_none)) {
         crm_trace("Corosync peer %s inactive: state=%s",
                   node->name, node->state);
         return false;
     }
     if (!pcmk_is_set(node->processes, crm_proc_cpg)) {
         crm_trace("Corosync peer %s inactive " QB_XS " processes=%.16" PRIx32,
                   node->name, node->processes);
         return false;
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Load Corosync node list (via CMAP) into peer cache and optionally XML
  *
  * \param[in,out] xml_parent  If not NULL, add <node> entry here for each node
  *
  * \return true if any nodes were found, false otherwise
  */
 bool
 pcmk__corosync_add_nodes(xmlNode *xml_parent)
 {
     int lpc = 0;
     cs_error_t rc = CS_OK;
     int retries = 0;
     bool any = false;
     cmap_handle_t cmap_handle;
     int fd = -1;
     uid_t found_uid = 0;
     gid_t found_gid = 0;
     pid_t found_pid = 0;
     int rv;
 
     do {
         rc = pcmk__init_cmap(&cmap_handle);
         if (rc != CS_OK) {
             retries++;
             crm_debug("API connection setup failed: %s.  Retrying in %ds", cs_strerror(rc),
                       retries);
             sleep(retries);
         }
 
     } while (retries < 5 && rc != CS_OK);
 
     if (rc != CS_OK) {
         crm_warn("Could not connect to Cluster Configuration Database API, error %d", rc);
         return false;
     }
 
     rc = cmap_fd_get(cmap_handle, &fd);
     if (rc != CS_OK) {
         crm_err("Could not obtain the CMAP API connection: %s (%d)",
                 cs_strerror(rc), rc);
         goto bail;
     }
 
     /* CMAP provider run as root (in given user namespace, anyway)? */
     if (!(rv = crm_ipc_is_authentic_process(fd, (uid_t) 0, (gid_t) 0,
                                             &found_pid, &found_uid,
                                             &found_gid))) {
         crm_err("CMAP provider is not authentic:"
                 " process %lld (uid: %lld, gid: %lld)",
                 (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
                 (long long) found_uid, (long long) found_gid);
         goto bail;
     } else if (rv < 0) {
         crm_err("Could not verify authenticity of CMAP provider: %s (%d)",
                 strerror(-rv), -rv);
         goto bail;
     }
 
     pcmk__cluster_init_node_caches();
     crm_trace("Initializing Corosync node list");
     for (lpc = 0; TRUE; lpc++) {
         uint32_t nodeid = 0;
         char *name = NULL;
         char *key = NULL;
 
         key = crm_strdup_printf("nodelist.node.%d.nodeid", lpc);
         rc = cmap_get_uint32(cmap_handle, key, &nodeid);
         free(key);
 
         if (rc != CS_OK) {
             break;
         }
 
         name = pcmk__corosync_name(cmap_handle, nodeid);
         if (name != NULL) {
             GHashTableIter iter;
             pcmk__node_status_t *node = NULL;
 
             g_hash_table_iter_init(&iter, crm_peer_cache);
             while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
                 if ((node != NULL)
                     && (node->cluster_layer_id > 0)
                     && (node->cluster_layer_id != nodeid)
                     && pcmk__str_eq(node->name, name, pcmk__str_casei)) {
 
                     crm_crit("Nodes %" PRIu32 " and %" PRIu32 " share the "
                              "same name '%s': shutting down",
                              node->cluster_layer_id, nodeid, name);
                     crm_exit(CRM_EX_FATAL);
                 }
             }
         }
 
         if (nodeid > 0 || name != NULL) {
             crm_trace("Initializing node[%d] %u = %s", lpc, nodeid, name);
             pcmk__get_node(nodeid, name, NULL, pcmk__node_search_cluster_member);
         }
 
         if (nodeid > 0 && name != NULL) {
             any = true;
 
             if (xml_parent) {
                 xmlNode *node = pcmk__xe_create(xml_parent, PCMK_XE_NODE);
 
                 pcmk__xe_set_id(node, "%u", nodeid);
                 crm_xml_add(node, PCMK_XA_UNAME, name);
             }
         }
 
         free(name);
     }
 bail:
     cmap_finalize(cmap_handle);
     return any;
 }
 
 /*!
  * \internal
  * \brief Get cluster name from Corosync configuration (via CMAP)
  *
  * \return Newly allocated string with cluster name if configured, or NULL
  */
 char *
 pcmk__corosync_cluster_name(void)
 {
     cmap_handle_t handle;
     char *cluster_name = NULL;
     cs_error_t rc = CS_OK;
     int fd = -1;
     uid_t found_uid = 0;
     gid_t found_gid = 0;
     pid_t found_pid = 0;
     int rv;
 
     rc = pcmk__init_cmap(&handle);
     if (rc != CS_OK) {
         crm_info("Failed to initialize the cmap API: %s (%d)",
                  cs_strerror(rc), rc);
         return NULL;
     }
 
     rc = cmap_fd_get(handle, &fd);
     if (rc != CS_OK) {
         crm_err("Could not obtain the CMAP API connection: %s (%d)",
                 cs_strerror(rc), rc);
         goto bail;
     }
 
     /* CMAP provider run as root (in given user namespace, anyway)? */
     if (!(rv = crm_ipc_is_authentic_process(fd, (uid_t) 0, (gid_t) 0,
                                             &found_pid, &found_uid,
                                             &found_gid))) {
         crm_err("CMAP provider is not authentic:"
                 " process %lld (uid: %lld, gid: %lld)",
                 (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
                 (long long) found_uid, (long long) found_gid);
         goto bail;
     } else if (rv < 0) {
         crm_err("Could not verify authenticity of CMAP provider: %s (%d)",
                 strerror(-rv), -rv);
         goto bail;
     }
 
     rc = cmap_get_string(handle, "totem.cluster_name", &cluster_name);
     if (rc != CS_OK) {
         crm_info("Cannot get totem.cluster_name: %s (%d)", cs_strerror(rc), rc);
 
     } else {
         crm_debug("cmap totem.cluster_name = '%s'", cluster_name);
     }
 
 bail:
     cmap_finalize(handle);
     return cluster_name;
 }
 
 /*!
  * \internal
  * \brief Check (via CMAP) whether Corosync configuration has a node list
  *
  * \return true if Corosync has node list, otherwise false
  */
 bool
 pcmk__corosync_has_nodelist(void)
 {
     cs_error_t cs_rc = CS_OK;
     int retries = 0;
     cmap_handle_t cmap_handle;
     cmap_iter_handle_t iter_handle;
     char key_name[CMAP_KEYNAME_MAXLEN + 1];
     int fd = -1;
     uid_t found_uid = 0;
     gid_t found_gid = 0;
     pid_t found_pid = 0;
     int rc = pcmk_ok;
 
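     // The result is computed once and cached for later calls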
     static bool got_result = false;
     static bool result = false;
 
     if (got_result) {
         return result;
     }
 
     // Connect to CMAP
     do {
         cs_rc = pcmk__init_cmap(&cmap_handle);
         if (cs_rc != CS_OK) {
             retries++;
             crm_debug("CMAP connection failed: %s (rc=%d, retrying in %ds)",
                       cs_strerror(cs_rc), cs_rc, retries);
             sleep(retries);
         }
     } while ((retries < 5) && (cs_rc != CS_OK));
     if (cs_rc != CS_OK) {
         crm_warn("Assuming Corosync does not have node list: "
                  "CMAP connection failed (%s) " QB_XS " rc=%d",
                  cs_strerror(cs_rc), cs_rc);
         return false;
     }
 
     // Get CMAP connection file descriptor
     cs_rc = cmap_fd_get(cmap_handle, &fd);
     if (cs_rc != CS_OK) {
         crm_warn("Assuming Corosync does not have node list: "
                  "CMAP unusable (%s) " QB_XS " rc=%d",
                  cs_strerror(cs_rc), cs_rc);
         goto bail;
     }
 
     // Check whether CMAP connection is authentic (i.e. provided by root)
     rc = crm_ipc_is_authentic_process(fd, (uid_t) 0, (gid_t) 0,
                                       &found_pid, &found_uid, &found_gid);
     if (rc == 0) {
         crm_warn("Assuming Corosync does not have node list: "
                  "CMAP provider is inauthentic "
                  QB_XS " pid=%lld uid=%lld gid=%lld",
                  (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
                  (long long) found_uid, (long long) found_gid);
         goto bail;
     } else if (rc < 0) {
         crm_warn("Assuming Corosync does not have node list: "
                  "Could not verify CMAP authenticity (%s) " QB_XS " rc=%d",
                   pcmk_strerror(rc), rc);
         goto bail;
     }
 
     // Check whether nodelist section is present
     cs_rc = cmap_iter_init(cmap_handle, "nodelist", &iter_handle);
     if (cs_rc != CS_OK) {
         crm_warn("Assuming Corosync does not have node list: "
                  "CMAP not readable (%s) " QB_XS " rc=%d",
                  cs_strerror(cs_rc), cs_rc);
         goto bail;
     }
 
     cs_rc = cmap_iter_next(cmap_handle, iter_handle, key_name, NULL, NULL);
     if (cs_rc == CS_OK) {
         result = true;
     }
 
     cmap_iter_finalize(cmap_handle, iter_handle);
     got_result = true;
     crm_debug("Corosync %s node list", (result? "has" : "does not have"));
 
 bail:
     cmap_finalize(cmap_handle);
     return result;
 }
diff --git a/lib/cluster/cpg.c b/lib/cluster/cpg.c
index 53b206b1be..74579f6b10 100644
--- a/lib/cluster/cpg.c
+++ b/lib/cluster/cpg.c
@@ -1,1060 +1,1060 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <arpa/inet.h>
 #include <inttypes.h>                   // PRIu32
 #include <netdb.h>
 #include <netinet/in.h>
 #include <stdbool.h>
 #include <stdint.h>                     // uint32_t
 #include <sys/socket.h>
 #include <sys/types.h>                  // size_t
 #include <sys/utsname.h>
 
 #include <bzlib.h>
 #include <corosync/corodefs.h>
 #include <corosync/corotypes.h>
 #include <corosync/hdb.h>
 #include <corosync/cpg.h>
 #include <qb/qbipc_common.h>
 #include <qb/qbipcc.h>
 #include <qb/qbutil.h>
 
 #include <crm/cluster/internal.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipc_internal.h>    // PCMK__SPECIAL_PID
 #include <crm/common/mainloop.h>
 #include <crm/common/xml.h>
 
 #include "crmcluster_private.h"
 
 /* @TODO Once we can update the public API to require pcmk_cluster_t* in more
  *       functions, we can ditch this in favor of cluster->cpg_handle.
  */
 static cpg_handle_t pcmk_cpg_handle = 0;
 
 // @TODO These could be moved to pcmk_cluster_t* at that time as well
 static bool cpg_evicted = false;
 static GList *cs_message_queue = NULL;
 static int cs_message_timer = 0;
 
 struct pcmk__cpg_host_s {
     uint32_t id;
     uint32_t pid;
     enum pcmk__cluster_msg type;
     uint32_t size;
     char uname[MAX_NAME];
 } __attribute__ ((packed));
 
 typedef struct pcmk__cpg_host_s pcmk__cpg_host_t;
 
 struct pcmk__cpg_msg_s {
     struct qb_ipc_response_header header __attribute__ ((aligned(8)));
     uint32_t id;
     gboolean is_compressed;
 
     pcmk__cpg_host_t host;
     pcmk__cpg_host_t sender;
 
     uint32_t size;
     uint32_t compressed_size;
     /* 584 bytes */
     char data[0];
 
 } __attribute__ ((packed));
 
 typedef struct pcmk__cpg_msg_s pcmk__cpg_msg_t;
 
 static void crm_cs_flush(gpointer data);
 
 #define msg_data_len(msg) (msg->is_compressed?msg->compressed_size:msg->size)
 
 #define cs_repeat(rc, counter, max, code) do {                          \
         rc = code;                                                      \
         if ((rc == CS_ERR_TRY_AGAIN) || (rc == CS_ERR_QUEUE_FULL)) {    \
             counter++;                                                  \
             crm_debug("Retrying operation after %ds", counter);         \
             sleep(counter);                                             \
         } else {                                                        \
             break;                                                      \
         }                                                               \
     } while (counter < max)
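 
 /* For example, pcmk__cpg_connect() below joins a group with
  *     int retries = 0;
  *     cs_repeat(rc, retries, 30, cpg_join(handle, &cluster->priv->group));
  * sleeping 1s, 2s, etc. between successive attempts.
  */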
 
 /*!
  * \internal
  * \brief Get the local Corosync node ID (via CPG)
  *
  * \param[in] handle  CPG connection to use (or 0 to use new connection)
  *
  * \return Corosync ID of local node (or 0 if not known)
  */
 uint32_t
 pcmk__cpg_local_nodeid(cpg_handle_t handle)
 {
     cs_error_t rc = CS_OK;
     int retries = 0;
     static uint32_t local_nodeid = 0;
     cpg_handle_t local_handle = handle;
     cpg_model_v1_data_t cpg_model_info = {CPG_MODEL_V1, NULL, NULL, NULL, 0};
     int fd = -1;
     uid_t found_uid = 0;
     gid_t found_gid = 0;
     pid_t found_pid = 0;
     int rv = 0;
 
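     // Reuse the ID cached by a previous successful lookup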
     if (local_nodeid != 0) {
         return local_nodeid;
     }
 
     if (handle == 0) {
         crm_trace("Creating connection");
         cs_repeat(rc, retries, 5,
                   cpg_model_initialize(&local_handle, CPG_MODEL_V1,
                                        (cpg_model_data_t *) &cpg_model_info,
                                        NULL));
         if (rc != CS_OK) {
             crm_err("Could not connect to the CPG API: %s (%d)",
                     cs_strerror(rc), rc);
             return 0;
         }
 
         rc = cpg_fd_get(local_handle, &fd);
         if (rc != CS_OK) {
             crm_err("Could not obtain the CPG API connection: %s (%d)",
                     cs_strerror(rc), rc);
             goto bail;
         }
 
         // CPG provider run as root (at least in given user namespace)?
         rv = crm_ipc_is_authentic_process(fd, (uid_t) 0, (gid_t) 0, &found_pid,
                                           &found_uid, &found_gid);
         if (rv == 0) {
             crm_err("CPG provider is not authentic:"
                     " process %lld (uid: %lld, gid: %lld)",
                     (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
                     (long long) found_uid, (long long) found_gid);
             goto bail;
 
         } else if (rv < 0) {
             crm_err("Could not verify authenticity of CPG provider: %s (%d)",
                     strerror(-rv), -rv);
             goto bail;
         }
     }
 
     if (rc == CS_OK) {
         retries = 0;
         crm_trace("Performing lookup");
         cs_repeat(rc, retries, 5, cpg_local_get(local_handle, &local_nodeid));
     }
 
     if (rc != CS_OK) {
         crm_err("Could not get local node id from the CPG API: %s (%d)",
                 pcmk__cs_err_str(rc), rc);
     }
 
 bail:
     if (handle == 0) {
         crm_trace("Closing connection");
         cpg_finalize(local_handle);
     }
     crm_debug("Local nodeid is %u", local_nodeid);
     return local_nodeid;
 }
 
 /*!
  * \internal
  * \brief Callback function for Corosync message queue timer
  *
  * \param[in] data  CPG handle
  *
  * \return FALSE (to indicate to glib that timer should not be removed)
  */
 static gboolean
 crm_cs_flush_cb(gpointer data)
 {
     cs_message_timer = 0;
     crm_cs_flush(data);
     return FALSE;
 }
 
 // Send no more than this many CPG messages in one flush
 #define CS_SEND_MAX 200
 
 /*!
  * \internal
  * \brief Send messages in Corosync CPG message queue
  *
  * \param[in] data   CPG handle
  */
 static void
 crm_cs_flush(gpointer data)
 {
     unsigned int sent = 0;
     guint queue_len = 0;
     cs_error_t rc = 0;
     cpg_handle_t *handle = (cpg_handle_t *) data;
 
     if (*handle == 0) {
         crm_trace("Connection is dead");
         return;
     }
 
     queue_len = g_list_length(cs_message_queue);
     if (((queue_len % 1000) == 0) && (queue_len > 1)) {
         crm_err("CPG queue has grown to %d", queue_len);
 
     } else if (queue_len == CS_SEND_MAX) {
         crm_warn("CPG queue has grown to %d", queue_len);
     }
 
     if (cs_message_timer != 0) {
         /* There is already a timer, wait until it goes off */
         crm_trace("Timer active %d", cs_message_timer);
         return;
     }
 
     while ((cs_message_queue != NULL) && (sent < CS_SEND_MAX)) {
         struct iovec *iov = cs_message_queue->data;
 
         rc = cpg_mcast_joined(*handle, CPG_TYPE_AGREED, iov, 1);
         if (rc != CS_OK) {
             break;
         }
 
         sent++;
         crm_trace("CPG message sent, size=%llu",
                   (unsigned long long) iov->iov_len);
 
         cs_message_queue = g_list_remove(cs_message_queue, iov);
         free(iov->iov_base);
         free(iov);
     }
 
     queue_len -= sent;
     do_crm_log((queue_len > 5)? LOG_INFO : LOG_TRACE,
                "Sent %u CPG message%s (%d still queued): %s (rc=%d)",
                sent, pcmk__plural_s(sent), queue_len, pcmk__cs_err_str(rc),
                (int) rc);
 
     if (cs_message_queue) {
         uint32_t delay_ms = 100;
         if (rc != CS_OK) {
             /* Proportionally more if sending failed but cap at 1s */
             delay_ms = QB_MIN(1000, CS_SEND_MAX + (10 * queue_len));
         }
         cs_message_timer = g_timeout_add(delay_ms, crm_cs_flush_cb, data);
     }
 }
 
 /*!
  * \internal
  * \brief Dispatch function for CPG handle
  *
  * \param[in,out] user_data  Cluster object
  *
  * \return 0 on success, -1 on error (per mainloop_io_t interface)
  */
 static int
 pcmk_cpg_dispatch(gpointer user_data)
 {
     cs_error_t rc = CS_OK;
     pcmk_cluster_t *cluster = (pcmk_cluster_t *) user_data;
 
     rc = cpg_dispatch(cluster->priv->cpg_handle, CS_DISPATCH_ONE);
     if (rc != CS_OK) {
         crm_err("Connection to the CPG API failed: %s (%d)",
                 pcmk__cs_err_str(rc), rc);
         cpg_finalize(cluster->priv->cpg_handle);
         cluster->priv->cpg_handle = 0;
         return -1;
 
     } else if (cpg_evicted) {
         crm_err("Evicted from CPG membership");
         return -1;
     }
     return 0;
 }
 
 static inline const char *
 ais_dest(const pcmk__cpg_host_t *host)
 {
     return (host->size > 0)? host->uname : "<all>";
 }
 
 static inline const char *
 msg_type2text(enum pcmk__cluster_msg type)
 {
     switch (type) {
         case pcmk__cluster_msg_attrd:
             return "attrd";
         case pcmk__cluster_msg_based:
             return "cib";
         case pcmk__cluster_msg_controld:
             return "crmd";
         case pcmk__cluster_msg_execd:
             return "lrmd";
         case pcmk__cluster_msg_fenced:
             return "stonith-ng";
         default:
             return "unknown";
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a Corosync CPG message is valid
  *
  * \param[in] msg   Corosync CPG message to check
  *
  * \return true if \p msg is valid, otherwise false
  */
 static bool
 check_message_sanity(const pcmk__cpg_msg_t *msg)
 {
     int32_t payload_size = msg->header.size - sizeof(pcmk__cpg_msg_t);
 
     if (payload_size < 1) {
         crm_err("%sCPG message %d from %s invalid: "
                 "Claimed size of %d bytes is too small "
                 QB_XS " from %s[%u] to %s@%s",
                 (msg->is_compressed? "Compressed " : ""),
                 msg->id, ais_dest(&(msg->sender)),
                 (int) msg->header.size,
                 msg_type2text(msg->sender.type), msg->sender.pid,
                 msg_type2text(msg->host.type), ais_dest(&(msg->host)));
         return false;
     }
 
     if (msg->header.error != CS_OK) {
         crm_err("%sCPG message %d from %s invalid: "
                 "Sender indicated error %d "
                 QB_XS " from %s[%u] to %s@%s",
                 (msg->is_compressed? "Compressed " : ""),
                 msg->id, ais_dest(&(msg->sender)),
                 msg->header.error,
                 msg_type2text(msg->sender.type), msg->sender.pid,
                 msg_type2text(msg->host.type), ais_dest(&(msg->host)));
         return false;
     }
 
     if (msg_data_len(msg) != payload_size) {
         crm_err("%sCPG message %d from %s invalid: "
                 "Total size %d inconsistent with payload size %d "
                 QB_XS " from %s[%u] to %s@%s",
                 (msg->is_compressed? "Compressed " : ""),
                 msg->id, ais_dest(&(msg->sender)),
                 (int) msg->header.size, (int) msg_data_len(msg),
                 msg_type2text(msg->sender.type), msg->sender.pid,
                 msg_type2text(msg->host.type), ais_dest(&(msg->host)));
         return false;
     }
 
     if (!msg->is_compressed &&
         /* msg->size != (strlen(msg->data) + 1) would be a stronger check,
          * but checking the last byte or two should be quick
          */
         (((msg->size > 1) && (msg->data[msg->size - 2] == '\0'))
          || (msg->data[msg->size - 1] != '\0'))) {
         crm_err("CPG message %d from %s invalid: "
                 "Payload does not end at byte %llu "
                 QB_XS " from %s[%u] to %s@%s",
                 msg->id, ais_dest(&(msg->sender)),
                 (unsigned long long) msg->size,
                 msg_type2text(msg->sender.type), msg->sender.pid,
                 msg_type2text(msg->host.type), ais_dest(&(msg->host)));
         return false;
     }
 
     crm_trace("Verified %d-byte %sCPG message %d from %s[%u]@%s to %s@%s",
               (int) msg->header.size, (msg->is_compressed? "compressed " : ""),
               msg->id, msg_type2text(msg->sender.type), msg->sender.pid,
               ais_dest(&(msg->sender)),
               msg_type2text(msg->host.type), ais_dest(&(msg->host)));
     return true;
 }
 
 /*!
  * \internal
  * \brief Extract text data from a Corosync CPG message
  *
  * \param[in]     handle     CPG connection (to get local node ID if not known)
  * \param[in]     sender_id  Corosync ID of node that sent message
  * \param[in]     pid        Process ID of message sender (for logging only)
  * \param[in,out] content    CPG message
  * \param[out]    from       If not \c NULL, will be set to sender uname
  *                           (valid for the lifetime of \p content)
  *
  * \return Newly allocated string with message data
  *
  * \note The caller is responsible for freeing the return value using \c free().
  */
 char *
 pcmk__cpg_message_data(cpg_handle_t handle, uint32_t sender_id, uint32_t pid,
                        void *content, const char **from)
 {
     char *data = NULL;
     pcmk__cpg_msg_t *msg = content;
 
     if (handle != 0) {
         // Do filtering and field massaging
         uint32_t local_nodeid = pcmk__cpg_local_nodeid(handle);
         const char *local_name = pcmk__cluster_local_node_name();
 
         if ((msg->sender.id != 0) && (msg->sender.id != sender_id)) {
             crm_err("Nodeid mismatch from %" PRIu32 ".%" PRIu32
                     ": claimed nodeid=%" PRIu32,
                     sender_id, pid, msg->sender.id);
             return NULL;
         }
         if ((msg->host.id != 0) && (local_nodeid != msg->host.id)) {
             crm_trace("Not for us: %" PRIu32" != %" PRIu32,
                       msg->host.id, local_nodeid);
             return NULL;
         }
         if ((msg->host.size > 0)
             && !pcmk__str_eq(msg->host.uname, local_name, pcmk__str_casei)) {
 
             crm_trace("Not for us: %s != %s", msg->host.uname, local_name);
             return NULL;
         }
 
         msg->sender.id = sender_id;
         if (msg->sender.size == 0) {
             const pcmk__node_status_t *peer =
                 pcmk__get_node(sender_id, NULL, NULL,
                                pcmk__node_search_cluster_member);
 
             if (peer->name == NULL) {
                 crm_err("No node name for peer with nodeid=%u", sender_id);
 
             } else {
                 crm_notice("Fixing node name for peer with nodeid=%u",
                            sender_id);
                 msg->sender.size = strlen(peer->name);
                 memset(msg->sender.uname, 0, MAX_NAME);
                 memcpy(msg->sender.uname, peer->name, msg->sender.size);
             }
         }
     }
 
     crm_trace("Got new%s message (size=%d, %d, %d)",
               msg->is_compressed ? " compressed" : "",
               msg_data_len(msg), msg->size, msg->compressed_size);
 
     if (from != NULL) {
         *from = msg->sender.uname;
     }
 
     if (msg->is_compressed && (msg->size > 0)) {
         int rc = BZ_OK;
         char *uncompressed = NULL;
         unsigned int new_size = msg->size + 1;
 
         if (!check_message_sanity(msg)) {
             goto badmsg;
         }
 
         crm_trace("Decompressing message data");
         uncompressed = pcmk__assert_alloc(1, new_size);
         rc = BZ2_bzBuffToBuffDecompress(uncompressed, &new_size, msg->data,
                                         msg->compressed_size, 1, 0);
 
         rc = pcmk__bzlib2rc(rc);
 
         if (rc != pcmk_rc_ok) {
             crm_err("Decompression failed: %s " QB_XS " rc=%d",
                     pcmk_rc_str(rc), rc);
             free(uncompressed);
             goto badmsg;
         }
 
         CRM_ASSERT(new_size == msg->size);
 
         data = uncompressed;
 
     } else if (!check_message_sanity(msg)) {
         goto badmsg;
 
     } else {
         data = strdup(msg->data);
     }
 
     // Is this necessary?
     pcmk__get_node(msg->sender.id, msg->sender.uname, NULL,
                    pcmk__node_search_cluster_member);
 
     crm_trace("Payload: %.200s", data);
     return data;
 
   badmsg:
     crm_err("Invalid message (id=%d, dest=%s:%s, from=%s:%s.%d):"
             " min=%d, total=%d, size=%d, bz2_size=%d",
             msg->id, ais_dest(&(msg->host)), msg_type2text(msg->host.type),
             ais_dest(&(msg->sender)), msg_type2text(msg->sender.type),
             msg->sender.pid, (int)sizeof(pcmk__cpg_msg_t),
             msg->header.size, msg->size, msg->compressed_size);
 
     free(data);
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Compare cpg_address objects by node ID
  *
  * \param[in] first   First cpg_address structure to compare
  * \param[in] second  Second cpg_address structure to compare
  *
  * \return Negative number if first's node ID is lower,
  *         positive number if first's node ID is greater,
  *         or 0 if both node IDs are equal
  */
 static int
 cmp_member_list_nodeid(const void *first, const void *second)
 {
     const struct cpg_address *const a = *((const struct cpg_address **) first),
                              *const b = *((const struct cpg_address **) second);
     if (a->nodeid < b->nodeid) {
         return -1;
     } else if (a->nodeid > b->nodeid) {
         return 1;
     }
     /* don't bother with "reason" nor "pid" */
     return 0;
 }
 
 /*!
  * \internal
  * \brief Get a readable string equivalent of a cpg_reason_t value
  *
  * \param[in] reason  CPG reason value
  *
  * \return Readable string suitable for logging
  */
 static const char *
 cpgreason2str(cpg_reason_t reason)
 {
     switch (reason) {
         case CPG_REASON_JOIN:       return " via cpg_join";
         case CPG_REASON_LEAVE:      return " via cpg_leave";
         case CPG_REASON_NODEDOWN:   return " via cluster exit";
         case CPG_REASON_NODEUP:     return " via cluster join";
         case CPG_REASON_PROCDOWN:   return " for unknown reason";
         default:                    break;
     }
     return "";
 }
 
 /*!
  * \internal
  * \brief Get a log-friendly node name
  *
  * \param[in] peer  Node to check
  *
  * \return Node's uname, or readable string if not known
  */
 static inline const char *
 peer_name(const pcmk__node_status_t *peer)
 {
     return (peer != NULL)? pcmk__s(peer->name, "peer node") : "unknown node";
 }
 
 /*!
  * \internal
  * \brief Process a CPG peer's leaving the cluster
  *
  * \param[in] cpg_group_name      CPG group name (for logging)
  * \param[in] event_counter       Event number (for logging)
  * \param[in] local_nodeid        Node ID of local node
  * \param[in] cpg_peer            CPG peer that left
  * \param[in] sorted_member_list  List of remaining members, qsort()-ed by ID
  * \param[in] member_list_entries Number of entries in \p sorted_member_list
  */
 static void
 node_left(const char *cpg_group_name, int event_counter,
           uint32_t local_nodeid, const struct cpg_address *cpg_peer,
           const struct cpg_address **sorted_member_list,
           size_t member_list_entries)
 {
     pcmk__node_status_t *peer =
         pcmk__search_node_caches(cpg_peer->nodeid, NULL,
                                  pcmk__node_search_cluster_member);
     const struct cpg_address **rival = NULL;
 
     /* Most CPG-related Pacemaker code assumes that only one process on a node
      * can be in the process group, but Corosync does not impose this
      * limitation, and more than one can be a member in practice due to a
      * daemon attempting to start while another instance is already running.
      *
      * Check for any such duplicate instances, because we don't want to process
      * their leaving as if our actual peer left. If the peer that left still
      * has an entry in sorted_member_list (with a different PID), we ignore
      * its departure.
      *
      * @TODO Track CPG members' PIDs so we can tell exactly who left.
      */
     if (peer != NULL) {
         rival = bsearch(&cpg_peer, sorted_member_list, member_list_entries,
                         sizeof(const struct cpg_address *),
                         cmp_member_list_nodeid);
     }
 
     if (rival == NULL) {
         crm_info("Group %s event %d: %s (node %u pid %u) left%s",
                  cpg_group_name, event_counter, peer_name(peer),
                  cpg_peer->nodeid, cpg_peer->pid,
                  cpgreason2str(cpg_peer->reason));
         if (peer != NULL) {
             crm_update_peer_proc(__func__, peer, crm_proc_cpg,
                                  PCMK_VALUE_OFFLINE);
         }
     } else if (cpg_peer->nodeid == local_nodeid) {
         crm_warn("Group %s event %d: duplicate local pid %u left%s",
                  cpg_group_name, event_counter,
                  cpg_peer->pid, cpgreason2str(cpg_peer->reason));
     } else {
         crm_warn("Group %s event %d: "
                  "%s (node %u) duplicate pid %u left%s (%u remains)",
                  cpg_group_name, event_counter, peer_name(peer),
                  cpg_peer->nodeid, cpg_peer->pid,
                  cpgreason2str(cpg_peer->reason), (*rival)->pid);
     }
 }
 
 /*!
  * \internal
  * \brief Handle a CPG configuration change event
  *
  * \param[in] handle               CPG connection
  * \param[in] group_name           CPG group name
  * \param[in] member_list          List of current CPG members
  * \param[in] member_list_entries  Number of entries in \p member_list
  * \param[in] left_list            List of CPG members that left
  * \param[in] left_list_entries    Number of entries in \p left_list
  * \param[in] joined_list          List of CPG members that joined
  * \param[in] joined_list_entries  Number of entries in \p joined_list
  *
  * \note This is of type \c cpg_confchg_fn_t, intended to be used in a
  *       \c cpg_callbacks_t object.
  */
 void
 pcmk__cpg_confchg_cb(cpg_handle_t handle,
                      const struct cpg_name *group_name,
                      const struct cpg_address *member_list,
                      size_t member_list_entries,
                      const struct cpg_address *left_list,
                      size_t left_list_entries,
                      const struct cpg_address *joined_list,
                      size_t joined_list_entries)
 {
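     // Configuration-change events seen so far (used only in log messages)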
     static int counter = 0;
 
     bool found = false;
     uint32_t local_nodeid = pcmk__cpg_local_nodeid(handle);
     const struct cpg_address **sorted = NULL;
 
     sorted = pcmk__assert_alloc(member_list_entries,
                                 sizeof(const struct cpg_address *));
 
     for (size_t iter = 0; iter < member_list_entries; iter++) {
         sorted[iter] = member_list + iter;
     }
 
     // Sort by node ID so node_left() can cheaply bsearch() the member list
     qsort(sorted, member_list_entries, sizeof(const struct cpg_address *),
           cmp_member_list_nodeid);
 
     for (int i = 0; i < left_list_entries; i++) {
         node_left(group_name->value, counter, local_nodeid, &left_list[i],
                   sorted, member_list_entries);
     }
     free(sorted);
     sorted = NULL;
 
     for (int i = 0; i < joined_list_entries; i++) {
         crm_info("Group %s event %d: node %u pid %u joined%s",
                  group_name->value, counter, joined_list[i].nodeid,
                  joined_list[i].pid, cpgreason2str(joined_list[i].reason));
     }
 
     for (int i = 0; i < member_list_entries; i++) {
         pcmk__node_status_t *peer =
             pcmk__get_node(member_list[i].nodeid, NULL, NULL,
                            pcmk__node_search_cluster_member);
 
         if (member_list[i].nodeid == local_nodeid
                 && member_list[i].pid != getpid()) {
             // See the note in node_left()
             crm_warn("Group %s event %d: detected duplicate local pid %u",
                      group_name->value, counter, member_list[i].pid);
             continue;
         }
         crm_info("Group %s event %d: %s (node %u pid %u) is member",
                  group_name->value, counter, peer_name(peer),
                  member_list[i].nodeid, member_list[i].pid);
 
         /* If the caller left auto-reaping enabled, this will also update the
          * state to member.
          */
         peer = crm_update_peer_proc(__func__, peer, crm_proc_cpg,
                                     PCMK_VALUE_ONLINE);
 
-        if (peer && peer->state && strcmp(peer->state, CRM_NODE_MEMBER)) {
+        if (peer && peer->state && strcmp(peer->state, PCMK_VALUE_MEMBER)) {
             /* The node is a CPG member, but we currently think it's not a
              * cluster member. This is possible only if auto-reaping was
              * disabled. The node may be joining, and we happened to get the CPG
              * notification before the quorum notification; or the node may have
              * just died, and we are processing its final messages; or a bug
              * has affected the peer cache.
              */
             time_t now = time(NULL);
 
             if (peer->when_lost == 0) {
                 // Track when we first got into this contradictory state
                 peer->when_lost = now;
 
             } else if (now > (peer->when_lost + 60)) {
                 // If it persists for more than a minute, update the state
                 crm_warn("Node %u is member of group %s but was believed "
                          "offline",
                          member_list[i].nodeid, group_name->value);
-                pcmk__update_peer_state(__func__, peer, CRM_NODE_MEMBER, 0);
+                pcmk__update_peer_state(__func__, peer, PCMK_VALUE_MEMBER, 0);
             }
         }
 
         if (local_nodeid == member_list[i].nodeid) {
             found = true;
         }
     }
 
     if (!found) {
         crm_err("Local node was evicted from group %s", group_name->value);
         cpg_evicted = true;
     }
 
     counter++;
 }
 
 /*!
  * \brief Set the CPG deliver callback function for a cluster object
  *
  * \param[in,out] cluster  Cluster object
  * \param[in]     fn       Deliver callback function to set
  *
  * \return Standard Pacemaker return code
  */
 int
 pcmk_cpg_set_deliver_fn(pcmk_cluster_t *cluster, cpg_deliver_fn_t fn)
 {
     if (cluster == NULL) {
         return EINVAL;
     }
     cluster->cpg.cpg_deliver_fn = fn;
     return pcmk_rc_ok;
 }
 
 /*!
  * \brief Set the CPG config change callback function for a cluster object
  *
  * \param[in,out] cluster  Cluster object
  * \param[in]     fn       Configuration change callback function to set
  *
  * \return Standard Pacemaker return code
  */
 int
 pcmk_cpg_set_confchg_fn(pcmk_cluster_t *cluster, cpg_confchg_fn_t fn)
 {
     if (cluster == NULL) {
         return EINVAL;
     }
     cluster->cpg.cpg_confchg_fn = fn;
     return pcmk_rc_ok;
 }
 
 /*!
  * \brief Connect to Corosync CPG
  *
  * \param[in,out] cluster  Initialized cluster object to connect
  *
  * \return Standard Pacemaker return code
  */
 int
 pcmk__cpg_connect(pcmk_cluster_t *cluster)
 {
     cs_error_t rc;
     int fd = -1;
     int retries = 0;
     uint32_t id = 0;
     pcmk__node_status_t *peer = NULL;
     cpg_handle_t handle = 0;
     const char *message_name = pcmk__message_name(crm_system_name);
     uid_t found_uid = 0;
     gid_t found_gid = 0;
     pid_t found_pid = 0;
     int rv;
 
     struct mainloop_fd_callbacks cpg_fd_callbacks = {
         .dispatch = pcmk_cpg_dispatch,
         .destroy = cluster->destroy,
     };
 
     cpg_model_v1_data_t cpg_model_info = {
         .model = CPG_MODEL_V1,
         .cpg_deliver_fn = cluster->cpg.cpg_deliver_fn,
         .cpg_confchg_fn = cluster->cpg.cpg_confchg_fn,
         .cpg_totem_confchg_fn = NULL,
         .flags = 0,
     };
 
     cpg_evicted = false;
     cluster->priv->group.length = 0;
     cluster->priv->group.value[0] = 0;
 
     /* group.value is char[128] */
     strncpy(cluster->priv->group.value, message_name, 127);
     cluster->priv->group.value[127] = 0;
     cluster->priv->group.length = QB_MIN(127,
                                          strlen(cluster->priv->group.value));
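     // Include the terminating NUL in the advertised group name length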
     cluster->priv->group.length++;
 
     cs_repeat(rc, retries, 30,
               cpg_model_initialize(&handle, CPG_MODEL_V1,
                                    (cpg_model_data_t *) &cpg_model_info,
                                    NULL));
     if (rc != CS_OK) {
         crm_err("Could not connect to the CPG API: %s (%d)",
                 cs_strerror(rc), rc);
         goto bail;
     }
 
     rc = cpg_fd_get(handle, &fd);
     if (rc != CS_OK) {
         crm_err("Could not obtain the CPG API connection: %s (%d)",
                 cs_strerror(rc), rc);
         goto bail;
     }
 
     /* CPG provider run as root (in given user namespace, anyway)? */
     if (!(rv = crm_ipc_is_authentic_process(fd, (uid_t) 0, (gid_t) 0,
                                             &found_pid, &found_uid,
                                             &found_gid))) {
         crm_err("CPG provider is not authentic:"
                 " process %lld (uid: %lld, gid: %lld)",
                 (long long) PCMK__SPECIAL_PID_AS_0(found_pid),
                 (long long) found_uid, (long long) found_gid);
         rc = CS_ERR_ACCESS;
         goto bail;
     } else if (rv < 0) {
         crm_err("Could not verify authenticity of CPG provider: %s (%d)",
                 strerror(-rv), -rv);
         rc = CS_ERR_ACCESS;
         goto bail;
     }
 
     id = pcmk__cpg_local_nodeid(handle);
     if (id == 0) {
         crm_err("Could not get local node id from the CPG API");
         rc = CS_ERR_LIBRARY;    // so the bail path reports failure
         goto bail;
     }
     cluster->priv->node_id = id;
 
     retries = 0;
     cs_repeat(rc, retries, 30, cpg_join(handle, &cluster->priv->group));
     if (rc != CS_OK) {
         crm_err("Could not join the CPG group '%s': %d", message_name, rc);
         goto bail;
     }
 
     pcmk_cpg_handle = handle;
     cluster->priv->cpg_handle = handle;
     mainloop_add_fd("corosync-cpg", G_PRIORITY_MEDIUM, fd, cluster, &cpg_fd_callbacks);
 
   bail:
     if (rc != CS_OK) {
         cpg_finalize(handle);
         // @TODO Map rc to more specific Pacemaker return code
         return ENOTCONN;
     }
 
     peer = pcmk__get_node(id, NULL, NULL, pcmk__node_search_cluster_member);
     crm_update_peer_proc(__func__, peer, crm_proc_cpg, PCMK_VALUE_ONLINE);
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Disconnect from Corosync CPG
  *
  * \param[in,out] cluster  Cluster object to disconnect
  */
 void
 pcmk__cpg_disconnect(pcmk_cluster_t *cluster)
 {
     pcmk_cpg_handle = 0;
     if (cluster->priv->cpg_handle != 0) {
         crm_trace("Disconnecting CPG");
         cpg_leave(cluster->priv->cpg_handle, &cluster->priv->group);
         cpg_finalize(cluster->priv->cpg_handle);
         cluster->priv->cpg_handle = 0;
 
     } else {
         crm_info("No CPG connection");
     }
 }
 
 /*!
  * \internal
  * \brief Send string data via Corosync CPG
  *
  * \param[in] data   Data to send
  * \param[in] node   Cluster node to send message to
  * \param[in] dest   Type of message to send
  *
  * \return \c true on success, or \c false otherwise
  */
 static bool
 send_cpg_text(const char *data, const pcmk__node_status_t *node,
               enum pcmk__cluster_msg dest)
 {
     static int msg_id = 0;
     static int local_pid = 0;
     static int local_name_len = 0;
     static const char *local_name = NULL;
 
     char *target = NULL;
     struct iovec *iov;
     pcmk__cpg_msg_t *msg = NULL;
 
     if (local_name == NULL) {
         local_name = pcmk__cluster_local_node_name();
     }
     if ((local_name_len == 0) && (local_name != NULL)) {
         local_name_len = strlen(local_name);
     }
 
     if (data == NULL) {
         data = "";
     }
 
     if (local_pid == 0) {
         local_pid = getpid();
     }
 
     msg = pcmk__assert_alloc(1, sizeof(pcmk__cpg_msg_t));
 
     msg_id++;
     msg->id = msg_id;
     msg->header.error = CS_OK;
 
     msg->host.type = dest;
 
     if (node != NULL) {
         if (node->name != NULL) {
             target = pcmk__str_copy(node->name);
             msg->host.size = strlen(node->name);
             memset(msg->host.uname, 0, MAX_NAME);
             memcpy(msg->host.uname, node->name, msg->host.size);
 
         } else {
             target = crm_strdup_printf("%" PRIu32, node->cluster_layer_id);
         }
         msg->host.id = node->cluster_layer_id;
 
     } else {
         target = pcmk__str_copy("all");
     }
 
     msg->sender.id = 0;
     msg->sender.type = pcmk__cluster_parse_msg_type(crm_system_name);
     msg->sender.pid = local_pid;
     msg->sender.size = local_name_len;
     memset(msg->sender.uname, 0, MAX_NAME);
 
     if ((local_name != NULL) && (msg->sender.size != 0)) {
         memcpy(msg->sender.uname, local_name, msg->sender.size);
     }
 
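     // The payload size counts the terminating NUL byte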
     msg->size = 1 + strlen(data);
     msg->header.size = sizeof(pcmk__cpg_msg_t) + msg->size;
 
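     // Payloads of CRM_BZ2_THRESHOLD bytes or more are bzip2-compressed
     // when possible; smaller ones are always sent as plain text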
     if (msg->size < CRM_BZ2_THRESHOLD) {
         msg = pcmk__realloc(msg, msg->header.size);
         memcpy(msg->data, data, msg->size);
 
     } else {
         char *compressed = NULL;
         unsigned int new_size = 0;
 
         if (pcmk__compress(data, (unsigned int) msg->size, 0, &compressed,
                            &new_size) == pcmk_rc_ok) {
 
             msg->header.size = sizeof(pcmk__cpg_msg_t) + new_size;
             msg = pcmk__realloc(msg, msg->header.size);
             memcpy(msg->data, compressed, new_size);
 
             msg->is_compressed = TRUE;
             msg->compressed_size = new_size;
 
         } else {
             // cppcheck seems not to understand the abort logic in pcmk__realloc
             // cppcheck-suppress memleak
             msg = pcmk__realloc(msg, msg->header.size);
             memcpy(msg->data, data, msg->size);
         }
 
         free(compressed);
     }
 
     iov = pcmk__assert_alloc(1, sizeof(struct iovec));
     iov->iov_base = msg;
     iov->iov_len = msg->header.size;
 
     if (msg->compressed_size > 0) {
         crm_trace("Queueing CPG message %u to %s "
                   "(%llu bytes, %d bytes compressed payload): %.200s",
                   msg->id, target, (unsigned long long) iov->iov_len,
                   msg->compressed_size, data);
     } else {
         crm_trace("Queueing CPG message %u to %s "
                   "(%llu bytes, %d bytes payload): %.200s",
                   msg->id, target, (unsigned long long) iov->iov_len,
                   msg->size, data);
     }
 
     free(target);
 
     cs_message_queue = g_list_append(cs_message_queue, iov);
     crm_cs_flush(&pcmk_cpg_handle);
 
     return true;
 }
 
 /*!
  * \internal
  * \brief Send an XML message via Corosync CPG
  *
  * \param[in] msg   XML message to send
  * \param[in] node  Cluster node to send message to
  * \param[in] dest  Type of message to send
  *
  * \return TRUE on success, otherwise FALSE
  */
 bool
 pcmk__cpg_send_xml(const xmlNode *msg, const pcmk__node_status_t *node,
                    enum pcmk__cluster_msg dest)
 {
     bool rc = true;
     GString *data = g_string_sized_new(1024);
 
     pcmk__xml_string(msg, 0, data, 0);
 
     rc = send_cpg_text(data->str, node, dest);
     g_string_free(data, TRUE);
     return rc;
 }
diff --git a/lib/cluster/membership.c b/lib/cluster/membership.c
index 4ddad057bc..42cc66c67c 100644
--- a/lib/cluster/membership.c
+++ b/lib/cluster/membership.c
@@ -1,1506 +1,1506 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #ifndef _GNU_SOURCE
 #  define _GNU_SOURCE
 #endif
 
 #include <inttypes.h>                   // PRIu32
 #include <stdbool.h>                    // bool
 #include <stdio.h>
 #include <string.h>
 #include <sys/param.h>
 #include <sys/types.h>
 #include <unistd.h>
 
 #include <glib.h>
 
 #include <crm/common/ipc.h>
 #include <crm/common/xml_internal.h>
 #include <crm/cluster/internal.h>
 #include <crm/common/xml.h>
 #include <crm/stonith-ng.h>
 #include "crmcluster_private.h"
 
 /* The peer cache remembers cluster nodes that have been seen.
  * This is managed mostly automatically by libcluster, based on
  * cluster membership events.
  *
  * Because cluster nodes can have conflicting names or UUIDs,
  * the hash table key is a uniquely generated ID.
  *
  * @COMPAT When this is internal, rename to cluster_node_member_cache and make
  * static.
  */
 GHashTable *crm_peer_cache = NULL;
 
 /*
  * The remote peer cache tracks pacemaker_remote nodes. While the
  * value has the same type as the peer cache's, it is tracked separately for
  * three reasons: pacemaker_remote nodes can't have conflicting names or UUIDs,
  * so the name (which is also the UUID) is used as the hash table key; there
  * is no equivalent of membership events, so management is not automatic; and
  * most users of the peer cache need to exclude pacemaker_remote nodes.
  *
  * That said, using a single cache would be more logical and less error-prone,
  * so it would be a good idea to merge them one day.
  *
  * libcluster provides two avenues for populating the cache:
  * pcmk__cluster_lookup_remote_node() and pcmk__cluster_forget_remote_node()
  * directly manage it, while refresh_remote_nodes() populates it via the CIB.
  */
 GHashTable *crm_remote_peer_cache = NULL;
 
 /*
  * The CIB cluster node cache tracks cluster nodes that have been seen in
  * the CIB. It is useful mainly when a caller needs to know about a node that
  * may no longer be in the membership, but doesn't want to add the node to the
  * main peer cache tables.
  */
 static GHashTable *cluster_node_cib_cache = NULL;
 
 unsigned long long crm_peer_seq = 0;
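 // Whether lost nodes are automatically removed from the peer cache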
 static bool autoreap = true;
 static bool has_quorum = false;
 
 // Flag setting and clearing for pcmk__node_status_t:flags
 
 #define set_peer_flags(peer, flags_to_set) do {                               \
         (peer)->flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                            "Peer", (peer)->name,              \
                                            (peer)->flags, (flags_to_set),     \
                                            #flags_to_set);                    \
     } while (0)
 
 #define clear_peer_flags(peer, flags_to_clear) do {                           \
         (peer)->flags = pcmk__clear_flags_as(__func__, __LINE__,              \
                                              LOG_TRACE,                       \
                                              "Peer", (peer)->name,            \
                                              (peer)->flags, (flags_to_clear), \
                                              #flags_to_clear);                \
     } while (0)
 
 static void update_peer_uname(pcmk__node_status_t *node, const char *uname);
 static pcmk__node_status_t *find_cib_cluster_node(const char *id,
                                                   const char *uname);
 
 /*!
  * \internal
  * \brief Check whether the cluster currently has quorum
  *
  * \return \c true if the cluster has quorum, or \c false otherwise
  */
 bool
 pcmk__cluster_has_quorum(void)
 {
     return has_quorum;
 }
 
 /*!
  * \internal
  * \brief Set whether the cluster currently has quorum
  *
  * \param[in] quorate  \c true if the cluster has quorum, or \c false otherwise
  */
 void
 pcmk__cluster_set_quorum(bool quorate)
 {
     has_quorum = quorate;
 }
 
 /*!
  * \internal
  * \brief Get the number of Pacemaker Remote nodes that have been seen
  *
  * \return Number of cached Pacemaker Remote nodes
  */
 unsigned int
 pcmk__cluster_num_remote_nodes(void)
 {
     if (crm_remote_peer_cache == NULL) {
         return 0U;
     }
     return g_hash_table_size(crm_remote_peer_cache);
 }
 
 /*!
  * \internal
  * \brief Get a remote node cache entry, creating it if necessary
  *
  * \param[in] node_name  Name of remote node
  *
  * \return Cache entry for node on success, or \c NULL (and set \c errno)
  *         otherwise
  *
  * \note When creating a new entry, this will leave the node state undetermined.
  *       The caller should also call \c pcmk__update_peer_state() if the state
  *       is known.
  * \note Because this can add and remove cache entries, callers should not
  *       assume any previously obtained cache entry pointers remain valid.
  */
 pcmk__node_status_t *
 pcmk__cluster_lookup_remote_node(const char *node_name)
 {
     pcmk__node_status_t *node = NULL;
     char *node_name_copy = NULL;
 
     if (node_name == NULL) {
         errno = EINVAL;
         return NULL;
     }
 
     /* It's theoretically possible that the node was added to the cluster peer
      * cache before it was known to be a Pacemaker Remote node. Remove that
      * entry unless it has a node ID, which means the name actually is
      * associated with a cluster node. (@TODO return an error in that case?)
      */
     node = pcmk__search_node_caches(0, node_name,
                                     pcmk__node_search_cluster_member);
     if ((node != NULL) && (node->xml_id == NULL)) {
         /* node_name could be a pointer into the cache entry being removed, so
          * reassign it to a copy before the original gets freed
          */
         node_name_copy = strdup(node_name);
         if (node_name_copy == NULL) {
             errno = ENOMEM;
             return NULL;
         }
         node_name = node_name_copy;
         pcmk__cluster_forget_cluster_node(0, node_name);
     }
 
     /* Return existing cache entry if one exists */
     node = g_hash_table_lookup(crm_remote_peer_cache, node_name);
     if (node) {
         free(node_name_copy);
         return node;
     }
 
     /* Allocate a new entry */
     node = calloc(1, sizeof(pcmk__node_status_t));
     if (node == NULL) {
         free(node_name_copy);
         return NULL;
     }
 
     /* Populate the essential information */
     set_peer_flags(node, pcmk__node_status_remote);
     node->xml_id = strdup(node_name);
     if (node->xml_id == NULL) {
         free(node);
         errno = ENOMEM;
         free(node_name_copy);
         return NULL;
     }
 
     /* Add the new entry to the cache */
     g_hash_table_replace(crm_remote_peer_cache, node->xml_id, node);
     crm_trace("added %s to remote cache", node_name);
 
     /* Update the entry's uname, ensuring peer status callbacks are called */
     update_peer_uname(node, node_name);
     free(node_name_copy);
     return node;
 }
 
 /*!
  * \internal
  * \brief Remove a node from the Pacemaker Remote node cache
  *
  * \param[in] node_name  Name of node to remove from cache
  *
  * \note The caller must be careful not to use \p node_name after calling this
  *       function if it might be a pointer into the cache entry being removed.
  */
 void
 pcmk__cluster_forget_remote_node(const char *node_name)
 {
     /* Do a lookup first, because node_name could be a pointer within the entry
      * being removed -- we can't log it *after* removing it.
      */
     if (g_hash_table_lookup(crm_remote_peer_cache, node_name) != NULL) {
         crm_trace("Removing %s from Pacemaker Remote node cache", node_name);
         g_hash_table_remove(crm_remote_peer_cache, node_name);
     }
 }
 
 /*!
  * \internal
  * \brief Return node status based on a CIB status entry
  *
  * \param[in] node_state  XML of node state
  *
- * \return \c CRM_NODE_MEMBER if \c PCMK__XA_IN_CCM is true in
+ * \return \c PCMK_VALUE_MEMBER if \c PCMK__XA_IN_CCM is true in
  *         \c PCMK__XE_NODE_STATE, or \c CRM_NODE_LOST otherwise
  */
 static const char *
 remote_state_from_cib(const xmlNode *node_state)
 {
     bool in_ccm = false;
 
     if ((pcmk__xe_get_bool_attr(node_state, PCMK__XA_IN_CCM,
                                 &in_ccm) == pcmk_rc_ok) && in_ccm) {
-        return CRM_NODE_MEMBER;
+        return PCMK_VALUE_MEMBER;
     }
     return CRM_NODE_LOST;
 }
 
 /* user data for looping through remote node xpath searches */
 struct refresh_data {
     const char *field;  /* XML attribute to check for node name */
     gboolean has_state; /* whether to update node state based on XML */
 };
 
 /*!
  * \internal
  * \brief Process one pacemaker_remote node xpath search result
  *
  * \param[in] result     XML search result
  * \param[in] user_data  what to look for in the XML
  */
 static void
 remote_cache_refresh_helper(xmlNode *result, void *user_data)
 {
     const struct refresh_data *data = user_data;
     const char *remote = crm_element_value(result, data->field);
     const char *state = NULL;
     pcmk__node_status_t *node;
 
     CRM_CHECK(remote != NULL, return);
 
     /* Determine node's state, if the result has it */
     if (data->has_state) {
         state = remote_state_from_cib(result);
     }
 
     /* Check whether cache already has entry for node */
     node = g_hash_table_lookup(crm_remote_peer_cache, remote);
 
     if (node == NULL) {
         /* Node is not in cache, so add a new entry for it */
         node = pcmk__cluster_lookup_remote_node(remote);
         CRM_ASSERT(node);
         if (state) {
             pcmk__update_peer_state(__func__, node, state, 0);
         }
 
     } else if (pcmk_is_set(node->flags, pcmk__node_status_dirty)) {
         /* Node is in cache and hasn't been updated already, so mark it clean */
         clear_peer_flags(node, pcmk__node_status_dirty);
         if (state) {
             pcmk__update_peer_state(__func__, node, state, 0);
         }
     }
 }
 
 static void
 mark_dirty(gpointer key, gpointer value, gpointer user_data)
 {
     set_peer_flags((pcmk__node_status_t *) value, pcmk__node_status_dirty);
 }
 
 static gboolean
 is_dirty(gpointer key, gpointer value, gpointer user_data)
 {
     const pcmk__node_status_t *node = value;
 
     return pcmk_is_set(node->flags, pcmk__node_status_dirty);
 }
 
 /*!
  * \internal
  * \brief Repopulate the remote node cache based on CIB XML
  *
  * \param[in] cib  CIB XML to parse
  */
 static void
 refresh_remote_nodes(xmlNode *cib)
 {
     struct refresh_data data;
 
     pcmk__cluster_init_node_caches();
 
     /* First, we mark all existing cache entries as dirty,
      * so that later we can remove any that weren't in the CIB.
      * We don't empty the cache, because we need to detect changes in state.
      */
     g_hash_table_foreach(crm_remote_peer_cache, mark_dirty, NULL);
 
     /* Look for guest nodes and remote nodes in the status section */
     data.field = PCMK_XA_ID;
     data.has_state = TRUE;
     crm_foreach_xpath_result(cib, PCMK__XP_REMOTE_NODE_STATUS,
                              remote_cache_refresh_helper, &data);
 
     /* Look for guest nodes and remote nodes in the configuration section,
      * because they may have just been added and not have a status entry yet.
      * In that case, the cached node state will be left NULL, so that the
      * peer status callback isn't called until we're sure the node started
      * successfully.
      */
     data.field = PCMK_XA_VALUE;
     data.has_state = FALSE;
     crm_foreach_xpath_result(cib, PCMK__XP_GUEST_NODE_CONFIG,
                              remote_cache_refresh_helper, &data);
     data.field = PCMK_XA_ID;
     data.has_state = FALSE;
     crm_foreach_xpath_result(cib, PCMK__XP_REMOTE_NODE_CONFIG,
                              remote_cache_refresh_helper, &data);
 
     /* Remove all old cache entries that weren't seen in the CIB */
     g_hash_table_foreach_remove(crm_remote_peer_cache, is_dirty, NULL);
 }
 
 /*!
  * \internal
  * \brief Check whether a node is an active cluster node
  *
  * Remote nodes are never considered active. This guarantees that they can never
  * become DC.
  *
  * \param[in] node  Node to check
  *
  * \return \c true if the node is an active cluster node, or \c false otherwise
  */
 bool
 pcmk__cluster_is_node_active(const pcmk__node_status_t *node)
 {
     const enum pcmk_cluster_layer cluster_layer = pcmk_get_cluster_layer();
 
     if ((node == NULL) || pcmk_is_set(node->flags, pcmk__node_status_remote)) {
         return false;
     }
 
     switch (cluster_layer) {
         case pcmk_cluster_layer_corosync:
 #if SUPPORT_COROSYNC
             return pcmk__corosync_is_peer_active(node);
 #else
             break;
 #endif  // SUPPORT_COROSYNC
         default:
             break;
     }
 
     crm_err("Unhandled cluster layer: %s",
             pcmk_cluster_layer_text(cluster_layer));
     return false;
 }
 
 /*!
  * \internal
  * \brief Check if a node's entry should be removed from the cluster node cache
  *
  * A node should be removed from the cache if it's inactive and matches another
  * \c pcmk__node_status_t (the search object). The node is considered a
  * mismatch if any of the following are true:
  * * The search object is \c NULL.
  * * The search object has an ID set and the cached node's ID does not match it.
  * * The search object does not have an ID set, and the cached node's name does
  *   not match the search node's name. (If both names are \c NULL, it's a
  *   match.)
  *
  * Otherwise, the node is considered a match.
  *
  * Note that if the search object has both an ID and a name set, the name is
  * ignored for matching purposes.
  *
  * \param[in] key        Ignored
  * \param[in] value      \c pcmk__node_status_t object from cluster node cache
  * \param[in] user_data  \c pcmk__node_status_t object to match against (search
  *                       object)
  *
  * \return \c TRUE if the node entry should be removed from \c crm_peer_cache,
  *         or \c FALSE otherwise
  */
 static gboolean
 should_forget_cluster_node(gpointer key, gpointer value, gpointer user_data)
 {
     pcmk__node_status_t *node = value;
     pcmk__node_status_t *search = user_data;
 
     if (search == NULL) {
         return FALSE;
     }
     if ((search->cluster_layer_id != 0)
         && (node->cluster_layer_id != search->cluster_layer_id)) {
         return FALSE;
     }
     if ((search->cluster_layer_id == 0)
         && !pcmk__str_eq(node->name, search->name, pcmk__str_casei)) {
         // @TODO Consider name even if ID is set?
         return FALSE;
     }
     if (pcmk__cluster_is_node_active(value)) {
         return FALSE;
     }
 
     crm_info("Removing node with name %s and cluster layer ID " PRIu32
              " from membership cache",
              pcmk__s(node->name, "(unknown)"), node->cluster_layer_id);
     return TRUE;
 }
 
 /*!
  * \internal
  * \brief Remove one or more inactive nodes from the cluster node cache
  *
  * All inactive nodes matching \p id and \p node_name as described in
  * \c should_forget_cluster_node documentation are removed from the cache.
  *
  * If \p id is 0 and \p node_name is \c NULL, all inactive nodes are removed
  * from the cache regardless of ID and name. This differs from clearing the
  * cache, in that entries for active nodes are preserved.
  *
  * \param[in] id         ID of node to remove from cache (0 to ignore)
  * \param[in] node_name  Name of node to remove from cache (ignored if \p id is
  *                       nonzero)
  *
  * \note \p node_name is not modified directly, but it will be freed if it's a
  *       pointer into a cache entry that is removed.
  */
 void
 pcmk__cluster_forget_cluster_node(uint32_t id, const char *node_name)
 {
     pcmk__node_status_t search = { 0, };
     char *criterion = NULL; // For logging
     guint matches = 0;
 
     if (crm_peer_cache == NULL) {
         crm_trace("Membership cache not initialized, ignoring removal request");
         return;
     }
 
     search.cluster_layer_id = id;
     search.name = pcmk__str_copy(node_name);    // May log after original freed
 
     if (id > 0) {
         criterion = crm_strdup_printf("cluster layer ID %" PRIu32, id);
 
     } else if (node_name != NULL) {
         criterion = crm_strdup_printf("name %s", node_name);
     }
 
     matches = g_hash_table_foreach_remove(crm_peer_cache,
                                           should_forget_cluster_node, &search);
     if (matches > 0) {
         if (criterion != NULL) {
             crm_notice("Removed %u inactive node%s with %s from the membership "
                        "cache",
                        matches, pcmk__plural_s(matches), criterion);
         } else {
             crm_notice("Removed all (%u) inactive cluster nodes from the "
                        "membership cache",
                        matches);
         }
 
     } else {
         crm_info("No inactive cluster nodes%s%s to remove from the membership "
                  "cache",
                  ((criterion != NULL)? " with " : ""), pcmk__s(criterion, ""));
     }
 
     free(search.name);
     free(criterion);
 }
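 
 /* Illustrative sketch, not part of the actual change: the three removal
  * modes described above, with hypothetical ID and name values.
  */
 static void
 example_forget_cluster_nodes(void)
 {
     pcmk__cluster_forget_cluster_node(3, NULL);     // inactive node with ID 3
     pcmk__cluster_forget_cluster_node(0, "node-a"); // inactive node by name
     pcmk__cluster_forget_cluster_node(0, NULL);     // all inactive nodes
 }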
 
 static void
 count_peer(gpointer key, gpointer value, gpointer user_data)
 {
     unsigned int *count = user_data;
     pcmk__node_status_t *node = value;
 
     if (pcmk__cluster_is_node_active(node)) {
         *count = *count + 1;
     }
 }
 
 /*!
  * \internal
  * \brief Get the number of active cluster nodes that have been seen
  *
  * Remote nodes are never considered active. This guarantees that they can never
  * become DC.
  *
  * \return Number of active nodes in the cluster node cache
  */
 unsigned int
 pcmk__cluster_num_active_nodes(void)
 {
     unsigned int count = 0;
 
     if (crm_peer_cache != NULL) {
         g_hash_table_foreach(crm_peer_cache, count_peer, &count);
     }
     return count;
 }
 
 static void
 destroy_crm_node(gpointer data)
 {
     pcmk__node_status_t *node = data;
 
     crm_trace("Destroying entry for node %" PRIu32 ": %s",
               node->cluster_layer_id, node->name);
 
     free(node->name);
     free(node->state);
     free(node->xml_id);
     free(node->user_data);
     free(node->expected);
     free(node->conn_host);
     free(node);
 }
 
 /*!
  * \internal
  * \brief Initialize node caches
  */
 void
 pcmk__cluster_init_node_caches(void)
 {
     if (crm_peer_cache == NULL) {
         crm_peer_cache = pcmk__strikey_table(free, destroy_crm_node);
     }
 
     if (crm_remote_peer_cache == NULL) {
         crm_remote_peer_cache = pcmk__strikey_table(NULL, destroy_crm_node);
     }
 
     if (cluster_node_cib_cache == NULL) {
         cluster_node_cib_cache = pcmk__strikey_table(free, destroy_crm_node);
     }
 }
 
 /*!
  * \internal
  * \brief Destroy node caches
  */
 void
 pcmk__cluster_destroy_node_caches(void)
 {
     if (crm_peer_cache != NULL) {
         crm_trace("Destroying peer cache with %d members",
                   g_hash_table_size(crm_peer_cache));
         g_hash_table_destroy(crm_peer_cache);
         crm_peer_cache = NULL;
     }
 
     if (crm_remote_peer_cache != NULL) {
         crm_trace("Destroying remote peer cache with %d members",
                   pcmk__cluster_num_remote_nodes());
         g_hash_table_destroy(crm_remote_peer_cache);
         crm_remote_peer_cache = NULL;
     }
 
     if (cluster_node_cib_cache != NULL) {
         crm_trace("Destroying configured cluster node cache with %d members",
                   g_hash_table_size(cluster_node_cib_cache));
         g_hash_table_destroy(cluster_node_cib_cache);
         cluster_node_cib_cache = NULL;
     }
 }
 
 static void (*peer_status_callback)(enum pcmk__node_update,
                                     pcmk__node_status_t *,
                                     const void *) = NULL;
 
 /*!
  * \internal
  * \brief Set a client function that will be called after peer status changes
  *
  * \param[in] dispatch  Pointer to function to use as callback
  *
  * \note Client callbacks should do only client-specific handling. Callbacks
  *       must not add or remove entries in the peer caches.
  */
 void
 pcmk__cluster_set_status_callback(void (*dispatch)(enum pcmk__node_update,
                                                    pcmk__node_status_t *,
                                                    const void *))
 {
     // @TODO Improve documentation of peer_status_callback
     peer_status_callback = dispatch;
 }
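 
 /* Illustrative sketch, not part of the actual change: a minimal client
  * callback and its registration. example_status_cb() and
  * example_register_cb() are hypothetical names; per the note above, the
  * callback must not add or remove peer cache entries.
  */
 static void
 example_status_cb(enum pcmk__node_update kind, pcmk__node_status_t *node,
                   const void *data)
 {
     crm_info("Peer %s changed (update type %d)",
              pcmk__s(node->name, "(unknown)"), (int) kind);
 }
 
 static void
 example_register_cb(void)
 {
     pcmk__cluster_init_node_caches();
     pcmk__cluster_set_status_callback(example_status_cb);
 }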
 
 /*!
  * \internal
  * \brief Tell the library whether to automatically reap lost nodes
  *
  * If \c true (the default), calling \c crm_update_peer_proc() will also update
- * the peer state to \c CRM_NODE_MEMBER or \c CRM_NODE_LOST, and updating the
+ * the peer state to \c PCMK_VALUE_MEMBER or \c CRM_NODE_LOST, and updating the
  * peer state will reap peers whose state changes to anything other than
- * \c CRM_NODE_MEMBER.
+ * \c PCMK_VALUE_MEMBER.
  *
  * Callers should leave this enabled unless they plan to manage the cache
  * separately on their own.
  *
  * \param[in] enable  \c true to enable automatic reaping, \c false to disable
  */
 void
 pcmk__cluster_set_autoreap(bool enable)
 {
     autoreap = enable;
 }
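 
 /* Illustrative sketch, not part of the actual change: a daemon that plans to
  * manage the cache separately on its own would disable automatic reaping
  * once, at startup. example_disable_autoreap() is a hypothetical name.
  */
 static void
 example_disable_autoreap(void)
 {
     pcmk__cluster_init_node_caches();
     pcmk__cluster_set_autoreap(false); // caller now reaps lost nodes itself
 }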
 
 static void
 dump_peer_hash(int level, const char *caller)
 {
     GHashTableIter iter;
     const char *id = NULL;
     pcmk__node_status_t *node = NULL;
 
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, (gpointer *) &id, (gpointer *) &node)) {
         do_crm_log(level, "%s: Node %" PRIu32 "/%s = %p - %s",
                    caller, node->cluster_layer_id, node->name, node, id);
     }
 }
 
 static gboolean
 hash_find_by_data(gpointer key, gpointer value, gpointer user_data)
 {
     return value == user_data;
 }
 
 /*!
  * \internal
  * \brief Search cluster member node cache
  *
  * \param[in] id     If not 0, cluster node ID to search for
  * \param[in] uname  If not NULL, node name to search for
  * \param[in] uuid   If not NULL while id is 0, node UUID instead of cluster
  *                   node ID to search for
  *
  * \return Cluster node cache entry if found, otherwise NULL
  */
 static pcmk__node_status_t *
 search_cluster_member_cache(unsigned int id, const char *uname,
                             const char *uuid)
 {
     GHashTableIter iter;
     pcmk__node_status_t *node = NULL;
     pcmk__node_status_t *by_id = NULL;
     pcmk__node_status_t *by_name = NULL;
 
     CRM_ASSERT(id > 0 || uname != NULL);
 
     pcmk__cluster_init_node_caches();
 
     if (uname != NULL) {
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if (pcmk__str_eq(node->name, uname, pcmk__str_casei)) {
                 crm_trace("Name match: %s", node->name);
                 by_name = node;
                 break;
             }
         }
     }
 
     if (id > 0) {
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if (node->cluster_layer_id == id) {
                 crm_trace("ID match: %" PRIu32, node->cluster_layer_id);
                 by_id = node;
                 break;
             }
         }
 
     } else if (uuid != NULL) {
         g_hash_table_iter_init(&iter, crm_peer_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if (pcmk__str_eq(node->xml_id, uuid, pcmk__str_casei)) {
                 crm_trace("UUID match: %s", node->xml_id);
                 by_id = node;
                 break;
             }
         }
     }
 
     node = by_id; /* Good default */
     if (by_id == by_name) {
         /* Nothing to do if they match (both NULL counts) */
         crm_trace("Consistent: %p for %u/%s", by_id, id, uname);
 
     } else if(by_id == NULL && by_name) {
         crm_trace("Only one: %p for %u/%s", by_name, id, uname);
 
         if (id && by_name->cluster_layer_id) {
             dump_peer_hash(LOG_WARNING, __func__);
             crm_crit("Nodes %u and %" PRIu32 " share the same name '%s'",
                      id, by_name->cluster_layer_id, uname);
             node = NULL; /* Create a new one */
 
         } else {
             node = by_name;
         }
 
     } else if(by_name == NULL && by_id) {
         crm_trace("Only one: %p for %u/%s", by_id, id, uname);
 
         if ((uname != NULL) && (by_id->name != NULL)) {
             dump_peer_hash(LOG_WARNING, __func__);
             crm_crit("Nodes '%s' and '%s' share the same cluster nodeid %u: "
                      "assuming '%s' is correct",
                      uname, by_id->name, id, uname);
         }
 
     } else if ((uname != NULL) && (by_id->name != NULL)) {
         if (pcmk__str_eq(uname, by_id->name, pcmk__str_casei)) {
             crm_notice("Node '%s' has changed its cluster layer ID "
                        "from %" PRIu32 " to %" PRIu32,
                        by_id->name, by_name->cluster_layer_id,
                        by_id->cluster_layer_id);
             g_hash_table_foreach_remove(crm_peer_cache, hash_find_by_data, by_name);
 
         } else {
             crm_warn("Nodes '%s' and '%s' share the same cluster nodeid: %u %s",
                      by_id->name, by_name->name, id, uname);
             dump_peer_hash(LOG_INFO, __func__);
             crm_abort(__FILE__, __func__, __LINE__, "member weirdness", TRUE,
                       TRUE);
         }
 
     } else if ((id > 0) && (by_name->cluster_layer_id > 0)) {
         crm_warn("Nodes %" PRIu32 " and %" PRIu32 " share the same name: '%s'",
                  by_id->cluster_layer_id, by_name->cluster_layer_id, uname);
 
     } else {
         /* Simple merge */
 
         /* Only corosync-based clusters use node IDs. The functions that call
          * pcmk__update_peer_state() and crm_update_peer_proc() only know
          * nodeid, so 'by_id' is authoritative when merging.
          */
         dump_peer_hash(LOG_DEBUG, __func__);
 
         crm_info("Merging %p into %p", by_name, by_id);
         g_hash_table_foreach_remove(crm_peer_cache, hash_find_by_data, by_name);
     }
 
     return node;
 }
 
 /*!
  * \internal
  * \brief Search caches for a node (cluster or Pacemaker Remote)
  *
  * \param[in] id     If not 0, cluster node ID to search for
  * \param[in] uname  If not NULL, node name to search for
  * \param[in] flags  Group of enum pcmk__node_search_flags
  *
  * \return Node cache entry if found, otherwise NULL
  */
 pcmk__node_status_t *
 pcmk__search_node_caches(unsigned int id, const char *uname, uint32_t flags)
 {
     pcmk__node_status_t *node = NULL;
 
     CRM_ASSERT(id > 0 || uname != NULL);
 
     pcmk__cluster_init_node_caches();
 
     if ((uname != NULL) && pcmk_is_set(flags, pcmk__node_search_remote)) {
         node = g_hash_table_lookup(crm_remote_peer_cache, uname);
     }
 
     if ((node == NULL)
         && pcmk_is_set(flags, pcmk__node_search_cluster_member)) {
 
         node = search_cluster_member_cache(id, uname, NULL);
     }
 
     if ((node == NULL) && pcmk_is_set(flags, pcmk__node_search_cluster_cib)) {
         char *id_str = (id == 0)? NULL : crm_strdup_printf("%u", id);
 
         node = find_cib_cluster_node(id_str, uname);
         free(id_str);
     }
 
     return node;
 }
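 
 /* Illustrative sketch, not part of the actual change: the search flags may
  * be combined, so a single call can check the Pacemaker Remote cache by name
  * and fall back to the cluster member cache. example_find() is a
  * hypothetical name.
  */
 static pcmk__node_status_t *
 example_find(const char *name)
 {
     return pcmk__search_node_caches(0, name,
                                     pcmk__node_search_remote
                                     |pcmk__node_search_cluster_member);
 }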
 
 /*!
  * \internal
  * \brief Purge a node from cache (both cluster and Pacemaker Remote)
  *
  * \param[in] node_name  If not NULL, purge only nodes with this name
  * \param[in] node_id    If not 0, purge cluster nodes only if they have this ID
  *
  * \note If \p node_name is NULL and \p node_id is 0, no nodes will be purged.
  *       If \p node_name is not NULL and \p node_id is not 0, Pacemaker Remote
  *       nodes that match \p node_name will be purged, and cluster nodes that
  *       match both \p node_name and \p node_id will be purged.
  * \note The caller must be careful not to use \p node_name after calling this
  *       function if it might be a pointer into a cache entry being removed.
  */
 void
 pcmk__purge_node_from_cache(const char *node_name, uint32_t node_id)
 {
     char *node_name_copy = NULL;
 
     if ((node_name == NULL) && (node_id == 0U)) {
         return;
     }
 
     // Purge from Pacemaker Remote node cache
     if ((node_name != NULL)
         && (g_hash_table_lookup(crm_remote_peer_cache, node_name) != NULL)) {
         /* node_name could be a pointer into the cache entry being purged,
          * so reassign it to a copy before the original gets freed
          */
         node_name_copy = pcmk__str_copy(node_name);
         node_name = node_name_copy;
 
         crm_trace("Purging %s from Pacemaker Remote node cache", node_name);
         g_hash_table_remove(crm_remote_peer_cache, node_name);
     }
 
     pcmk__cluster_forget_cluster_node(node_id, node_name);
     free(node_name_copy);
 }
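 
 /* Illustrative sketch, not part of the actual change: purging by name alone
  * removes a matching Pacemaker Remote entry and any matching inactive
  * cluster entry. The name is a hypothetical value.
  */
 static void
 example_purge(void)
 {
     pcmk__purge_node_from_cache("node-a", 0);
 }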
 
 #if SUPPORT_COROSYNC
 static guint
 remove_conflicting_peer(pcmk__node_status_t *node)
 {
     guint matches = 0;
     GHashTableIter iter;
     pcmk__node_status_t *existing_node = NULL;
 
     if ((node->cluster_layer_id == 0) || (node->name == NULL)) {
         return 0;
     }
 
     if (!pcmk__corosync_has_nodelist()) {
         return 0;
     }
 
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &existing_node)) {
         if ((existing_node->cluster_layer_id > 0)
             && (existing_node->cluster_layer_id != node->cluster_layer_id)
             && pcmk__str_eq(existing_node->name, node->name, pcmk__str_casei)) {
 
             if (pcmk__cluster_is_node_active(existing_node)) {
                 continue;
             }
 
             crm_warn("Removing cached offline node %" PRIu32 "/%s which has "
                      "conflicting name with %" PRIu32,
                      existing_node->cluster_layer_id, existing_node->name,
                      node->cluster_layer_id);
 
             g_hash_table_iter_remove(&iter);
             matches++;
         }
     }
 
     return matches;
 }
 #endif
 
 /*!
  * \internal
  * \brief Get a cluster node cache entry, possibly creating one if not found
  *
  * If \c pcmk__node_search_cluster_member is set in \p flags, the return value
  * is guaranteed not to be \c NULL. A new cache entry is created if one does not
  * already exist.
  *
  * \param[in] id     If not 0, cluster node ID to search for
  * \param[in] uname  If not NULL, node name to search for
  * \param[in] uuid   If not NULL while id is 0, node UUID instead of cluster
  *                   node ID to search for
  * \param[in] flags  Group of enum pcmk__node_search_flags
  *
  * \return (Possibly newly created) cluster node cache entry
  */
 /* coverity[-alloc] Memory is referenced in one or both hashtables */
 pcmk__node_status_t *
 pcmk__get_node(unsigned int id, const char *uname, const char *uuid,
                uint32_t flags)
 {
     pcmk__node_status_t *node = NULL;
     char *uname_lookup = NULL;
 
     CRM_ASSERT(id > 0 || uname != NULL);
 
     pcmk__cluster_init_node_caches();
 
     // Check the Pacemaker Remote node cache first
     if ((uname != NULL) && pcmk_is_set(flags, pcmk__node_search_remote)) {
         node = g_hash_table_lookup(crm_remote_peer_cache, uname);
         if (node != NULL) {
             return node;
         }
     }
 
     if (!pcmk_is_set(flags, pcmk__node_search_cluster_member)) {
         return NULL;
     }
 
     node = search_cluster_member_cache(id, uname, uuid);
 
     /* If uname wasn't provided and the cache search above didn't turn up a
      * name based on the ID, look up the node name using the ID in the
      * cluster membership.
      */
     if ((uname == NULL) && ((node == NULL) || (node->name == NULL))) {
         uname_lookup = pcmk__cluster_node_name(id);
     }
 
     if (uname_lookup) {
         uname = uname_lookup;
         crm_trace("Inferred a name of '%s' for node %u", uname, id);
 
         /* Search the cluster member cache again now that we know the name */
         if (node == NULL) {
             node = search_cluster_member_cache(id, uname, uuid);
         }
     }
 
     if (node == NULL) {
         char *uniqueid = crm_generate_uuid();
 
         node = pcmk__assert_alloc(1, sizeof(pcmk__node_status_t));
 
         crm_info("Created entry %s/%p for node %s/%u (%d total)",
                  uniqueid, node, uname, id, 1 + g_hash_table_size(crm_peer_cache));
         g_hash_table_replace(crm_peer_cache, uniqueid, node);
     }
 
     if ((id > 0) && (uname != NULL)
         && ((node->cluster_layer_id == 0) || (node->name == NULL))) {
         crm_info("Node %u is now known as %s", id, uname);
     }
 
     if ((id > 0) && (node->cluster_layer_id == 0)) {
         node->cluster_layer_id = id;
     }
 
     if ((uname != NULL) && (node->name == NULL)) {
         update_peer_uname(node, uname);
     }
 
     if (node->xml_id == NULL) {
         if (uuid == NULL) {
             uuid = pcmk__cluster_node_uuid(node);
         }
 
         if (uuid) {
             crm_info("Node %u has uuid %s", id, uuid);
 
         } else {
             crm_info("Cannot obtain a UUID for node %u/%s", id, node->name);
         }
     }
 
     free(uname_lookup);
 
     return node;
 }
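 
 /* Illustrative sketch, not part of the actual change: with
  * pcmk__node_search_cluster_member set, the result is never NULL, so the
  * caller may use it directly. The ID and name are hypothetical values.
  */
 static void
 example_get_node(void)
 {
     pcmk__node_status_t *node =
         pcmk__get_node(1, "node-a", NULL, pcmk__node_search_cluster_member);
 
     crm_info("Cache entry %s has cluster layer ID %" PRIu32,
              pcmk__s(node->name, "(unknown)"), node->cluster_layer_id);
 }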
 
 /*!
  * \internal
  * \brief Update a node's uname
  *
  * \param[in,out] node   Node object to update
  * \param[in]     uname  New name to set
  *
  * \note This function should not be called within a peer cache iteration,
  *       because in some cases it can remove conflicting cache entries,
  *       which would invalidate the iterator.
  */
 static void
 update_peer_uname(pcmk__node_status_t *node, const char *uname)
 {
     CRM_CHECK(uname != NULL,
               crm_err("Bug: can't update node name without name"); return);
     CRM_CHECK(node != NULL,
               crm_err("Bug: can't update node name to %s without node", uname);
               return);
 
     if (pcmk__str_eq(uname, node->name, pcmk__str_casei)) {
         crm_debug("Node name '%s' did not change", uname);
         return;
     }
 
     for (const char *c = uname; *c; ++c) {
         if ((*c >= 'A') && (*c <= 'Z')) {
             crm_warn("Node names with capitals are discouraged, consider changing '%s'",
                      uname);
             break;
         }
     }
 
     pcmk__str_update(&node->name, uname);
 
     if (peer_status_callback != NULL) {
         peer_status_callback(pcmk__node_update_name, node, NULL);
     }
 
 #if SUPPORT_COROSYNC
     if ((pcmk_get_cluster_layer() == pcmk_cluster_layer_corosync)
         && !pcmk_is_set(node->flags, pcmk__node_status_remote)) {
 
         remove_conflicting_peer(node);
     }
 #endif
 }
 
 /*!
  * \internal
  * \brief Get log-friendly string equivalent of a process flag
  *
  * \param[in] proc  Process flag
  *
  * \return Log-friendly string equivalent of \p proc
  */
 static inline const char *
 proc2text(enum crm_proc_flag proc)
 {
     const char *text = "unknown";
 
     switch (proc) {
         case crm_proc_none:
             text = "none";
             break;
         case crm_proc_cpg:
             text = "corosync-cpg";
             break;
     }
     return text;
 }
 
 /*!
  * \internal
  * \brief Update a node's process information (and potentially state)
  *
  * \param[in]     source  Caller's function name (for log messages)
  * \param[in,out] node    Node object to update
  * \param[in]     flag    Bitmask of new process information
  * \param[in]     status  node status (online, offline, etc.)
  *
  * \return NULL if any node was reaped from peer caches, value of node otherwise
  *
  * \note If this function returns NULL, the supplied node object was likely
  *       freed and should not be used again. This function should not be
  *       called within a cache iteration if reaping is possible, otherwise
  *       reaping could invalidate the iterator.
  */
 pcmk__node_status_t *
 crm_update_peer_proc(const char *source, pcmk__node_status_t *node,
                      uint32_t flag, const char *status)
 {
     uint32_t last = 0;
     gboolean changed = FALSE;
 
     CRM_CHECK(node != NULL, crm_err("%s: Could not set %s to %s for NULL",
                                     source, proc2text(flag), status);
                             return NULL);
 
     /* Pacemaker doesn't spawn processes on remote nodes */
     if (pcmk_is_set(node->flags, pcmk__node_status_remote)) {
         return node;
     }
 
     last = node->processes;
     if (status == NULL) {
         node->processes = flag;
         if (node->processes != last) {
             changed = TRUE;
         }
 
     } else if (pcmk__str_eq(status, PCMK_VALUE_ONLINE, pcmk__str_casei)) {
         if ((node->processes & flag) != flag) {
             node->processes = pcmk__set_flags_as(__func__, __LINE__,
                                                  LOG_TRACE, "Peer process",
                                                  node->name, node->processes,
                                                  flag, "processes");
             changed = TRUE;
         }
 
     } else if (node->processes & flag) {
         node->processes = pcmk__clear_flags_as(__func__, __LINE__,
                                                LOG_TRACE, "Peer process",
                                                node->name, node->processes,
                                                flag, "processes");
         changed = TRUE;
     }
 
     if (changed) {
         if (status == NULL && flag <= crm_proc_none) {
             crm_info("%s: Node %s[%" PRIu32 "] - all processes are now offline",
                      source, node->name, node->cluster_layer_id);
         } else {
             crm_info("%s: Node %s[%" PRIu32 "] - %s is now %s",
                      source, node->name, node->cluster_layer_id,
                      proc2text(flag), status);
         }
 
         if (pcmk_is_set(node->processes, crm_get_cluster_proc())) {
             node->when_online = time(NULL);
 
         } else {
             node->when_online = 0;
         }
 
         /* Call the client callback first, then update the peer state,
          * in case the node will be reaped
          */
         if (peer_status_callback != NULL) {
             peer_status_callback(pcmk__node_update_processes, node, &last);
         }
 
         /* The client callback shouldn't touch the peer caches,
          * but as a safety net, bail if the peer cache was destroyed.
          */
         if (crm_peer_cache == NULL) {
             return NULL;
         }
 
         if (autoreap) {
             const char *peer_state = NULL;
 
             if (pcmk_is_set(node->processes, crm_get_cluster_proc())) {
-                peer_state = CRM_NODE_MEMBER;
+                peer_state = PCMK_VALUE_MEMBER;
             } else {
                 peer_state = CRM_NODE_LOST;
             }
             node = pcmk__update_peer_state(__func__, node, peer_state, 0);
         }
     } else {
         crm_trace("%s: Node %s[%" PRIu32 "] - %s is unchanged (%s)",
                   source, node->name, node->cluster_layer_id, proc2text(flag),
                   status);
     }
     return node;
 }
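 
 /* Illustrative sketch, not part of the actual change: per the note above, a
  * NULL return means the node may have been reaped and freed, so the caller
  * must replace its pointer with the return value. example_proc_online() is
  * hypothetical; node is assumed to come from pcmk__get_node().
  */
 static void
 example_proc_online(pcmk__node_status_t *node)
 {
     node = crm_update_peer_proc(__func__, node, crm_proc_cpg,
                                 PCMK_VALUE_ONLINE);
     if (node == NULL) {
         return; // node was reaped; do not dereference the old pointer
     }
     crm_trace("Node %s is still cached", pcmk__s(node->name, "(unknown)"));
 }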
 
 /*!
  * \internal
  * \brief Update a cluster node cache entry's expected join state
  *
  * \param[in]     source    Caller's function name (for logging)
  * \param[in,out] node      Node to update
  * \param[in]     expected  Node's new join state
  */
 void
 pcmk__update_peer_expected(const char *source, pcmk__node_status_t *node,
                            const char *expected)
 {
     char *last = NULL;
     gboolean changed = FALSE;
 
     CRM_CHECK(node != NULL,
               crm_err("%s: Could not set 'expected' to %s", source, expected);
               return);
 
     /* Remote nodes don't participate in joins */
     if (pcmk_is_set(node->flags, pcmk__node_status_remote)) {
         return;
     }
 
     last = node->expected;
     if (expected != NULL && !pcmk__str_eq(node->expected, expected, pcmk__str_casei)) {
         node->expected = strdup(expected);
         changed = TRUE;
     }
 
     if (changed) {
         crm_info("%s: Node %s[%" PRIu32 "] - expected state is now %s (was %s)",
                  source, node->name, node->cluster_layer_id, expected, last);
         free(last);
     } else {
         crm_trace("%s: Node %s[%" PRIu32 "] - expected state is unchanged (%s)",
                   source, node->name, node->cluster_layer_id, expected);
     }
 }
 
 /*!
  * \internal
  * \brief Update a node's state and membership information
  *
  * \param[in]     source      Caller's function name (for log messages)
  * \param[in,out] node        Node object to update
  * \param[in]     state       Node's new state
  * \param[in]     membership  Node's new membership ID
  * \param[in,out] iter        If not NULL, pointer to node's peer cache iterator
  *
  * \return NULL if any node was reaped, value of node otherwise
  *
  * \note If this function returns NULL, the supplied node object was likely
  *       freed and should not be used again. This function may be called from
  *       within a peer cache iteration if the iterator is supplied.
  */
 static pcmk__node_status_t *
 update_peer_state_iter(const char *source, pcmk__node_status_t *node,
                        const char *state, uint64_t membership,
                        GHashTableIter *iter)
 {
     gboolean is_member;
 
     CRM_CHECK(node != NULL,
               crm_err("Could not set state for unknown host to %s"
                       QB_XS " source=%s", state, source);
               return NULL);
 
-    is_member = pcmk__str_eq(state, CRM_NODE_MEMBER, pcmk__str_casei);
+    is_member = pcmk__str_eq(state, PCMK_VALUE_MEMBER, pcmk__str_none);
     if (is_member) {
         node->when_lost = 0;
         if (membership) {
             node->membership_id = membership;
         }
     }
 
     if (state && !pcmk__str_eq(node->state, state, pcmk__str_casei)) {
         char *last = node->state;
 
         if (is_member) {
             node->when_member = time(NULL);
 
         } else {
             node->when_member = 0;
         }
 
         node->state = strdup(state);
         crm_notice("Node %s state is now %s " QB_XS
                    " nodeid=%" PRIu32 " previous=%s source=%s",
                    node->name, state, node->cluster_layer_id,
                    pcmk__s(last, "unknown"), source);
         if (peer_status_callback != NULL) {
             peer_status_callback(pcmk__node_update_state, node, last);
         }
         free(last);
 
         if (autoreap && !is_member
             && !pcmk_is_set(node->flags, pcmk__node_status_remote)) {
             /* We only autoreap from the peer cache, not the remote peer cache,
              * because the latter should be managed only by
              * refresh_remote_nodes().
              */
             if (iter) {
                 crm_notice("Purged 1 peer with cluster layer ID=%" PRIu32
                            " and/or name=%s from the membership cache",
                            node->cluster_layer_id, node->name);
                 g_hash_table_iter_remove(iter);
 
             } else {
                 pcmk__cluster_forget_cluster_node(node->cluster_layer_id,
                                                   node->name);
             }
             node = NULL;
         }
 
     } else {
         crm_trace("Node %s state is unchanged (%s) " QB_XS
                   " nodeid=%" PRIu32 " source=%s",
                   node->name, state, node->cluster_layer_id, source);
     }
     return node;
 }
 
 /*!
  * \brief Update a node's state and membership information
  *
  * \param[in]     source      Caller's function name (for log messages)
  * \param[in,out] node        Node object to update
  * \param[in]     state       Node's new state
  * \param[in]     membership  Node's new membership ID
  *
  * \return NULL if any node was reaped, value of node otherwise
  *
  * \note If this function returns NULL, the supplied node object was likely
  *       freed and should not be used again. This function should not be
  *       called within a cache iteration if reaping is possible,
  *       otherwise reaping could invalidate the iterator.
  */
 pcmk__node_status_t *
 pcmk__update_peer_state(const char *source, pcmk__node_status_t *node,
                         const char *state, uint64_t membership)
 {
     return update_peer_state_iter(source, node, state, membership, NULL);
 }
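 
 /* Illustrative sketch, not part of the actual change: with autoreap enabled,
  * updating a cluster node's state to CRM_NODE_LOST may free the entry, so
  * the return value must replace the caller's pointer. (The membership ID is
  * recorded only when the new state is member.) example_mark_lost() is a
  * hypothetical name.
  */
 static void
 example_mark_lost(pcmk__node_status_t *node)
 {
     node = pcmk__update_peer_state(__func__, node, CRM_NODE_LOST, 0);
     if (node == NULL) {
         crm_trace("Node was reaped from the membership cache");
     }
 }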
 
 /*!
  * \internal
  * \brief Reap all nodes from cache whose membership information does not match
  *
  * \param[in] membership  Membership ID of nodes to keep
  */
 void
 pcmk__reap_unseen_nodes(uint64_t membership)
 {
     GHashTableIter iter;
     pcmk__node_status_t *node = NULL;
 
     crm_trace("Reaping unseen nodes...");
     g_hash_table_iter_init(&iter, crm_peer_cache);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *)&node)) {
         if (node->membership_id != membership) {
             if (node->state) {
                 /*
                  * Calling update_peer_state_iter() allows us to
                  * remove the node from crm_peer_cache without
                  * invalidating our iterator
                  */
                 update_peer_state_iter(__func__, node, CRM_NODE_LOST,
                                            membership, &iter);
 
             } else {
                 crm_info("State of node %s[%" PRIu32 "] is still unknown",
                          node->name, node->cluster_layer_id);
             }
         }
     }
 }
 
 static pcmk__node_status_t *
 find_cib_cluster_node(const char *id, const char *uname)
 {
     GHashTableIter iter;
     pcmk__node_status_t *node = NULL;
     pcmk__node_status_t *by_id = NULL;
     pcmk__node_status_t *by_name = NULL;
 
     if (uname) {
         g_hash_table_iter_init(&iter, cluster_node_cib_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if (pcmk__str_eq(node->name, uname, pcmk__str_casei)) {
                 crm_trace("Name match: %s = %p", node->name, node);
                 by_name = node;
                 break;
             }
         }
     }
 
     if (id) {
         g_hash_table_iter_init(&iter, cluster_node_cib_cache);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if (pcmk__str_eq(node->xml_id, id, pcmk__str_casei)) {
                 crm_trace("ID match: %s= %p", id, node);
                 by_id = node;
                 break;
             }
         }
     }
 
     node = by_id; /* Good default */
     if (by_id == by_name) {
         /* Nothing to do if they match (both NULL counts) */
         crm_trace("Consistent: %p for %s/%s", by_id, id, uname);
 
     } else if (by_id == NULL && by_name) {
         crm_trace("Only one: %p for %s/%s", by_name, id, uname);
 
         if (id) {
             node = NULL;
 
         } else {
             node = by_name;
         }
 
     } else if (by_name == NULL && by_id) {
         crm_trace("Only one: %p for %s/%s", by_id, id, uname);
 
         if (uname) {
             node = NULL;
         }
 
     } else if ((uname != NULL) && (by_id->name != NULL)
                && pcmk__str_eq(uname, by_id->name, pcmk__str_casei)) {
         /* Multiple nodes have the same uname in the CIB.
          * Return by_id. */
 
     } else if ((id != NULL) && (by_name->xml_id != NULL)
                && pcmk__str_eq(id, by_name->xml_id, pcmk__str_casei)) {
         /* Multiple nodes have the same id in the CIB.
          * Return by_name. */
         node = by_name;
 
     } else {
         node = NULL;
     }
 
     if (node == NULL) {
         crm_debug("Couldn't find node%s%s%s%s",
                    id? " " : "",
                    id? id : "",
                    uname? " with name " : "",
                    uname? uname : "");
     }
 
     return node;
 }
 
 static void
 cluster_node_cib_cache_refresh_helper(xmlNode *xml_node, void *user_data)
 {
     const char *id = crm_element_value(xml_node, PCMK_XA_ID);
     const char *uname = crm_element_value(xml_node, PCMK_XA_UNAME);
     pcmk__node_status_t *node = NULL;
 
     CRM_CHECK(id != NULL && uname != NULL, return);
     node = find_cib_cluster_node(id, uname);
 
     if (node == NULL) {
         char *uniqueid = crm_generate_uuid();
 
         node = pcmk__assert_alloc(1, sizeof(pcmk__node_status_t));
 
         node->name = pcmk__str_copy(uname);
         node->xml_id = pcmk__str_copy(id);
 
         g_hash_table_replace(cluster_node_cib_cache, uniqueid, node);
 
     } else if (pcmk_is_set(node->flags, pcmk__node_status_dirty)) {
         pcmk__str_update(&node->name, uname);
 
         /* Node is in cache and hasn't been updated already, so mark it clean */
         clear_peer_flags(node, pcmk__node_status_dirty);
     }
 }
 
 static void
 refresh_cluster_node_cib_cache(xmlNode *cib)
 {
     pcmk__cluster_init_node_caches();
 
     g_hash_table_foreach(cluster_node_cib_cache, mark_dirty, NULL);
 
     crm_foreach_xpath_result(cib, PCMK__XP_MEMBER_NODE_CONFIG,
                              cluster_node_cib_cache_refresh_helper, NULL);
 
     // Remove all old cache entries that weren't seen in the CIB
     g_hash_table_foreach_remove(cluster_node_cib_cache, is_dirty, NULL);
 }
 
 void
 pcmk__refresh_node_caches_from_cib(xmlNode *cib)
 {
     refresh_remote_nodes(cib);
     refresh_cluster_node_cib_cache(cib);
 }
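 
 /* Illustrative sketch, not part of the actual change: refreshing both caches
  * from a CIB document. Obtaining the <cib> XML is outside this file's scope;
  * cib is assumed to be valid, and example_on_cib_change() is a hypothetical
  * name.
  */
 static void
 example_on_cib_change(xmlNode *cib)
 {
     pcmk__refresh_node_caches_from_cib(cib); // mark dirty, update, prune
 }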
 
 // Deprecated functions kept only for backward API compatibility
 // LCOV_EXCL_START
 
 #include <crm/cluster/compat.h>
 
 void
 crm_peer_init(void)
 {
     pcmk__cluster_init_node_caches();
 }
 
 // LCOV_EXCL_STOP
 // End deprecated API