diff --git a/daemons/fenced/pacemaker-fenced.c b/daemons/fenced/pacemaker-fenced.c
index 783e6fae48..fc632befe4 100644
--- a/daemons/fenced/pacemaker-fenced.c
+++ b/daemons/fenced/pacemaker-fenced.c
@@ -1,1758 +1,1758 @@
 /*
  * Copyright 2009-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <stdio.h>
 #include <sys/types.h>
 #include <sys/stat.h>
 #include <unistd.h>
 #include <sys/utsname.h>
 
 #include <stdlib.h>
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>  // PRIu32, PRIx32
 
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/cmdline_internal.h>
 #include <crm/common/ipc.h>
 #include <crm/common/ipc_internal.h>
 #include <crm/common/output_internal.h>
 #include <crm/cluster/internal.h>
 
 #include <crm/stonith-ng.h>
 #include <crm/fencing/internal.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 
 #include <crm/common/mainloop.h>
 
 #include <crm/cib/internal.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <pacemaker-internal.h>
 
 #include <pacemaker-fenced.h>
 
 #define SUMMARY "daemon for executing fencing devices in a Pacemaker cluster"
 
 char *stonith_our_uname = NULL;
 long stonith_watchdog_timeout_ms = 0;
 GList *stonith_watchdog_targets = NULL;
 
 static GMainLoop *mainloop = NULL;
 
 gboolean stand_alone = FALSE;
 static gboolean stonith_shutdown_flag = FALSE;
 
 static qb_ipcs_service_t *ipcs = NULL;
 static xmlNode *local_cib = NULL;
 static pe_working_set_t *fenced_data_set = NULL;
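 /* Scheduler flags for the fencer's internal scheduler runs. Roughly speaking,
  * they limit the run to what the fencer needs (working out where fencing
  * resources may run); see the pe_flag_* definitions for exact semantics.
  */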
 static const unsigned long long data_set_flags = pe_flag_quick_location
                                                  | pe_flag_no_compat
                                                  | pe_flag_no_counts;
 
 static cib_t *cib_api = NULL;
 
 static pcmk__output_t *logger_out = NULL;
 static pcmk__output_t *out = NULL;
 
 pcmk__supported_format_t formats[] = {
     PCMK__SUPPORTED_FORMAT_NONE,
     PCMK__SUPPORTED_FORMAT_TEXT,
     PCMK__SUPPORTED_FORMAT_XML,
     { NULL, NULL, NULL }
 };
 
 static struct {
     bool no_cib_connect;
     gchar **log_files;
 } options;
 
 static crm_exit_t exit_code = CRM_EX_OK;
 
 static void stonith_shutdown(int nsig);
 static void stonith_cleanup(void);
 
 static int32_t
 st_ipc_accept(qb_ipcs_connection_t * c, uid_t uid, gid_t gid)
 {
     if (stonith_shutdown_flag) {
         crm_info("Ignoring new client [%d] during shutdown",
                  pcmk__client_pid(c));
         return -EPERM;
     }
 
     if (pcmk__new_client(c, uid, gid) == NULL) {
         return -EIO;
     }
     return 0;
 }
 
 /* Exit code means? */
 static int32_t
 st_ipc_dispatch(qb_ipcs_connection_t * qbc, void *data, size_t size)
 {
     uint32_t id = 0;
     uint32_t flags = 0;
     int call_options = 0;
     xmlNode *request = NULL;
     pcmk__client_t *c = pcmk__find_client(qbc);
     const char *op = NULL;
 
     if (c == NULL) {
         crm_info("Invalid client: %p", qbc);
         return 0;
     }
 
     request = pcmk__client_data2xml(c, data, &id, &flags);
     if (request == NULL) {
         pcmk__ipc_send_ack(c, id, flags, "nack", NULL, CRM_EX_PROTOCOL);
         return 0;
     }
 
     op = crm_element_value(request, F_CRM_TASK);
     if(pcmk__str_eq(op, CRM_OP_RM_NODE_CACHE, pcmk__str_casei)) {
         crm_xml_add(request, F_TYPE, T_STONITH_NG);
         crm_xml_add(request, F_STONITH_OPERATION, op);
         crm_xml_add(request, F_STONITH_CLIENTID, c->id);
         crm_xml_add(request, F_STONITH_CLIENTNAME, pcmk__client_name(c));
         crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname);
 
         send_cluster_message(NULL, crm_msg_stonith_ng, request, FALSE);
         free_xml(request);
         return 0;
     }
 
     if (c->name == NULL) {
         const char *value = crm_element_value(request, F_STONITH_CLIENTNAME);
 
         if (value == NULL) {
             value = "unknown";
         }
         c->name = crm_strdup_printf("%s.%u", value, c->pid);
     }
 
     crm_element_value_int(request, F_STONITH_CALLOPTS, &call_options);
     crm_trace("Flags %#08" PRIx32 "/%#08x for command %" PRIu32
               " from client %s", flags, call_options, id, pcmk__client_name(c));
 
     if (pcmk_is_set(call_options, st_opt_sync_call)) {
         CRM_ASSERT(flags & crm_ipc_client_response);
         CRM_LOG_ASSERT(c->request_id == 0);     /* This means the client has two synchronous events in-flight */
         c->request_id = id;     /* Reply only to the last one */
     }
 
     crm_xml_add(request, F_STONITH_CLIENTID, c->id);
     crm_xml_add(request, F_STONITH_CLIENTNAME, pcmk__client_name(c));
     crm_xml_add(request, F_STONITH_CLIENTNODE, stonith_our_uname);
 
     crm_log_xml_trace(request, "ipc-received");
     stonith_command(c, id, flags, request, NULL);
 
     free_xml(request);
     return 0;
 }
 
 /* Error code means? */
 static int32_t
 st_ipc_closed(qb_ipcs_connection_t * c)
 {
     pcmk__client_t *client = pcmk__find_client(c);
 
     if (client == NULL) {
         return 0;
     }
 
     crm_trace("Connection %p closed", c);
     pcmk__free_client(client);
 
     /* 0 means: yes, go ahead and destroy the connection */
     return 0;
 }
 
 static void
 st_ipc_destroy(qb_ipcs_connection_t * c)
 {
     crm_trace("Connection %p destroyed", c);
     st_ipc_closed(c);
 }
 
 static void
 stonith_peer_callback(xmlNode * msg, void *private_data)
 {
     const char *remote_peer = crm_element_value(msg, F_ORIG);
     const char *op = crm_element_value(msg, F_STONITH_OPERATION);
 
     if (pcmk__str_eq(op, "poke", pcmk__str_none)) {
         return;
     }
 
     crm_log_xml_trace(msg, "Peer[inbound]");
     stonith_command(NULL, 0, 0, msg, remote_peer);
 }
 
 #if SUPPORT_COROSYNC
 static void
 stonith_peer_ais_callback(cpg_handle_t handle,
                           const struct cpg_name *groupName,
                           uint32_t nodeid, uint32_t pid, void *msg, size_t msg_len)
 {
     uint32_t kind = 0;
     xmlNode *xml = NULL;
     const char *from = NULL;
     char *data = pcmk_message_common_cs(handle, nodeid, pid, msg, &kind, &from);
 
     if(data == NULL) {
         return;
     }
     if (kind == crm_class_cluster) {
         xml = string2xml(data);
         if (xml == NULL) {
             crm_err("Invalid XML: '%.120s'", data);
             free(data);
             return;
         }
         crm_xml_add(xml, F_ORIG, from);
         /* crm_xml_add_int(xml, F_SEQ, wrapper->id); */
         stonith_peer_callback(xml, NULL);
     }
 
     free_xml(xml);
     free(data);
     return;
 }
 
 static void
 stonith_peer_cs_destroy(gpointer user_data)
 {
     crm_crit("Lost connection to cluster layer, shutting down");
     stonith_shutdown(0);
 }
 #endif
 
 void
 do_local_reply(xmlNode *notify_src, pcmk__client_t *client, int call_options)
 {
     /* Send reply to the originating client */
     int local_rc = pcmk_rc_ok;
     int rid = 0;
     uint32_t ipc_flags = crm_ipc_server_event;
 
     if (pcmk_is_set(call_options, st_opt_sync_call)) {
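         /* Synchronous requests are answered using the saved request ID and no
          * event flag; asynchronous replies go out as server events.
          */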
         CRM_LOG_ASSERT(client->request_id);
         rid = client->request_id;
         client->request_id = 0;
         ipc_flags = crm_ipc_flags_none;
     }
 
     local_rc = pcmk__ipc_send_xml(client, rid, notify_src, ipc_flags);
     if (local_rc == pcmk_rc_ok) {
         crm_trace("Sent response %d to client %s",
                   rid, pcmk__client_name(client));
     } else {
         crm_warn("%synchronous reply to client %s failed: %s",
                  (pcmk_is_set(call_options, st_opt_sync_call)? "S" : "As"),
                  pcmk__client_name(client), pcmk_rc_str(local_rc));
     }
 }
 
 uint64_t
 get_stonith_flag(const char *name)
 {
     if (pcmk__str_eq(name, T_STONITH_NOTIFY_FENCE, pcmk__str_casei)) {
         return st_callback_notify_fence;
 
     } else if (pcmk__str_eq(name, STONITH_OP_DEVICE_ADD, pcmk__str_casei)) {
         return st_callback_device_add;
 
     } else if (pcmk__str_eq(name, STONITH_OP_DEVICE_DEL, pcmk__str_casei)) {
         return st_callback_device_del;
 
     } else if (pcmk__str_eq(name, T_STONITH_NOTIFY_HISTORY, pcmk__str_casei)) {
         return st_callback_notify_history;
 
     } else if (pcmk__str_eq(name, T_STONITH_NOTIFY_HISTORY_SYNCED, pcmk__str_casei)) {
         return st_callback_notify_history_synced;
 
     }
     return st_callback_unknown;
 }
 
 static void
 stonith_notify_client(gpointer key, gpointer value, gpointer user_data)
 {
     xmlNode *update_msg = user_data;
     pcmk__client_t *client = value;
     const char *type = NULL;
 
     CRM_CHECK(client != NULL, return);
     CRM_CHECK(update_msg != NULL, return);
 
     type = crm_element_value(update_msg, F_SUBTYPE);
     CRM_CHECK(type != NULL, crm_log_xml_err(update_msg, "notify"); return);
 
     if (client->ipcs == NULL) {
         crm_trace("Skipping client with NULL channel");
         return;
     }
 
     if (pcmk_is_set(client->flags, get_stonith_flag(type))) {
         int rc = pcmk__ipc_send_xml(client, 0, update_msg,
                                     crm_ipc_server_event);
 
         if (rc != pcmk_rc_ok) {
             crm_warn("%s notification of client %s failed: %s "
                      CRM_XS " id=%.8s rc=%d", type, pcmk__client_name(client),
                      pcmk_rc_str(rc), client->id, rc);
         } else {
             crm_trace("Sent %s notification to client %s",
                       type, pcmk__client_name(client));
         }
     }
 }
 
 void
 do_stonith_async_timeout_update(const char *client_id, const char *call_id, int timeout)
 {
     pcmk__client_t *client = NULL;
     xmlNode *notify_data = NULL;
 
     if (!timeout || !call_id || !client_id) {
         return;
     }
 
     client = pcmk__find_client_by_id(client_id);
     if (!client) {
         return;
     }
 
     notify_data = create_xml_node(NULL, T_STONITH_TIMEOUT_VALUE);
     crm_xml_add(notify_data, F_TYPE, T_STONITH_TIMEOUT_VALUE);
     crm_xml_add(notify_data, F_STONITH_CALLID, call_id);
     crm_xml_add_int(notify_data, F_STONITH_TIMEOUT, timeout);
 
     crm_trace("timeout update is %d for client %s and call id %s", timeout, client_id, call_id);
 
     if (client) {
         pcmk__ipc_send_xml(client, 0, notify_data, crm_ipc_server_event);
     }
 
     free_xml(notify_data);
 }
 
 /*!
  * \internal
  * \brief Notify relevant IPC clients of a fencing operation result
  *
  * \param[in] type     Notification type
  * \param[in] result   Result of fencing operation (assume success if NULL)
  * \param[in] data     If not NULL, add to notification as call data
  */
 void
 fenced_send_notification(const char *type, const pcmk__action_result_t *result,
                          xmlNode *data)
 {
     /* TODO: Standardize the contents of data */
     xmlNode *update_msg = create_xml_node(NULL, "notify");
 
     CRM_LOG_ASSERT(type != NULL);
 
     crm_xml_add(update_msg, F_TYPE, T_STONITH_NOTIFY);
     crm_xml_add(update_msg, F_SUBTYPE, type);
     crm_xml_add(update_msg, F_STONITH_OPERATION, type);
     stonith__xe_set_result(update_msg, result);
 
     if (data != NULL) {
         add_message_xml(update_msg, F_STONITH_CALLDATA, data);
     }
 
     crm_trace("Notifying clients");
     pcmk__foreach_ipc_client(stonith_notify_client, update_msg);
     free_xml(update_msg);
     crm_trace("Notify complete");
 }
 
 /*!
  * \internal
  * \brief Send notifications for a configuration change to subscribed clients
  *
  * \param[in] op      Notification type (STONITH_OP_DEVICE_ADD,
  *                    STONITH_OP_DEVICE_DEL, STONITH_OP_LEVEL_ADD, or
  *                    STONITH_OP_LEVEL_DEL)
  * \param[in] result  Operation result
  * \param[in] desc    Description of what changed
  * \param[in] active  Current number of devices or topologies in use
  */
 static void
 send_config_notification(const char *op, const pcmk__action_result_t *result,
                          const char *desc, int active)
 {
     xmlNode *notify_data = create_xml_node(NULL, op);
 
     CRM_CHECK(notify_data != NULL, return);
 
     crm_xml_add(notify_data, F_STONITH_DEVICE, desc);
     crm_xml_add_int(notify_data, F_STONITH_ACTIVE, active);
 
     fenced_send_notification(op, result, notify_data);
     free_xml(notify_data);
 }
 
 /*!
  * \internal
  * \brief Send notifications for a device change to subscribed clients
  *
  * \param[in] op      Notification type (STONITH_OP_DEVICE_ADD or
  *                    STONITH_OP_DEVICE_DEL)
  * \param[in] result  Operation result
  * \param[in] desc    ID of device that changed
  */
 void
 fenced_send_device_notification(const char *op,
                                 const pcmk__action_result_t *result,
                                 const char *desc)
 {
     send_config_notification(op, result, desc, g_hash_table_size(device_list));
 }
 
 /*!
  * \internal
  * \brief Send notifications for a topology level change to subscribed clients
  *
  * \param[in] op      Notification type (STONITH_OP_LEVEL_ADD or
  *                    STONITH_OP_LEVEL_DEL)
  * \param[in] result  Operation result
  * \param[in] desc    String representation of level (<target>[<level_index>])
  */
 void
 fenced_send_level_notification(const char *op,
                                const pcmk__action_result_t *result,
                                const char *desc)
 {
     send_config_notification(op, result, desc, g_hash_table_size(topology));
 }
 
 static void
 topology_remove_helper(const char *node, int level)
 {
     char *desc = NULL;
     pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
     xmlNode *data = create_xml_node(NULL, XML_TAG_FENCING_LEVEL);
 
     crm_xml_add(data, F_STONITH_ORIGIN, __func__);
     crm_xml_add_int(data, XML_ATTR_STONITH_INDEX, level);
     crm_xml_add(data, XML_ATTR_STONITH_TARGET, node);
 
     fenced_unregister_level(data, &desc, &result);
     fenced_send_level_notification(STONITH_OP_LEVEL_DEL, &result, desc);
     pcmk__reset_result(&result);
     free_xml(data);
     free(desc);
 }
 
 static void
 remove_cib_device(xmlXPathObjectPtr xpathObj)
 {
     int max = numXpathResults(xpathObj), lpc = 0;
 
     for (lpc = 0; lpc < max; lpc++) {
         const char *rsc_id = NULL;
         const char *standard = NULL;
         xmlNode *match = getXpathResult(xpathObj, lpc);
 
         CRM_LOG_ASSERT(match != NULL);
         if(match != NULL) {
             standard = crm_element_value(match, XML_AGENT_ATTR_CLASS);
         }
 
         if (!pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
             continue;
         }
 
         rsc_id = crm_element_value(match, XML_ATTR_ID);
 
         stonith_device_remove(rsc_id, true);
     }
 }
 
 static void
 remove_topology_level(xmlNode *match)
 {
     int index = 0;
     char *key = NULL;
 
     CRM_CHECK(match != NULL, return);
 
     key = stonith_level_key(match, fenced_target_by_unknown);
     crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index);
     topology_remove_helper(key, index);
     free(key);
 }
 
 static void
 add_topology_level(xmlNode *match)
 {
     char *desc = NULL;
     pcmk__action_result_t result = PCMK__UNKNOWN_RESULT;
 
     CRM_CHECK(match != NULL, return);
 
     fenced_register_level(match, &desc, &result);
     fenced_send_level_notification(STONITH_OP_LEVEL_ADD, &result, desc);
     pcmk__reset_result(&result);
     free(desc);
 }
 
 static void
 remove_fencing_topology(xmlXPathObjectPtr xpathObj)
 {
     int max = numXpathResults(xpathObj), lpc = 0;
 
     for (lpc = 0; lpc < max; lpc++) {
         xmlNode *match = getXpathResult(xpathObj, lpc);
 
         CRM_LOG_ASSERT(match != NULL);
         if (match && crm_element_value(match, XML_DIFF_MARKER)) {
             /* Deletion */
             int index = 0;
             char *target = stonith_level_key(match, fenced_target_by_unknown);
 
             crm_element_value_int(match, XML_ATTR_STONITH_INDEX, &index);
             if (target == NULL) {
                 crm_err("Invalid fencing target in element %s", ID(match));
 
             } else if (index <= 0) {
                 crm_err("Invalid level for %s in element %s", target, ID(match));
 
             } else {
                 topology_remove_helper(target, index);
             }
             /* } else { Deal with modifications during the 'addition' stage */
         }
     }
 }
 
 static void
 register_fencing_topology(xmlXPathObjectPtr xpathObj)
 {
     int max = numXpathResults(xpathObj), lpc = 0;
 
     for (lpc = 0; lpc < max; lpc++) {
         xmlNode *match = getXpathResult(xpathObj, lpc);
 
         remove_topology_level(match);
         add_topology_level(match);
     }
 }
 
 /* Fencing
 <diff crm_feature_set="3.0.6">
   <diff-removed>
     <fencing-topology>
       <fencing-level id="f-p1.1" target="pcmk-1" index="1" devices="poison-pill" __crm_diff_marker__="removed:top"/>
       <fencing-level id="f-p1.2" target="pcmk-1" index="2" devices="power" __crm_diff_marker__="removed:top"/>
       <fencing-level devices="disk,network" id="f-p2.1"/>
     </fencing-topology>
   </diff-removed>
   <diff-added>
     <fencing-topology>
       <fencing-level id="f-p.1" target="pcmk-1" index="1" devices="poison-pill" __crm_diff_marker__="added:top"/>
       <fencing-level id="f-p2.1" target="pcmk-2" index="1" devices="disk,something"/>
       <fencing-level id="f-p3.1" target="pcmk-2" index="2" devices="power" __crm_diff_marker__="added:top"/>
     </fencing-topology>
   </diff-added>
 </diff>
 */
 
 static void
 fencing_topology_init(void)
 {
     xmlXPathObjectPtr xpathObj = NULL;
     const char *xpath = "//" XML_TAG_FENCING_LEVEL;
 
     crm_trace("Full topology refresh");
     free_topology_list();
     init_topology_list();
 
     /* Grab everything */
     xpathObj = xpath_search(local_cib, xpath);
     register_fencing_topology(xpathObj);
 
     freeXpathObject(xpathObj);
 }
 
 // Prefer the clone instance name, if any, when naming a device for a resource
 #define rsc_name(x) ((x)->clone_name? (x)->clone_name : (x)->id)
 
 /*!
  * \internal
  * \brief Check whether our uname is in a resource's allowed node list
  *
  * \param[in] rsc  Resource to check
  *
  * \return Pointer to node object if found, NULL otherwise
  */
 static pe_node_t *
 our_node_allowed_for(const pe_resource_t *rsc)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
 
     if (rsc && stonith_our_uname) {
         g_hash_table_iter_init(&iter, rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
             if (node && strcmp(node->details->uname, stonith_our_uname) == 0) {
                 break;
             }
             node = NULL;
         }
     }
     return node;
 }
 
 static void
 watchdog_device_update(void)
 {
     if (stonith_watchdog_timeout_ms > 0) {
         if (!g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) &&
             !stonith_watchdog_targets) {
             /* If we get here, watchdog fencing is enabled, the watchdog device
              * has not been registered yet, and stonith_watchdog_targets is not
              * what is preventing registration.
              */
             int rc;
             xmlNode *xml;
 
             xml = create_device_registration_xml(
                     STONITH_WATCHDOG_ID,
                     st_namespace_internal,
                     STONITH_WATCHDOG_AGENT,
                     NULL, /* stonith_device_register will add our
                              own name as PCMK_STONITH_HOST_LIST param
                              so we can skip that here
                            */
                     NULL);
             rc = stonith_device_register(xml, TRUE);
             free_xml(xml);
             if (rc != pcmk_ok) {
                 rc = pcmk_legacy2rc(rc);
                 exit_code = CRM_EX_FATAL;
                 crm_crit("Cannot register watchdog pseudo fence agent: %s",
                          pcmk_rc_str(rc));
                 stonith_shutdown(0);
             }
         }
 
     } else if (g_hash_table_lookup(device_list, STONITH_WATCHDOG_ID) != NULL) {
         /* @TODO Add a parameter to stonith_device_remove() so we can be
          * silent when the device doesn't exist
          */
         stonith_device_remove(STONITH_WATCHDOG_ID, true);
     }
 }
 
 static void
 update_stonith_watchdog_timeout_ms(xmlNode *cib)
 {
     long timeout_ms = 0;
     xmlNode *stonith_watchdog_xml = NULL;
     const char *value = NULL;
 
     stonith_watchdog_xml = get_xpath_object("//nvpair[@name='stonith-watchdog-timeout']",
 					    cib, LOG_NEVER);
     if (stonith_watchdog_xml) {
         value = crm_element_value(stonith_watchdog_xml, XML_NVPAIR_ATTR_VALUE);
     }
     if (value) {
         timeout_ms = crm_get_msec(value);
     }
 
     if (timeout_ms < 0) {
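         // A negative value means "calculate the watchdog timeout automatically"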
         timeout_ms = pcmk__auto_watchdog_timeout();
     }
 
     stonith_watchdog_timeout_ms = timeout_ms;
 }
 
 /*!
  * \internal
  * \brief If a resource or any of its children are STONITH devices, update their
  *        definitions given a cluster working set.
  *
  * \param[in,out] rsc       Resource to check
  * \param[in,out] data_set  Cluster working set with device information
  */
 static void
 cib_device_update(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     pe_node_t *node = NULL;
     const char *value = NULL;
     const char *rclass = NULL;
     pe_node_t *parent = NULL;
 
     /* If this is a complex resource, check children rather than this resource itself. */
     if(rsc->children) {
         GList *gIter = NULL;
         for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
             cib_device_update(gIter->data, data_set);
             if(pe_rsc_is_clone(rsc)) {
                 crm_trace("Only processing one copy of the clone %s", rsc->id);
                 break;
             }
         }
         return;
     }
 
     /* We only care about STONITH resources. */
     rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     if (!pcmk__str_eq(rclass, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
         return;
     }
 
     /* If this STONITH resource is disabled, remove it. */
     if (pe__resource_is_disabled(rsc)) {
         crm_info("Device %s has been disabled", rsc->id);
         return;
     }
 
     /* If watchdog fencing is disabled, treat any watchdog fencing resource
      * as if it were disabled
      */
     if ((stonith_watchdog_timeout_ms <= 0) &&
         pcmk__str_eq(rsc->id, STONITH_WATCHDOG_ID, pcmk__str_none)) {
         crm_info("Watchdog-fencing disabled thus handling "
                  "device %s as disabled", rsc->id);
         return;
     }
 
     /* Check whether our node is allowed for this resource (and its parent if in a group) */
     node = our_node_allowed_for(rsc);
-    if (rsc->parent && (rsc->parent->variant == pe_group)) {
+    if (rsc->parent && (rsc->parent->variant == pcmk_rsc_variant_group)) {
         parent = our_node_allowed_for(rsc->parent);
     }
 
     if(node == NULL) {
         /* Our node is disallowed, so remove the device */
         GHashTableIter iter;
 
         crm_info("Device %s has been disabled on %s: unknown", rsc->id, stonith_our_uname);
         g_hash_table_iter_init(&iter, rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
             crm_trace("Available: %s = %d", pe__node_name(node), node->weight);
         }
 
         return;
 
     } else if(node->weight < 0 || (parent && parent->weight < 0)) {
         /* Our node (or its group) is disallowed by score, so remove the device */
         int score = (node->weight < 0)? node->weight : parent->weight;
 
         crm_info("Device %s has been disabled on %s: score=%s",
                  rsc->id, stonith_our_uname, pcmk_readable_score(score));
         return;
 
     } else {
         /* Our node is allowed, so update the device information */
         int rc;
         xmlNode *data;
         GHashTable *rsc_params = NULL;
         GHashTableIter gIter;
         stonith_key_value_t *params = NULL;
 
         const char *name = NULL;
         const char *agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE);
         const char *rsc_provides = NULL;
 
         crm_debug("Device %s is allowed on %s: score=%d", rsc->id, stonith_our_uname, node->weight);
         rsc_params = pe_rsc_params(rsc, node, data_set);
         get_meta_attributes(rsc->meta, rsc, node, data_set);
 
         rsc_provides = g_hash_table_lookup(rsc->meta, PCMK_STONITH_PROVIDES);
 
         g_hash_table_iter_init(&gIter, rsc_params);
         while (g_hash_table_iter_next(&gIter, (gpointer *) & name, (gpointer *) & value)) {
             if (!name || !value) {
                 continue;
             }
             params = stonith_key_value_add(params, name, value);
             crm_trace(" %s=%s", name, value);
         }
 
         data = create_device_registration_xml(rsc_name(rsc), st_namespace_any,
                                               agent, params, rsc_provides);
         stonith_key_value_freeall(params, 1, 1);
         rc = stonith_device_register(data, TRUE);
         CRM_ASSERT(rc == pcmk_ok);
         free_xml(data);
     }
 }
 
 /*!
  * \internal
  * \brief Update all STONITH device definitions based on current CIB
  */
 static void
 cib_devices_update(void)
 {
     GHashTableIter iter;
     stonith_device_t *device = NULL;
 
     crm_info("Updating devices to version %s.%s.%s",
              crm_element_value(local_cib, XML_ATTR_GENERATION_ADMIN),
              crm_element_value(local_cib, XML_ATTR_GENERATION),
              crm_element_value(local_cib, XML_ATTR_NUMUPDATES));
 
     if (fenced_data_set->now != NULL) {
         crm_time_free(fenced_data_set->now);
         fenced_data_set->now = NULL;
     }
     fenced_data_set->localhost = stonith_our_uname;
     pcmk__schedule_actions(local_cib, data_set_flags, fenced_data_set);
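     /* What follows is effectively a mark-and-sweep over device_list: every
      * CIB-registered device is flagged dirty, cib_device_update() re-registers
      * the devices still present in the CIB (clearing the flag), and whatever
      * remains dirty is removed afterwards.
      */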
 
     g_hash_table_iter_init(&iter, device_list);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) {
         if (device->cib_registered) {
             device->dirty = TRUE;
         }
     }
 
     /* Clear the watchdog target list so that it is repopulated below if the
      * CIB has a watchdog fencing resource
      * @TODO Keep a cached list for queries that arrive while we are refreshing
      */
     g_list_free_full(stonith_watchdog_targets, free);
     stonith_watchdog_targets = NULL;
     g_list_foreach(fenced_data_set->resources, (GFunc) cib_device_update, fenced_data_set);
 
     g_hash_table_iter_init(&iter, device_list);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&device)) {
         if (device->dirty) {
             g_hash_table_iter_remove(&iter);
         }
     }
 
     fenced_data_set->input = NULL; // Wasn't a copy, so don't let API free it
     pe_reset_working_set(fenced_data_set);
 }
 
 static void
 update_cib_stonith_devices_v2(const char *event, xmlNode * msg)
 {
     xmlNode *change = NULL;
     char *reason = NULL;
     bool needs_update = FALSE;
     xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
 
     for (change = pcmk__xml_first_child(patchset); change != NULL;
          change = pcmk__xml_next(change)) {
         const char *op = crm_element_value(change, XML_DIFF_OP);
         const char *xpath = crm_element_value(change, XML_DIFF_PATH);
         const char *shortpath = NULL;
 
         if ((op == NULL) ||
             (strcmp(op, "move") == 0) ||
             strstr(xpath, "/"XML_CIB_TAG_STATUS)) {
             continue;
         } else if (pcmk__str_eq(op, "delete", pcmk__str_casei) && strstr(xpath, "/"XML_CIB_TAG_RESOURCE)) {
             const char *rsc_id = NULL;
             char *search = NULL;
             char *mutable = NULL;
 
             if (strstr(xpath, XML_TAG_ATTR_SETS) ||
                 strstr(xpath, XML_TAG_META_SETS)) {
                 needs_update = TRUE;
                 pcmk__str_update(&reason,
                                  "(meta) attribute deleted from resource");
                 break;
             }
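             /* A deletion path here looks something like (hypothetical ID)
              *   /cib/configuration/resources/primitive[@id='fence-node1']
              * so extract the resource ID and remove the matching device.
              */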
             pcmk__str_update(&mutable, xpath);
             rsc_id = strstr(mutable, "primitive[@" XML_ATTR_ID "=\'");
             if (rsc_id != NULL) {
                 rsc_id += strlen("primitive[@" XML_ATTR_ID "=\'");
                 search = strchr(rsc_id, '\'');
             }
             if (search != NULL) {
                 *search = 0;
                 stonith_device_remove(rsc_id, true);
                 /* watchdog_device_update called afterwards
                    to fall back to implicit definition if needed */
             } else {
                 crm_warn("Ignoring malformed CIB update (resource deletion)");
             }
             free(mutable);
 
         } else if (strstr(xpath, "/"XML_CIB_TAG_RESOURCES) ||
                    strstr(xpath, "/"XML_CIB_TAG_CONSTRAINTS) ||
                    strstr(xpath, "/"XML_CIB_TAG_RSCCONFIG)) {
             shortpath = strrchr(xpath, '/'); CRM_ASSERT(shortpath);
             reason = crm_strdup_printf("%s %s", op, shortpath+1);
             needs_update = TRUE;
             break;
         }
     }
 
     if(needs_update) {
         crm_info("Updating device list from CIB: %s", reason);
         cib_devices_update();
     } else {
         crm_trace("No updates for device list found in CIB");
     }
     free(reason);
 }
 
 static void
 update_cib_stonith_devices_v1(const char *event, xmlNode * msg)
 {
     const char *reason = "none";
     gboolean needs_update = FALSE;
     xmlXPathObjectPtr xpath_obj = NULL;
 
     /* process new constraints */
     xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_CONS_TAG_RSC_LOCATION);
     if (numXpathResults(xpath_obj) > 0) {
         int max = numXpathResults(xpath_obj), lpc = 0;
 
         /* Safest and simplest to always recompute */
         needs_update = TRUE;
         reason = "new location constraint";
 
         for (lpc = 0; lpc < max; lpc++) {
             xmlNode *match = getXpathResult(xpath_obj, lpc);
 
             crm_log_xml_trace(match, "new constraint");
         }
     }
     freeXpathObject(xpath_obj);
 
     /* process deletions */
     xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_CIB_TAG_RESOURCE);
     if (numXpathResults(xpath_obj) > 0) {
         remove_cib_device(xpath_obj);
     }
     freeXpathObject(xpath_obj);
 
     /* process additions */
     xpath_obj = xpath_search(msg, "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_CIB_TAG_RESOURCE);
     if (numXpathResults(xpath_obj) > 0) {
         int max = numXpathResults(xpath_obj), lpc = 0;
 
         for (lpc = 0; lpc < max; lpc++) {
             const char *rsc_id = NULL;
             const char *standard = NULL;
             xmlNode *match = getXpathResult(xpath_obj, lpc);
 
             rsc_id = crm_element_value(match, XML_ATTR_ID);
             standard = crm_element_value(match, XML_AGENT_ATTR_CLASS);
 
             if (!pcmk__str_eq(standard, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
                 continue;
             }
 
             crm_trace("Fencing resource %s was added or modified", rsc_id);
             reason = "new resource";
             needs_update = TRUE;
         }
     }
     freeXpathObject(xpath_obj);
 
     if(needs_update) {
         crm_info("Updating device list from CIB: %s", reason);
         cib_devices_update();
     }
 }
 
 static void
 update_cib_stonith_devices(const char *event, xmlNode * msg)
 {
     int format = 1;
     xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
 
     CRM_ASSERT(patchset);
     crm_element_value_int(patchset, "format", &format);
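     /* Format 1 patchsets are the older full-diff style (handled below with
      * XPath searches on the message), while format 2 patchsets list individual
      * changes, each with an operation and path.
      */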
     switch(format) {
         case 1:
             update_cib_stonith_devices_v1(event, msg);
             break;
         case 2:
             update_cib_stonith_devices_v2(event, msg);
             break;
         default:
             crm_warn("Unknown patch format: %d", format);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a node has a specific attribute name/value
  *
  * \param[in] node    Name of node to check
  * \param[in] name    Name of an attribute to look for
  * \param[in] value   The value the named attribute needs to be set to in order to be considered a match
  *
  * \return TRUE if the locally cached CIB has the specified node attribute
  */
 gboolean
 node_has_attr(const char *node, const char *name, const char *value)
 {
     GString *xpath = NULL;
     xmlNode *match;
 
     CRM_CHECK((local_cib != NULL) && (node != NULL) && (name != NULL)
               && (value != NULL), return FALSE);
 
     /* Search for the node's attributes in the CIB. While the schema allows
      * multiple sets of instance attributes, and allows instance attributes to
      * use id-ref to reference values elsewhere, that is intended for resources,
      * so we ignore that here.
      */
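     /* The resulting XPath looks roughly like this (with example values):
      *   //nodes/node[@uname='node1']/instance_attributes
      *       /nvpair[@name='standby' and @value='true']
      */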
     xpath = g_string_sized_new(256);
     pcmk__g_strcat(xpath,
                    "//" XML_CIB_TAG_NODES "/" XML_CIB_TAG_NODE
                    "[@" XML_ATTR_UNAME "='", node, "']/" XML_TAG_ATTR_SETS
                    "/" XML_CIB_TAG_NVPAIR
                    "[@" XML_NVPAIR_ATTR_NAME "='", name, "' "
                    "and @" XML_NVPAIR_ATTR_VALUE "='", value, "']", NULL);
 
     match = get_xpath_object((const char *) xpath->str, local_cib, LOG_NEVER);
 
     g_string_free(xpath, TRUE);
     return (match != NULL);
 }
 
 /*!
  * \internal
  * \brief Check whether a node does watchdog-fencing
  *
  * \param[in] node    Name of node to check
  *
  * \return TRUE if the node is in stonith_watchdog_targets, or if
  *         stonith_watchdog_targets is empty (meaning all nodes do
  *         watchdog fencing)
  */
 gboolean
 node_does_watchdog_fencing(const char *node)
 {
     return ((stonith_watchdog_targets == NULL) ||
             pcmk__str_in_list(node, stonith_watchdog_targets, pcmk__str_casei));
 }
 
 
 static void
 update_fencing_topology(const char *event, xmlNode * msg)
 {
     int format = 1;
     const char *xpath;
     xmlXPathObjectPtr xpathObj = NULL;
     xmlNode *patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
 
     CRM_ASSERT(patchset);
     crm_element_value_int(patchset, "format", &format);
 
     if(format == 1) {
         /* Process deletions (only) */
         xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_REMOVED "//" XML_TAG_FENCING_LEVEL;
         xpathObj = xpath_search(msg, xpath);
 
         remove_fencing_topology(xpathObj);
         freeXpathObject(xpathObj);
 
         /* Process additions and changes */
         xpath = "//" F_CIB_UPDATE_RESULT "//" XML_TAG_DIFF_ADDED "//" XML_TAG_FENCING_LEVEL;
         xpathObj = xpath_search(msg, xpath);
 
         register_fencing_topology(xpathObj);
         freeXpathObject(xpathObj);
 
     } else if(format == 2) {
         xmlNode *change = NULL;
         int add[] = { 0, 0, 0 };
         int del[] = { 0, 0, 0 };
 
         xml_patch_versions(patchset, add, del);
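         /* add[] now holds the patchset's target CIB version
          * (admin_epoch, epoch, num_updates); below it is used only in log
          * messages.
          */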
 
         for (change = pcmk__xml_first_child(patchset); change != NULL;
              change = pcmk__xml_next(change)) {
             const char *op = crm_element_value(change, XML_DIFF_OP);
             const char *xpath = crm_element_value(change, XML_DIFF_PATH);
 
             if(op == NULL) {
                 continue;
 
             } else if(strstr(xpath, "/" XML_TAG_FENCING_LEVEL) != NULL) {
                 /* Change to a specific entry */
 
                 crm_trace("Handling %s operation %d.%d.%d for %s", op, add[0], add[1], add[2], xpath);
                 if(strcmp(op, "move") == 0) {
                     continue;
 
                 } else if(strcmp(op, "create") == 0) {
                     add_topology_level(change->children);
 
                 } else if(strcmp(op, "modify") == 0) {
                     xmlNode *match = first_named_child(change, XML_DIFF_RESULT);
 
                     if(match) {
                         remove_topology_level(match->children);
                         add_topology_level(match->children);
                     }
 
                 } else if(strcmp(op, "delete") == 0) {
                     /* Nuclear option, all we have is the path and an id... not enough to remove a specific entry */
                     crm_info("Re-initializing fencing topology after %s operation %d.%d.%d for %s",
                              op, add[0], add[1], add[2], xpath);
                     fencing_topology_init();
                     return;
                 }
 
             } else if (strstr(xpath, "/" XML_TAG_FENCING_TOPOLOGY) != NULL) {
                 /* Change to the topology in general */
                 crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s",
                          op, add[0], add[1], add[2], xpath);
                 fencing_topology_init();
                 return;
 
             } else if (strstr(xpath, "/" XML_CIB_TAG_CONFIGURATION)) {
                 /* Changes to the whole config section, possibly including the topology as a whole */
                 if(first_named_child(change, XML_TAG_FENCING_TOPOLOGY) == NULL) {
                     crm_trace("Nothing for us in %s operation %d.%d.%d for %s.",
                               op, add[0], add[1], add[2], xpath);
 
                 } else if(strcmp(op, "delete") == 0 || strcmp(op, "create") == 0) {
                     crm_info("Re-initializing fencing topology after top-level %s operation %d.%d.%d for %s.",
                              op, add[0], add[1], add[2], xpath);
                     fencing_topology_init();
                     return;
                 }
 
             } else {
                 crm_trace("Nothing for us in %s operation %d.%d.%d for %s",
                           op, add[0], add[1], add[2], xpath);
             }
         }
 
     } else {
         crm_warn("Unknown patch format: %d", format);
     }
 }
 
 static bool have_cib_devices = FALSE;
 
 static void
 update_cib_cache_cb(const char *event, xmlNode * msg)
 {
     int rc = pcmk_ok;
     long timeout_ms_saved = stonith_watchdog_timeout_ms;
     bool need_full_refresh = false;
 
     if(!have_cib_devices) {
         crm_trace("Skipping updates until we get a full dump");
         return;
 
     } else if(msg == NULL) {
         crm_trace("Missing %s update", event);
         return;
     }
 
     /* Maintain a local copy of the CIB so that we have full access
      * to device definitions, location constraints, and node attributes
      */
     if (local_cib != NULL) {
         int rc = pcmk_ok;
         xmlNode *patchset = NULL;
 
         crm_element_value_int(msg, F_CIB_RC, &rc);
         if (rc != pcmk_ok) {
             return;
         }
 
         patchset = get_message_xml(msg, F_CIB_UPDATE_RESULT);
         pcmk__output_set_log_level(logger_out, LOG_TRACE);
         out->message(out, "xml-patchset", patchset);
         rc = xml_apply_patchset(local_cib, patchset, TRUE);
         switch (rc) {
             case pcmk_ok:
             case -pcmk_err_old_data:
                 break;
             case -pcmk_err_diff_resync:
             case -pcmk_err_diff_failed:
                 crm_notice("[%s] Patch aborted: %s (%d)", event, pcmk_strerror(rc), rc);
                 free_xml(local_cib);
                 local_cib = NULL;
                 break;
             default:
                 crm_warn("[%s] ABORTED: %s (%d)", event, pcmk_strerror(rc), rc);
                 free_xml(local_cib);
                 local_cib = NULL;
         }
     }
 
     if (local_cib == NULL) {
         crm_trace("Re-requesting full CIB");
         rc = cib_api->cmds->query(cib_api, NULL, &local_cib, cib_scope_local | cib_sync_call);
         if(rc != pcmk_ok) {
             crm_err("Couldn't retrieve the CIB: %s (%d)", pcmk_strerror(rc), rc);
             return;
         }
         CRM_ASSERT(local_cib != NULL);
         need_full_refresh = true;
     }
 
     pcmk__refresh_node_caches_from_cib(local_cib);
     update_stonith_watchdog_timeout_ms(local_cib);
 
     if (timeout_ms_saved != stonith_watchdog_timeout_ms) {
         need_full_refresh = true;
     }
 
     if (need_full_refresh) {
         fencing_topology_init();
         cib_devices_update();
     } else {
         // Partial refresh
         update_fencing_topology(event, msg);
         update_cib_stonith_devices(event, msg);
     }
 
     watchdog_device_update();
 }
 
 static void
 init_cib_cache_cb(xmlNode * msg, int call_id, int rc, xmlNode * output, void *user_data)
 {
     crm_info("Updating device list from CIB");
     have_cib_devices = TRUE;
     local_cib = copy_xml(output);
 
     pcmk__refresh_node_caches_from_cib(local_cib);
     update_stonith_watchdog_timeout_ms(local_cib);
 
     fencing_topology_init();
     cib_devices_update();
     watchdog_device_update();
 }
 
 static void
 stonith_shutdown(int nsig)
 {
     crm_info("Terminating with %d clients", pcmk__ipc_client_count());
     stonith_shutdown_flag = TRUE;
     if (mainloop != NULL && g_main_loop_is_running(mainloop)) {
         g_main_loop_quit(mainloop);
     }
 }
 
 static void
 cib_connection_destroy(gpointer user_data)
 {
     if (stonith_shutdown_flag) {
         crm_info("Connection to the CIB manager closed");
         return;
     } else {
         crm_crit("Lost connection to the CIB manager, shutting down");
     }
     if (cib_api) {
         cib_api->cmds->signoff(cib_api);
     }
     stonith_shutdown(0);
 }
 
 static void
 stonith_cleanup(void)
 {
     if (cib_api) {
         cib_api->cmds->del_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb);
         cib_api->cmds->signoff(cib_api);
     }
 
     if (ipcs) {
         qb_ipcs_destroy(ipcs);
     }
 
     crm_peer_destroy();
     pcmk__client_cleanup();
     free_stonith_remote_op_list();
     free_topology_list();
     free_device_list();
     free_metadata_cache();
     fenced_unregister_handlers();
 
     free(stonith_our_uname);
     stonith_our_uname = NULL;
 
     free_xml(local_cib);
     local_cib = NULL;
 }
 
 static gboolean
 stand_alone_cpg_cb(const gchar *option_name, const gchar *optarg, gpointer data,
                    GError **error)
 {
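     /* --stand-alone-w-cpg: keep the cluster (CPG) connection but skip the CIB
      * connection; per the option help below, intended for regression testing
      * only.
      */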
     stand_alone = FALSE;
     options.no_cib_connect = true;
     return TRUE;
 }
 
 static void
 setup_cib(void)
 {
     int rc, retries = 0;
 
     cib_api = cib_new();
     if (cib_api == NULL) {
         crm_err("No connection to the CIB manager");
         return;
     }
 
     do {
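         // Linear back-off: sleep 0s, 1s, ... up to 4s between connection attempts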
         sleep(retries);
         rc = cib_api->cmds->signon(cib_api, CRM_SYSTEM_STONITHD, cib_command);
     } while (rc == -ENOTCONN && ++retries < 5);
 
     if (rc != pcmk_ok) {
         crm_err("Could not connect to the CIB manager: %s (%d)", pcmk_strerror(rc), rc);
 
     } else if (pcmk_ok !=
                cib_api->cmds->add_notify_callback(cib_api, T_CIB_DIFF_NOTIFY, update_cib_cache_cb)) {
         crm_err("Could not set CIB notification callback");
 
     } else {
         rc = cib_api->cmds->query(cib_api, NULL, NULL, cib_scope_local);
         cib_api->cmds->register_callback(cib_api, rc, 120, FALSE, NULL, "init_cib_cache_cb",
                                          init_cib_cache_cb);
         cib_api->cmds->set_connection_dnotify(cib_api, cib_connection_destroy);
         crm_info("Watching for fencing topology changes");
     }
 }
 
 struct qb_ipcs_service_handlers ipc_callbacks = {
     .connection_accept = st_ipc_accept,
     .connection_created = NULL,
     .msg_process = st_ipc_dispatch,
     .connection_closed = st_ipc_closed,
     .connection_destroyed = st_ipc_destroy
 };
 
 /*!
  * \internal
  * \brief Callback for peer status changes
  *
  * \param[in] type  What changed
  * \param[in] node  What peer had the change
  * \param[in] data  Previous value of what changed
  */
 static void
 st_peer_update_callback(enum crm_status_type type, crm_node_t * node, const void *data)
 {
     if ((type != crm_status_processes)
         && !pcmk_is_set(node->flags, crm_remote_node)) {
         /*
          * This is a hack until we can send to a node ID and/or we fix node name
          * lookups. These messages are ignored in stonith_peer_callback().
          */
         xmlNode *query = create_xml_node(NULL, "stonith_command");
 
         crm_xml_add(query, F_XML_TAGNAME, "stonith_command");
         crm_xml_add(query, F_TYPE, T_STONITH_NG);
         crm_xml_add(query, F_STONITH_OPERATION, "poke");
 
         crm_debug("Broadcasting our uname because of node %u", node->id);
         send_cluster_message(NULL, crm_msg_stonith_ng, query, FALSE);
 
         free_xml(query);
     }
 }
 
 static pcmk__cluster_option_t fencer_options[] = {
     /* name, old name, type, allowed values,
      * default value, validator,
      * short description,
      * long description
      */
     {
         PCMK_STONITH_HOST_ARGUMENT, NULL, "string", NULL, "port", NULL,
         N_("Advanced use only: An alternate parameter to supply instead of 'port'"),
         N_("Some devices do not support the "
            "standard 'port' parameter or may provide additional ones. Use "
            "this to specify an alternate, device-specific, parameter "
            "that should indicate the machine to be fenced. A value of "
            "'none' can be used to tell the cluster not to supply any "
            "additional parameters.")
     },
     {
         PCMK_STONITH_HOST_MAP, NULL, "string", NULL, "", NULL,
         N_("A mapping of host names to port numbers for devices that do not support host names."),
         N_("For example, node1:1;node2:2,3 would tell the cluster to use port 1 for node1 and ports 2 and 3 for node2")
     },
     {
         PCMK_STONITH_HOST_LIST, NULL, "string", NULL, "", NULL,
         N_("A list of machines controlled by this device (for example, node1,node2,node3)"),
         N_("A list of machines controlled by "
                "this device (optional unless pcmk_host_check is set to static-list)")
     },
     {
         PCMK_STONITH_HOST_CHECK, NULL, "string", NULL, "dynamic-list", NULL,
         N_("How to determine which machines are controlled by the device."),
         N_("Allowed values: dynamic-list "
                "(query the device via the 'list' command), static-list "
                "(check the pcmk_host_list attribute), status "
                "(query the device via the 'status' command), "
                "none (assume every device can fence every "
                "machine)")
     },
     {
         PCMK_STONITH_DELAY_MAX, NULL, "time", NULL, "0s", NULL,
         N_("Enable a delay of no more than the time specified before executing fencing actions."),
         N_("Enable a delay of no more than the "
                "time specified before executing fencing actions. Pacemaker "
                "derives the overall delay by taking the value of "
                "pcmk_delay_base and adding a random delay value such "
                "that the sum is kept below this maximum.")
     },
     {
         PCMK_STONITH_DELAY_BASE, NULL, "string", NULL, "0s", NULL,
         N_("Enable a base delay for "
                "fencing actions and specify base delay value."),
         N_("This enables a static delay for "
                "fencing actions, which can help avoid \"death matches\" where "
                "two nodes try to fence each other at the same time. If "
                "pcmk_delay_max is also used, a random delay will be "
                "added such that the total delay is kept below that value. "
                "This can be set to a single time value to apply to any node "
                "targeted by this device (useful if a separate device is "
                "configured for each target), or to a node map (for example, "
                "\"node1:1s;node2:5\") to set a different value per target.")
     },
     {
         PCMK_STONITH_ACTION_LIMIT, NULL, "integer", NULL, "1", NULL,
         N_("The maximum number of actions that can be performed in parallel on this device"),
         N_("Cluster property concurrent-fencing=true needs to be configured first. "
              "Then use this to specify the maximum number of actions that can be performed in parallel on this device. -1 is unlimited.")
     },
     {
         "pcmk_reboot_action", NULL, "string", NULL,
         PCMK_ACTION_REBOOT, NULL,
         N_("Advanced use only: An alternate command to run instead of 'reboot'"),
         N_("Some devices do not support the standard commands or may provide additional ones.\n"
            "Use this to specify an alternate, device-specific, command that implements the \'reboot\' action.")
     },
     {
         "pcmk_reboot_timeout", NULL, "time", NULL, "60s", NULL,
         N_("Advanced use only: Specify an alternate timeout to use for reboot actions instead of stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal. "
            "Use this to specify an alternate, device-specific, timeout for 'reboot' actions.")
     },
     {
         "pcmk_reboot_retries", NULL, "integer", NULL, "2", NULL,
         N_("Advanced use only: The maximum number of times to retry the 'reboot' command within the timeout period"),
         N_("Some devices do not support multiple connections."
            " Operations may 'fail' if the device is busy with another task, so Pacemaker will automatically retry the operation if there is time remaining."
            " Use this option to alter the number of times Pacemaker retries 'reboot' actions before giving up.")
     },
     {
         "pcmk_off_action", NULL, "string", NULL,
         PCMK_ACTION_OFF, NULL,
         N_("Advanced use only: An alternate command to run instead of \'off\'"),
         N_("Some devices do not support the standard commands or may provide additional ones. "
            "Use this to specify an alternate, device-specific, command that implements the 'off' action.")
     },
     {
         "pcmk_off_timeout", NULL, "time", NULL, "60s", NULL,
         N_("Advanced use only: Specify an alternate timeout to use for off actions instead of stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal. "
            "Use this to specify an alternate, device-specific, timeout for 'off' actions.")
     },
     {
         "pcmk_off_retries", NULL, "integer", NULL, "2", NULL,
         N_("Advanced use only: The maximum number of times to retry the 'off' command within the timeout period"),
         N_("Some devices do not support multiple connections."
            " Operations may 'fail' if the device is busy with another task, so Pacemaker will automatically retry the operation if there is time remaining."
            " Use this option to alter the number of times Pacemaker retries 'off' actions before giving up.")
     },
     {
         "pcmk_on_action", NULL, "string", NULL,
         PCMK_ACTION_ON, NULL,
         N_("Advanced use only: An alternate command to run instead of 'on'"),
         N_("Some devices do not support the standard commands or may provide additional ones. "
            "Use this to specify an alternate, device-specific, command that implements the 'on' action.")
     },
     {
         "pcmk_on_timeout", NULL, "time", NULL, "60s", NULL,
         N_("Advanced use only: Specify an alternate timeout to use for on actions instead of stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal. "
            "Use this to specify an alternate, device-specific, timeout for 'on' actions.")
     },
     {
         "pcmk_on_retries", NULL, "integer", NULL, "2", NULL,
         N_("Advanced use only: The maximum number of times to retry the 'on' command within the timeout period"),
         N_("Some devices do not support multiple connections."
            " Operations may 'fail' if the device is busy with another task, so Pacemaker will automatically retry the operation if there is time remaining."
            " Use this option to alter the number of times Pacemaker retries 'on' actions before giving up.")
     },
     {
         "pcmk_list_action", NULL, "string", NULL,
         PCMK_ACTION_LIST, NULL,
         N_("Advanced use only: An alternate command to run instead of 'list'"),
         N_("Some devices do not support the standard commands or may provide additional ones. "
            "Use this to specify an alternate, device-specific, command that implements the 'list' action.")
     },
     {
         "pcmk_list_timeout", NULL, "time", NULL, "60s", NULL,
         N_("Advanced use only: Specify an alternate timeout to use for list actions instead of stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal. "
            "Use this to specify an alternate, device-specific, timeout for 'list' actions.")
     },
     {
         "pcmk_list_retries", NULL, "integer", NULL, "2", NULL,
         N_("Advanced use only: The maximum number of times to retry the 'list' command within the timeout period"),
         N_("Some devices do not support multiple connections."
            " Operations may 'fail' if the device is busy with another task, so Pacemaker will automatically retry the operation if there is time remaining."
            " Use this option to alter the number of times Pacemaker retries 'list' actions before giving up.")
     },
     {
         "pcmk_monitor_action", NULL, "string", NULL,
         PCMK_ACTION_MONITOR, NULL,
         N_("Advanced use only: An alternate command to run instead of 'monitor'"),
         N_("Some devices do not support the standard commands or may provide additional ones. "
            "Use this to specify an alternate, device-specific, command that implements the 'monitor' action.")
     },
     {
         "pcmk_monitor_timeout", NULL, "time", NULL, "60s", NULL,
         N_("Advanced use only: Specify an alternate timeout to use for monitor actions instead of stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal.\n"
            "Use this to specify an alternate, device-specific, timeout for 'monitor' actions.")
     },
     {
         "pcmk_monitor_retries", NULL, "integer", NULL, "2", NULL,
         N_("Advanced use only: The maximum number of times to retry the 'monitor' command within the timeout period"),
         N_("Some devices do not support multiple connections."
            " Operations may 'fail' if the device is busy with another task, so Pacemaker will automatically retry the operation if there is time remaining."
            " Use this option to alter the number of times Pacemaker retries 'monitor' actions before giving up.")
     },
     {
         "pcmk_status_action", NULL, "string", NULL,
         PCMK_ACTION_STATUS, NULL,
         N_("Advanced use only: An alternate command to run instead of \'status\'"),
         N_("Some devices do not support the standard commands or may provide additional ones. "
            "Use this to specify an alternate, device-specific, command that implements the 'status' action.")
     },
     {
         "pcmk_status_timeout", NULL, "time", NULL, "60s", NULL,
         N_("Advanced use only: Specify an alternate timeout to use for status actions instead of stonith-timeout"),
         N_("Some devices need much more/less time to complete than normal. "
            "Use this to specify an alternate, device-specific, timeout for 'status' actions.")
     },
     {
         "pcmk_status_retries", NULL, "integer", NULL, "2", NULL,
         N_("Advanced use only: The maximum number of times to retry the 'status' command within the timeout period"),
         N_("Some devices do not support multiple connections."
            " Operations may 'fail' if the device is busy with another task, so Pacemaker will automatically retry the operation if there is time remaining."
            " Use this option to alter the number of times Pacemaker retries 'status' actions before giving up.")
     },
 };
 
 void
 fencer_metadata(void)
 {
     const char *desc_short = N_("Instance attributes available for all "
                              "\"stonith\"-class resources");
     const char *desc_long = N_("Instance attributes available for all \"stonith\"-"
                             "class resources and used by Pacemaker's fence "
                             "daemon, formerly known as stonithd");
 
     gchar *s = pcmk__format_option_metadata("pacemaker-fenced", desc_short,
                                             desc_long, fencer_options,
                                             PCMK__NELEM(fencer_options));
     printf("%s", s);
     g_free(s);
 }
 
 static GOptionEntry entries[] = {
     { "stand-alone", 's', G_OPTION_FLAG_NONE, G_OPTION_ARG_NONE, &stand_alone,
       N_("Deprecated (will be removed in a future release)"), NULL },
 
     { "stand-alone-w-cpg", 'c', G_OPTION_FLAG_NO_ARG, G_OPTION_ARG_CALLBACK,
       stand_alone_cpg_cb, N_("Intended for use in regression testing only"), NULL },
 
     { "logfile", 'l', G_OPTION_FLAG_NONE, G_OPTION_ARG_FILENAME_ARRAY,
       &options.log_files, N_("Send logs to the additional named logfile"), NULL },
 
     { NULL }
 };
 
 static GOptionContext *
 build_arg_context(pcmk__common_args_t *args, GOptionGroup **group)
 {
     GOptionContext *context = NULL;
 
     context = pcmk__build_arg_context(args, "text (default), xml", group,
                                       "[metadata]");
     pcmk__add_main_args(context, entries);
     return context;
 }
 
 int
 main(int argc, char **argv)
 {
     int rc = pcmk_rc_ok;
     crm_cluster_t *cluster = NULL;
     crm_ipc_t *old_instance = NULL;
 
     GError *error = NULL;
 
     GOptionGroup *output_group = NULL;
     pcmk__common_args_t *args = pcmk__new_common_args(SUMMARY);
     gchar **processed_args = pcmk__cmdline_preproc(argv, "l");
     GOptionContext *context = build_arg_context(args, &output_group);
 
     crm_log_preinit(NULL, argc, argv);
 
     pcmk__register_formats(output_group, formats);
     if (!g_option_context_parse_strv(context, &processed_args, &error)) {
         exit_code = CRM_EX_USAGE;
         goto done;
     }
 
     rc = pcmk__output_new(&out, args->output_ty, args->output_dest, argv);
     if (rc != pcmk_rc_ok) {
         exit_code = CRM_EX_ERROR;
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                     "Error creating output format %s: %s",
                     args->output_ty, pcmk_rc_str(rc));
         goto done;
     }
 
     if (args->version) {
         out->version(out, false);
         goto done;
     }
 
     if ((g_strv_length(processed_args) >= 2)
         && pcmk__str_eq(processed_args[1], "metadata", pcmk__str_none)) {
         fencer_metadata();
         goto done;
     }
 
     // Open additional log files
     pcmk__add_logfiles(options.log_files, out);
 
     crm_log_init(NULL, LOG_INFO + args->verbosity, TRUE,
                  (args->verbosity > 0), argc, argv, FALSE);
 
     crm_notice("Starting Pacemaker fencer");
 
     old_instance = crm_ipc_new("stonith-ng", 0);
     if (old_instance == NULL) {
         /* crm_ipc_new() will have already logged an error message with
          * crm_err()
          */
         exit_code = CRM_EX_FATAL;
         goto done;
     }
 
     if (pcmk__connect_generic_ipc(old_instance) == pcmk_rc_ok) {
         // IPC endpoint already up
         crm_ipc_close(old_instance);
         crm_ipc_destroy(old_instance);
         crm_err("pacemaker-fenced is already active, aborting startup");
         goto done;
     } else {
         // Not up or not authentic, we'll proceed either way
         crm_ipc_destroy(old_instance);
         old_instance = NULL;
     }
 
     mainloop_add_signal(SIGTERM, stonith_shutdown);
 
     crm_peer_init();
 
     fenced_data_set = pe_new_working_set();
     CRM_ASSERT(fenced_data_set != NULL);
 
     cluster = pcmk_cluster_new();
 
     /* Initialize the logger prior to setup_cib(). update_cib_cache_cb() may
      * call the "xml-patchset" message function, which needs the logger, after
      * setup_cib() has run.
      */
     rc = pcmk__log_output_new(&logger_out);
     if (rc != pcmk_rc_ok) {
         exit_code = CRM_EX_FATAL;
         g_set_error(&error, PCMK__EXITC_ERROR, exit_code,
                     "Error creating output format log: %s", pcmk_rc_str(rc));
         goto done;
     }
     pe__register_messages(logger_out);
     pcmk__register_lib_messages(logger_out);
     pcmk__output_set_log_level(logger_out, LOG_TRACE);
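     // Make the log-only output available to scheduler message functions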
     fenced_data_set->priv = logger_out;
 
     if (!stand_alone) {
 #if SUPPORT_COROSYNC
         if (is_corosync_cluster()) {
             cluster->destroy = stonith_peer_cs_destroy;
             cluster->cpg.cpg_deliver_fn = stonith_peer_ais_callback;
             cluster->cpg.cpg_confchg_fn = pcmk_cpg_membership;
         }
 #endif // SUPPORT_COROSYNC
 
         crm_set_status_callback(&st_peer_update_callback);
 
         if (crm_cluster_connect(cluster) == FALSE) {
             exit_code = CRM_EX_FATAL;
             crm_crit("Cannot sign in to the cluster... terminating");
             goto done;
         }
         pcmk__str_update(&stonith_our_uname, cluster->uname);
 
         if (!options.no_cib_connect) {
             setup_cib();
         }
 
     } else {
         pcmk__str_update(&stonith_our_uname, "localhost");
         crm_warn("Stand-alone mode is deprecated and will be removed "
                  "in a future release");
     }
 
     init_device_list();
     init_topology_list();
 
     pcmk__serve_fenced_ipc(&ipcs, &ipc_callbacks);
 
     // Create the mainloop and run it...
     mainloop = g_main_loop_new(NULL, FALSE);
     crm_notice("Pacemaker fencer successfully started and accepting connections");
     g_main_loop_run(mainloop);
 
 done:
     g_strfreev(processed_args);
     pcmk__free_arg_context(context);
 
     g_strfreev(options.log_files);
 
     stonith_cleanup();
     pcmk_cluster_free(cluster);
     pe_free_working_set(fenced_data_set);
 
     pcmk__output_and_clear_error(&error, out);
 
     if (logger_out != NULL) {
         logger_out->finish(logger_out, exit_code, true, NULL);
         pcmk__output_free(logger_out);
     }
 
     if (out != NULL) {
         out->finish(out, exit_code, true, NULL);
         pcmk__output_free(out);
     }
 
     pcmk__unregister_formats();
     crm_exit(exit_code);
 }
diff --git a/include/crm/common/resources.h b/include/crm/common/resources.h
index 3754f9f307..7b74284026 100644
--- a/include/crm/common/resources.h
+++ b/include/crm/common/resources.h
@@ -1,85 +1,86 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__CRM_COMMON_RESOURCES__H
 #  define PCMK__CRM_COMMON_RESOURCES__H
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 /*!
  * \file
  * \brief Scheduler API for resources
  * \ingroup core
  */
 
 //! Resource variants supported by Pacemaker
 enum pe_obj_types {
     // Order matters: some code compares greater or lesser than
     pcmk_rsc_variant_unknown    = -1,   //!< Unknown resource variant
     pcmk_rsc_variant_primitive  = 0,    //!< Primitive resource
+    pcmk_rsc_variant_group      = 1,    //!< Group resource
 
 #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
     //! \deprecated Use pcmk_rsc_variant_unknown instead
     pe_unknown      = pcmk_rsc_variant_unknown,
 
     //! \deprecated Use pcmk_rsc_variant_primitive instead
     pe_native       = pcmk_rsc_variant_primitive,
 #endif
-    pe_group = 1,
+    pe_group        = pcmk_rsc_variant_group,
     pe_clone = 2,
     pe_container = 3,
 };
 
 //! What resource needs before it can be recovered from a failed node
 enum rsc_start_requirement {
     pcmk_requires_nothing   = 0,    //!< Resource can be recovered immediately
     pcmk_requires_quorum    = 1,    //!< Resource can be recovered if quorate
     pcmk_requires_fencing   = 2,    //!< Resource can be recovered after fencing
 
 #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
     //! \deprecated Use pcmk_requires_nothing instead
     rsc_req_nothing         = pcmk_requires_nothing,
 
     //! \deprecated Use pcmk_requires_quorum instead
     rsc_req_quorum          = pcmk_requires_quorum,
 
     //! \deprecated Use pcmk_requires_fencing instead
     rsc_req_stonith         = pcmk_requires_fencing,
 #endif
 };
 
 //! How to recover a resource that is incorrectly active on multiple nodes
 enum rsc_recovery_type {
     pcmk_multiply_active_restart    = 0,    //!< Stop on all, start on desired
     pcmk_multiply_active_stop       = 1,    //!< Stop on all and leave stopped
     pcmk_multiply_active_block      = 2,    //!< Do nothing to resource
     pcmk_multiply_active_unexpected = 3,    //!< Stop unexpected instances
 
 #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
     //! \deprecated Use pcmk_multiply_active_restart instead
     recovery_stop_start             = pcmk_multiply_active_restart,
 
     //! \deprecated Use pcmk_multiply_active_stop instead
     recovery_stop_only              = pcmk_multiply_active_stop,
 
     //! \deprecated Use pcmk_multiply_active_block instead
     recovery_block                  = pcmk_multiply_active_block,
 
     //! \deprecated Use pcmk_multiply_active_unexpected instead
     recovery_stop_unexpected        = pcmk_multiply_active_unexpected,
 #endif
 };
 
 #ifdef __cplusplus
 }
 #endif
 
 #endif // PCMK__CRM_COMMON_RESOURCES__H
diff --git a/include/crm/compatibility.h b/include/crm/compatibility.h
index 96232aaf18..f4f0d94ee3 100644
--- a/include/crm/compatibility.h
+++ b/include/crm/compatibility.h
@@ -1,243 +1,243 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 #ifndef PCMK__CRM_COMPATIBILITY__H
 #  define PCMK__CRM_COMPATIBILITY__H
 
 #include <crm/msg_xml.h>
 #include <crm/pengine/pe_types.h> // enum pe_obj_types
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 /* This file allows external code that uses Pacemaker libraries to transition
  * more easily from old APIs to current ones. Any code that compiled with an
  * earlier API but not with the current API can include this file and have a
  * good chance of compiling again.
  *
  * Everything here is deprecated and will be removed at the next major Pacemaker
  * release (i.e. 3.0), so it should only be used during a transitional period
  * while the external code is being updated to the current API.
  */
 
 /* Heartbeat-specific definitions. Support for heartbeat has been removed
  * entirely, so any code branches relying on these should be deleted.
  */
 #define ACTIVESTATUS "active"
 #define DEADSTATUS "dead"
 #define PINGSTATUS "ping"
 #define JOINSTATUS "join"
 #define LEAVESTATUS "leave"
 #define NORMALNODE "normal"
 #define CRM_NODE_EVICTED "evicted"
 #define CRM_LEGACY_CONFIG_DIR "/var/lib/heartbeat/crm"
 #define HA_VARLIBHBDIR "/var/lib/heartbeat"
 #define pcmk_cluster_heartbeat 0x0004
 
 /* Corosync-version-1-specific definitions */
 
 /* Support for corosync version 1 has been removed entirely, so any code
  * branches relying on these should be deleted.
  */
 #define PCMK_SERVICE_ID 9
 #define CRM_SERVICE PCMK_SERVICE_ID
 #define XML_ATTR_EXPECTED_VOTES "expected-quorum-votes"
 #define crm_class_members 1
 #define crm_class_notify 2
 #define crm_class_nodeid 3
 #define crm_class_rmpeer 4
 #define crm_class_quorum 5
 #define pcmk_cluster_classic_ais 0x0010
 #define pcmk_cluster_cman 0x0040
 #define ais_fd_sync -1
 
 // These are always true now
 #define CS_USES_LIBQB 1
 #define HAVE_CMAP 1
 #define SUPPORT_CS_QUORUM 1
 #define SUPPORT_AIS 1
 #define AIS_COROSYNC 1
 
 // These are always false now
 #define HAVE_CONFDB 0
 #define SUPPORT_CMAN 0
 #define SUPPORT_PLUGIN 0
 #define SUPPORT_STONITH_CONFIG 0
 #define is_classic_ais_cluster() 0
 #define is_cman_cluster() 0
 
 // These have newer names
 #define is_openais_cluster() is_corosync_cluster()
 #if SUPPORT_COROSYNC
 #define SUPPORT_CS
 #endif
 
 /* Isolation-specific definitions. Support for the resource isolation feature
  * has been removed entirely, so any code branches relying on these should be
  * deleted.
  */
 #define XML_RSC_ATTR_ISOLATION_INSTANCE "isolation-instance"
 #define XML_RSC_ATTR_ISOLATION_WRAPPER "isolation-wrapper"
 #define XML_RSC_ATTR_ISOLATION_HOST "isolation-host"
 #define XML_RSC_ATTR_ISOLATION "isolation"
 
 /* Schema-related definitions */
 
 // This has been renamed
 #define CRM_DTD_DIRECTORY CRM_SCHEMA_DIRECTORY
 
 /* Exit-code-related definitions */
 
 #define DAEMON_RESPAWN_STOP CRM_EX_FATAL
 #define pcmk_err_panic      CRM_EX_PANIC
 
 // Deprecated symbols that were removed
 #define APPNAME_LEN         256
 #define CRM_NODE_ACTIVE             CRM_NODE_MEMBER
 #define CRM_OP_DIE          "die_no_respawn"
 #define CRM_OP_RETRIVE_CIB  "retrieve_cib"
 #define CRM_OP_HBEAT        "dc_beat"
 #define CRM_OP_ABORT        "abort"
 #define CRM_OP_DEBUG_UP     "debug_inc"
 #define CRM_OP_DEBUG_DOWN   "debug_dec"
 #define CRM_OP_EVENTCC      "event_cc"
 #define CRM_OP_TEABORT      "te_abort"
 #define CRM_OP_TEABORTED    "te_abort_confirmed"
 #define CRM_OP_TE_HALT      "te_halt"
 #define CRM_OP_TECOMPLETE   "te_complete"
 #define CRM_OP_TETIMEOUT    "te_timeout"
 #define CRM_OP_TRANSITION   "transition"
 #define CRM_OP_NODES_PROBED "probe_nodes_complete"
 #define DOT_ALL_FSA_INPUTS  1
 #define DOT_FSA_ACTIONS     1
 #define F_LRMD_CANCEL_CALLID        "lrmd_cancel_callid"
 #define F_LRMD_RSC_METADATA         "lrmd_rsc_metadata_res"
 #define F_LRMD_IPC_PROXY_NODE       "lrmd_ipc_proxy_node"
 #define INSTANCE(x)                 crm_element_value(x, XML_CIB_ATTR_INSTANCE)
 #define LOG_DEBUG_2  LOG_TRACE
 #define LOG_DEBUG_3  LOG_TRACE
 #define LOG_DEBUG_4  LOG_TRACE
 #define LOG_DEBUG_5  LOG_TRACE
 #define LOG_DEBUG_6  LOG_TRACE
 #define LRMD_OP_RSC_CHK_REG         "lrmd_rsc_check_register"
 #define MAX_IPC_FAIL                5
 #define NAME(x)                     crm_element_value(x, XML_NVPAIR_ATTR_NAME)
 #define MSG_LOG                     1
 #define PE_OBJ_T_NATIVE             "native"
 #define PE_OBJ_T_GROUP              "group"
 #define PE_OBJ_T_INCARNATION        "clone"
 #define PE_OBJ_T_MASTER             "master"
 #define SERVICE_SCRIPT              "/sbin/service"
 #define SOCKET_LEN                  1024
 #define TSTAMP(x)                   crm_element_value(x, XML_ATTR_TSTAMP)
 #define XML_ATTR_TAGNAME            F_XML_TAGNAME
 #define XML_ATTR_FILTER_TYPE        "type-filter"
 #define XML_ATTR_FILTER_ID          "id-filter"
 #define XML_ATTR_FILTER_PRIORITY    "priority-filter"
 #define XML_ATTR_DC                 "is_dc"
 #define XML_MSG_TAG                 "crm_message"
 #define XML_MSG_TAG_DATA            "msg_data"
 #define XML_FAIL_TAG_RESOURCE       "failed_resource"
 #define XML_FAILRES_ATTR_RESID      "resource_id"
 #define XML_FAILRES_ATTR_REASON     "reason"
 #define XML_FAILRES_ATTR_RESSTATUS  "resource_status"
 #define XML_ATTR_RESULT             "result"
 #define XML_ATTR_SECTION            "section"
 #define XML_CIB_TAG_DOMAIN          "domain"
 #define XML_CIB_TAG_CONSTRAINT      "constraint"
 #define XML_RSC_ATTR_STATE          "clone-state"
 #define XML_RSC_ATTR_PRIORITY       "priority"
 #define XML_OP_ATTR_DEPENDENT       "dependent-on"
 #define XML_LRM_TAG_AGENTS          "lrm_agents"
 #define XML_LRM_TAG_AGENT           "lrm_agent"
 #define XML_LRM_TAG_ATTRIBUTES      "attributes"
 #define XML_CIB_ATTR_HEALTH         "health"
 #define XML_CIB_ATTR_WEIGHT         "weight"
 #define XML_CIB_ATTR_CLEAR          "clear_on"
 #define XML_CIB_ATTR_STONITH        "stonith"
 #define XML_CIB_ATTR_STANDBY        "standby"
 #define XML_RULE_ATTR_SCORE_MANGLED "score-attribute-mangled"
 #define XML_RULE_ATTR_RESULT        "result"
 #define XML_NODE_ATTR_STATE         "state"
 #define XML_ATTR_LRM_PROBE          "lrm-is-probe"
 #define XML_ATTR_TE_ALLOWFAIL       "op_allow_fail"
 #define VALUE(x)                    crm_element_value(x, XML_NVPAIR_ATTR_VALUE)
 #define action_wrapper_s            pe_action_wrapper_s
 #define add_cib_op_callback(cib, id, flag, data, fn) do {                \
         cib->cmds->register_callback(cib, id, 120, flag, data, #fn, fn); \
     } while(0)
 #define cib_default_options = cib_none
 #define crm_remote_baremetal              0x0004
 #define crm_remote_container              0x0002
 #define crm_element_value_const           crm_element_value
 #define crm_element_value_const_int       crm_element_value_int
 #define n_object_classes                  3
 #define no_quorum_policy_e                pe_quorum_policy
 #define node_s                            pe_node_s
 #define node_shared_s                     pe_node_shared_s
 #define pe_action_failure_is_fatal        0x00020
 #define pe_rsc_munging                    0x00000800ULL
 #define pe_rsc_try_reload                 0x00001000ULL
 #define pe_rsc_shutdown                   0x00020000ULL
 #define pe_rsc_migrating                  0x00400000ULL
 #define pe_rsc_unexpectedly_running       0x02000000ULL
 #define pe_rsc_have_unfencing             0x80000000ULL
 #define resource_s                        pe_resource_s
 #define ticket_s                          pe_ticket_s
 
 #define node_score_infinity 1000000
 
 /* Clone terminology definitions */
 
 // These can no longer be used in a switch together
 #define pe_master pe_clone
 
 static inline enum pe_obj_types
 get_resource_type(const char *name)
 {
     if (safe_str_eq(name, XML_CIB_TAG_RESOURCE)) {
         return pcmk_rsc_variant_primitive;
 
     } else if (safe_str_eq(name, XML_CIB_TAG_GROUP)) {
-        return pe_group;
+        return pcmk_rsc_variant_group;
 
     } else if (safe_str_eq(name, XML_CIB_TAG_INCARNATION)
                 || safe_str_eq(name, PCMK_XE_PROMOTABLE_LEGACY)) {
         return pe_clone;
 
     } else if (safe_str_eq(name, XML_CIB_TAG_CONTAINER)) {
         return pe_container;
     }
 
     return pcmk_rsc_variant_unknown;
 }
 
 static inline const char *
 get_resource_typename(enum pe_obj_types type)
 {
     switch (type) {
         case pcmk_rsc_variant_primitive:
             return XML_CIB_TAG_RESOURCE;
-        case pe_group:
+        case pcmk_rsc_variant_group:
             return XML_CIB_TAG_GROUP;
         case pe_clone:
             return XML_CIB_TAG_INCARNATION;
         case pe_container:
             return XML_CIB_TAG_CONTAINER;
         case pcmk_rsc_variant_unknown:
             return "unknown";
     }
     return "<unknown>";
 }
 
 #ifdef __cplusplus
 }
 #endif
 
 #endif
diff --git a/lib/pacemaker/pcmk_sched_actions.c b/lib/pacemaker/pcmk_sched_actions.c
index 5d539efb0b..35850dbfb7 100644
--- a/lib/pacemaker/pcmk_sched_actions.c
+++ b/lib/pacemaker/pcmk_sched_actions.c
@@ -1,1932 +1,1933 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdio.h>
 #include <sys/param.h>
 #include <glib.h>
 
 #include <crm/lrmd_internal.h>
 #include <pacemaker-internal.h>
 #include "libpacemaker_private.h"
 
 /*!
  * \internal
  * \brief Get the action flags relevant to ordering constraints
  *
  * \param[in,out] action  Action to check
  * \param[in]     node    Node that *other* action in the ordering is on
  *                        (used only for clone resource actions)
  *
  * \return Action flags that should be used for orderings
  */
 static uint32_t
 action_flags_for_ordering(pe_action_t *action, const pe_node_t *node)
 {
     bool runnable = false;
     uint32_t flags;
 
     // For non-resource actions, return the action flags
     if (action->rsc == NULL) {
         return action->flags;
     }
 
     /* For non-clone resources, or a clone action not assigned to a node,
      * return the flags as determined by the resource method without a node
      * specified.
      */
     flags = action->rsc->cmds->action_flags(action, NULL);
     if ((node == NULL) || !pe_rsc_is_clone(action->rsc)) {
         return flags;
     }
 
     /* Otherwise (i.e., for clone resource actions on a specific node), first
      * remember whether the non-node-specific action is runnable.
      */
     runnable = pcmk_is_set(flags, pe_action_runnable);
 
     // Then recheck the resource method with the node
     flags = action->rsc->cmds->action_flags(action, node);
 
     /* For clones in ordering constraints, the node-specific "runnable" doesn't
      * matter, just the non-node-specific setting (i.e., is the action runnable
      * anywhere).
      *
      * This applies only to runnable, and only for ordering constraints. This
      * function shouldn't be used for other types of constraints without
      * changes. Not very satisfying, but it's logical and appears to work well.
      */
     if (runnable && !pcmk_is_set(flags, pe_action_runnable)) {
         pe__set_raw_action_flags(flags, action->rsc->id,
                                  pe_action_runnable);
     }
     return flags;
 }
 
 /*!
  * \internal
  * \brief Get action UUID that should be used with a resource ordering
  *
  * When an action is ordered relative to an action for a collective resource
  * (clone, group, or bundle), it actually needs to be ordered after all
  * instances of the collective have completed the relevant action (for example,
  * given "start CLONE then start RSC", RSC must wait until all instances of
  * CLONE have started). Given the UUID and resource of the first action in an
  * ordering, this returns the UUID of the action that should actually be used
  * for ordering (for example, "CLONE_started_0" instead of "CLONE_start_0").
  *
  * \param[in] first_uuid    UUID of first action in ordering
  * \param[in] first_rsc     Resource of first action in ordering
  *
  * \return Newly allocated copy of UUID to use with ordering
  * \note It is the caller's responsibility to free the return value.
  */
 static char *
 action_uuid_for_ordering(const char *first_uuid, const pe_resource_t *first_rsc)
 {
     guint interval_ms = 0;
     char *uuid = NULL;
     char *rid = NULL;
     char *first_task_str = NULL;
     enum action_tasks first_task = pcmk_action_unspecified;
     enum action_tasks remapped_task = pcmk_action_unspecified;
 
     // Only non-notify actions for collective resources need remapping
     if ((strstr(first_uuid, PCMK_ACTION_NOTIFY) != NULL)
-        || (first_rsc->variant < pe_group)) {
+        || (first_rsc->variant < pcmk_rsc_variant_group)) {
         goto done;
     }
 
     // Only non-recurring actions need remapping
     CRM_ASSERT(parse_op_key(first_uuid, &rid, &first_task_str, &interval_ms));
     if (interval_ms > 0) {
         goto done;
     }
 
     first_task = text2task(first_task_str);
     switch (first_task) {
         case pcmk_action_stop:
         case pcmk_action_start:
         case pcmk_action_notify:
         case pcmk_action_promote:
         case pcmk_action_demote:
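             /* Relies on enum action_tasks ordering: each of these tasks is
              * immediately followed by its completed form (e.g. start is
              * followed by started), so adding 1 yields the completed task.
              */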
             remapped_task = first_task + 1;
             break;
         case pcmk_action_stopped:
         case pcmk_action_started:
         case pcmk_action_notified:
         case pcmk_action_promoted:
         case pcmk_action_demoted:
             remapped_task = first_task;
             break;
         case pcmk_action_monitor:
         case pcmk_action_shutdown:
         case pcmk_action_fence:
             break;
         default:
             crm_err("Unknown action '%s' in ordering", first_task_str);
             break;
     }
 
     if (remapped_task != pcmk_action_unspecified) {
         /* If a (clone) resource has notifications enabled, we want to order
          * relative to when all notifications have been sent for the remapped
          * task. Only outermost resources or those in bundles have
          * notifications.
          */
         if (pcmk_is_set(first_rsc->flags, pe_rsc_notify)
             && ((first_rsc->parent == NULL)
                 || (pe_rsc_is_clone(first_rsc)
                     && (first_rsc->parent->variant == pe_container)))) {
             uuid = pcmk__notify_key(rid, "confirmed-post",
                                     task2text(remapped_task));
         } else {
             uuid = pcmk__op_key(rid, task2text(remapped_task), 0);
         }
         pe_rsc_trace(first_rsc,
                      "Remapped action UUID %s to %s for ordering purposes",
                      first_uuid, uuid);
     }
 
 done:
     if (uuid == NULL) {
         uuid = strdup(first_uuid);
         CRM_ASSERT(uuid != NULL);
     }
     free(first_task_str);
     free(rid);
     return uuid;
 }
 
 /*!
  * \internal
  * \brief Get actual action that should be used with an ordering
  *
  * When an action is ordered relative to an action for a collective resource
  * (clone, group, or bundle), it actually needs to be ordered after all
  * instances of the collective have completed the relevant action (for example,
  * given "start CLONE then start RSC", RSC must wait until all instances of
  * CLONE have started). Given the first action in an ordering, this returns
  * the action that should actually be used for ordering (for example, the
  * started action instead of the start action).
  *
  * \param[in] action  First action in an ordering
  *
  * \return Actual action that should be used for the ordering
  */
 static pe_action_t *
 action_for_ordering(pe_action_t *action)
 {
     pe_action_t *result = action;
     pe_resource_t *rsc = action->rsc;
 
-    if ((rsc != NULL) && (rsc->variant >= pe_group) && (action->uuid != NULL)) {
+    if ((rsc != NULL) && (rsc->variant >= pcmk_rsc_variant_group)
+        && (action->uuid != NULL)) {
         char *uuid = action_uuid_for_ordering(action->uuid, rsc);
 
         result = find_first_action(rsc->actions, uuid, NULL, NULL);
         if (result == NULL) {
             crm_warn("Not remapping %s to %s because %s does not have "
                      "remapped action", action->uuid, uuid, rsc->id);
             result = action;
         }
         free(uuid);
     }
     return result;
 }
 
 /*!
  * \internal
  * \brief Wrapper for update_ordered_actions() method for readability
  *
  * \param[in,out] rsc       Resource to call method for
  * \param[in,out] first     'First' action in an ordering
  * \param[in,out] then      'Then' action in an ordering
  * \param[in]     node      If not NULL, limit scope of ordering to this
  *                          node (only used when interleaving instances)
  * \param[in]     flags     Action flags for \p first for ordering purposes
  * \param[in]     filter    Action flags to limit scope of certain updates
  *                          (may include pe_action_optional to affect only
  *                          mandatory actions, and pe_action_runnable to
  *                          affect only runnable actions)
  * \param[in]     type      Group of enum pe_ordering flags to apply
  * \param[in,out] data_set  Cluster working set
  *
  * \return Group of enum pcmk__updated flags indicating what was updated
  */
 static inline uint32_t
 update(pe_resource_t *rsc, pe_action_t *first, pe_action_t *then,
        const pe_node_t *node, uint32_t flags, uint32_t filter, uint32_t type,
        pe_working_set_t *data_set)
 {
     return rsc->cmds->update_ordered_actions(first, then, node, flags, filter,
                                              type, data_set);
 }
 
 /*!
  * \internal
  * \brief Update flags for ordering's actions appropriately for ordering's flags
  *
  * \param[in,out] first        First action in an ordering
  * \param[in,out] then         Then action in an ordering
  * \param[in]     first_flags  Action flags for \p first for ordering purposes
  * \param[in]     then_flags   Action flags for \p then for ordering purposes
  * \param[in,out] order        Action wrapper for \p first in ordering
  * \param[in,out] data_set     Cluster working set
  *
  * \return Group of enum pcmk__updated flags
  */
 static uint32_t
 update_action_for_ordering_flags(pe_action_t *first, pe_action_t *then,
                                  uint32_t first_flags, uint32_t then_flags,
                                  pe_action_wrapper_t *order,
                                  pe_working_set_t *data_set)
 {
     uint32_t changed = pcmk__updated_none;
 
     /* The node will only be used for clones. If interleaved, node will be NULL,
      * otherwise the ordering scope will be limited to the node. Normally, the
      * whole 'then' clone should restart if 'first' is restarted, so then->node
      * is needed.
      */
     pe_node_t *node = then->node;
 
     if (pcmk_is_set(order->type, pe_order_implies_then_on_node)) {
         /* For unfencing, only instances of 'then' on the same node as 'first'
          * (the unfencing operation) should restart, so reset node to
          * first->node, at which point this case is handled like a normal
          * pe_order_implies_then.
          */
         pe__clear_order_flags(order->type, pe_order_implies_then_on_node);
         pe__set_order_flags(order->type, pe_order_implies_then);
         node = first->node;
         pe_rsc_trace(then->rsc,
                      "%s then %s: mapped pe_order_implies_then_on_node to "
                      "pe_order_implies_then on %s",
                      first->uuid, then->uuid, pe__node_name(node));
     }
 
     if (pcmk_is_set(order->type, pe_order_implies_then)) {
         if (then->rsc != NULL) {
             changed |= update(then->rsc, first, then, node,
                               first_flags & pe_action_optional,
                               pe_action_optional, pe_order_implies_then,
                               data_set);
         } else if (!pcmk_is_set(first_flags, pe_action_optional)
                    && pcmk_is_set(then->flags, pe_action_optional)) {
             pe__clear_action_flags(then, pe_action_optional);
             pcmk__set_updated_flags(changed, first, pcmk__updated_then);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_implies_then",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_restart) && (then->rsc != NULL)) {
         enum pe_action_flags restart = pe_action_optional|pe_action_runnable;
 
         changed |= update(then->rsc, first, then, node, first_flags, restart,
                           pe_order_restart, data_set);
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_restart",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_implies_first)) {
         if (first->rsc != NULL) {
             changed |= update(first->rsc, first, then, node, first_flags,
                               pe_action_optional, pe_order_implies_first,
                               data_set);
         } else if (!pcmk_is_set(first_flags, pe_action_optional)
                    && pcmk_is_set(first->flags, pe_action_runnable)) {
             pe__clear_action_flags(first, pe_action_runnable);
             pcmk__set_updated_flags(changed, first, pcmk__updated_first);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_implies_first",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_promoted_implies_first)) {
         if (then->rsc != NULL) {
             changed |= update(then->rsc, first, then, node,
                               first_flags & pe_action_optional,
                               pe_action_optional,
                               pe_order_promoted_implies_first, data_set);
         }
         pe_rsc_trace(then->rsc,
                      "%s then %s: %s after pe_order_promoted_implies_first",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_one_or_more)) {
         if (then->rsc != NULL) {
             changed |= update(then->rsc, first, then, node, first_flags,
                               pe_action_runnable, pe_order_one_or_more,
                               data_set);
 
         } else if (pcmk_is_set(first_flags, pe_action_runnable)) {
             // We have another runnable instance of "first"
             then->runnable_before++;
 
             /* Mark "then" as runnable if it requires a certain number of
              * "before" instances to be runnable, and they now are.
              */
             if ((then->runnable_before >= then->required_runnable_before)
                 && !pcmk_is_set(then->flags, pe_action_runnable)) {
 
                 pe__set_action_flags(then, pe_action_runnable);
                 pcmk__set_updated_flags(changed, first, pcmk__updated_then);
             }
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_one_or_more",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_probe) && (then->rsc != NULL)) {
         if (!pcmk_is_set(first_flags, pe_action_runnable)
             && (first->rsc->running_on != NULL)) {
 
             pe_rsc_trace(then->rsc,
                          "%s then %s: ignoring because first is stopping",
                          first->uuid, then->uuid);
             order->type = pe_order_none;
         } else {
             changed |= update(then->rsc, first, then, node, first_flags,
                               pe_action_runnable, pe_order_runnable_left,
                               data_set);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_probe",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_runnable_left)) {
         if (then->rsc != NULL) {
             changed |= update(then->rsc, first, then, node, first_flags,
                               pe_action_runnable, pe_order_runnable_left,
                               data_set);
 
         } else if (!pcmk_is_set(first_flags, pe_action_runnable)
                    && pcmk_is_set(then->flags, pe_action_runnable)) {
 
             pe__clear_action_flags(then, pe_action_runnable);
             pcmk__set_updated_flags(changed, first, pcmk__updated_then);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_runnable_left",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_implies_first_migratable)) {
         if (then->rsc != NULL) {
             changed |= update(then->rsc, first, then, node, first_flags,
                               pe_action_optional,
                               pe_order_implies_first_migratable, data_set);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after "
                      "pe_order_implies_first_migratable",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_pseudo_left)) {
         if (then->rsc != NULL) {
             changed |= update(then->rsc, first, then, node, first_flags,
                               pe_action_optional, pe_order_pseudo_left,
                               data_set);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_pseudo_left",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_optional)) {
         if (then->rsc != NULL) {
             changed |= update(then->rsc, first, then, node, first_flags,
                               pe_action_runnable, pe_order_optional, data_set);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_optional",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(order->type, pe_order_asymmetrical)) {
         if (then->rsc != NULL) {
             changed |= update(then->rsc, first, then, node, first_flags,
                               pe_action_runnable, pe_order_asymmetrical,
                               data_set);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after pe_order_asymmetrical",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     if (pcmk_is_set(first->flags, pe_action_runnable)
         && pcmk_is_set(order->type, pe_order_implies_then_printed)
         && !pcmk_is_set(first_flags, pe_action_optional)) {
 
         pe_rsc_trace(then->rsc, "%s will be in graph because %s is required",
                      then->uuid, first->uuid);
         pe__set_action_flags(then, pe_action_print_always);
         // Don't bother marking 'then' as changed just for this
     }
 
     if (pcmk_is_set(order->type, pe_order_implies_first_printed)
         && !pcmk_is_set(then_flags, pe_action_optional)) {
 
         pe_rsc_trace(then->rsc, "%s will be in graph because %s is required",
                      first->uuid, then->uuid);
         pe__set_action_flags(first, pe_action_print_always);
         // Don't bother marking 'first' as changed just for this
     }
 
     if (pcmk_any_flags_set(order->type, pe_order_implies_then
                                         |pe_order_implies_first
                                         |pe_order_restart)
         && (first->rsc != NULL)
         && !pcmk_is_set(first->rsc->flags, pe_rsc_managed)
         && pcmk_is_set(first->rsc->flags, pe_rsc_block)
         && !pcmk_is_set(first->flags, pe_action_runnable)
         && pcmk__str_eq(first->task, PCMK_ACTION_STOP, pcmk__str_none)) {
 
         if (pcmk_is_set(then->flags, pe_action_runnable)) {
             pe__clear_action_flags(then, pe_action_runnable);
             pcmk__set_updated_flags(changed, first, pcmk__updated_then);
         }
         pe_rsc_trace(then->rsc, "%s then %s: %s after checking whether first "
                      "is blocked, unmanaged, unrunnable stop",
                      first->uuid, then->uuid,
                      (changed? "changed" : "unchanged"));
     }
 
     return changed;
 }
 
 // Convenience macros for logging action properties
 
 #define action_type_str(flags) \
     (pcmk_is_set((flags), pe_action_pseudo)? "pseudo-action" : "action")
 
 #define action_optional_str(flags) \
     (pcmk_is_set((flags), pe_action_optional)? "optional" : "required")
 
 #define action_runnable_str(flags) \
     (pcmk_is_set((flags), pe_action_runnable)? "runnable" : "unrunnable")
 
 #define action_node_str(a) \
     (((a)->node == NULL)? "no node" : (a)->node->details->uname)
 
 /*!
  * \internal
  * \brief Update an action's flags for all orderings where it is "then"
  *
  * \param[in,out] then      Action to update
  * \param[in,out] data_set  Cluster working set
  */
 void
 pcmk__update_action_for_orderings(pe_action_t *then, pe_working_set_t *data_set)
 {
     GList *lpc = NULL;
     uint32_t changed = pcmk__updated_none;
     int last_flags = then->flags;
 
     pe_rsc_trace(then->rsc, "Updating %s %s (%s %s) on %s",
                  action_type_str(then->flags), then->uuid,
                  action_optional_str(then->flags),
                  action_runnable_str(then->flags), action_node_str(then));
 
     if (pcmk_is_set(then->flags, pe_action_requires_any)) {
         /* Initialize current known "runnable before" actions. As
          * update_action_for_ordering_flags() is called for each of then's
          * before actions, this number will increment as runnable 'first'
          * actions are encountered.
          */
         then->runnable_before = 0;
 
         if (then->required_runnable_before == 0) {
             /* @COMPAT This ordering constraint uses the deprecated
              * "require-all=false" attribute. Treat it like "clone-min=1".
              */
             then->required_runnable_before = 1;
         }
 
         /* The pe_order_one_or_more clause of update_action_for_ordering_flags()
          * (called below) will reset runnable if appropriate.
          */
         pe__clear_action_flags(then, pe_action_runnable);
     }
 
     for (lpc = then->actions_before; lpc != NULL; lpc = lpc->next) {
         pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data;
         pe_action_t *first = other->action;
 
         pe_node_t *then_node = then->node;
         pe_node_t *first_node = first->node;
 
         if ((first->rsc != NULL)
-            && (first->rsc->variant == pe_group)
+            && (first->rsc->variant == pcmk_rsc_variant_group)
             && pcmk__str_eq(first->task, PCMK_ACTION_START, pcmk__str_none)) {
 
             first_node = first->rsc->fns->location(first->rsc, NULL, FALSE);
             if (first_node != NULL) {
                 pe_rsc_trace(first->rsc, "Found %s for 'first' %s",
                              pe__node_name(first_node), first->uuid);
             }
         }
 
         if ((then->rsc != NULL)
-            && (then->rsc->variant == pe_group)
+            && (then->rsc->variant == pcmk_rsc_variant_group)
             && pcmk__str_eq(then->task, PCMK_ACTION_START, pcmk__str_none)) {
 
             then_node = then->rsc->fns->location(then->rsc, NULL, FALSE);
             if (then_node != NULL) {
                 pe_rsc_trace(then->rsc, "Found %s for 'then' %s",
                              pe__node_name(then_node), then->uuid);
             }
         }
 
         // Disable constraint if it only applies when on same node, but isn't
         if (pcmk_is_set(other->type, pe_order_same_node)
             && (first_node != NULL) && (then_node != NULL)
             && !pe__same_node(first_node, then_node)) {
 
             pe_rsc_trace(then->rsc,
                          "Disabled ordering %s on %s then %s on %s: "
                          "not same node",
                          other->action->uuid, pe__node_name(first_node),
                          then->uuid, pe__node_name(then_node));
             other->type = pe_order_none;
             continue;
         }
 
         pcmk__clear_updated_flags(changed, then, pcmk__updated_first);
 
         if ((first->rsc != NULL)
             && pcmk_is_set(other->type, pe_order_then_cancels_first)
             && !pcmk_is_set(then->flags, pe_action_optional)) {
 
             /* 'then' is required, so we must abandon 'first'
              * (e.g. a required stop cancels any agent reload).
              */
             pe__set_action_flags(other->action, pe_action_optional);
             if (!strcmp(first->task, PCMK_ACTION_RELOAD_AGENT)) {
                 pe__clear_resource_flags(first->rsc, pe_rsc_reload);
             }
         }
 
         if ((first->rsc != NULL) && (then->rsc != NULL)
             && (first->rsc != then->rsc) && !is_parent(then->rsc, first->rsc)) {
             first = action_for_ordering(first);
         }
         if (first != other->action) {
             pe_rsc_trace(then->rsc, "Ordering %s after %s instead of %s",
                          then->uuid, first->uuid, other->action->uuid);
         }
 
         pe_rsc_trace(then->rsc,
                      "%s (%#.6x) then %s (%#.6x): type=%#.6x node=%s",
                      first->uuid, first->flags, then->uuid, then->flags,
                      other->type, action_node_str(first));
 
         if (first == other->action) {
             /* 'first' was not remapped (e.g. from 'start' to 'running'), which
              * could mean it is a non-resource action, a primitive resource
              * action, or already expanded.
              */
             uint32_t first_flags, then_flags;
 
             first_flags = action_flags_for_ordering(first, then_node);
             then_flags = action_flags_for_ordering(then, first_node);
 
             changed |= update_action_for_ordering_flags(first, then,
                                                         first_flags, then_flags,
                                                         other, data_set);
 
             /* 'first' was for a complex resource (clone, group, etc),
              * create a new dependency if necessary
              */
         } else if (order_actions(first, then, other->type)) {
             /* This was the first time 'first' and 'then' were associated,
              * start again to get the new actions_before list
              */
             pcmk__set_updated_flags(changed, then, pcmk__updated_then);
             pe_rsc_trace(then->rsc,
                          "Disabled ordering %s then %s in favor of %s then %s",
                          other->action->uuid, then->uuid, first->uuid,
                          then->uuid);
             other->type = pe_order_none;
         }
 
 
         if (pcmk_is_set(changed, pcmk__updated_first)) {
             crm_trace("Re-processing %s and its 'after' actions "
                       "because it changed", first->uuid);
             for (GList *lpc2 = first->actions_after; lpc2 != NULL;
                  lpc2 = lpc2->next) {
                 pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc2->data;
 
                 pcmk__update_action_for_orderings(other->action, data_set);
             }
             pcmk__update_action_for_orderings(first, data_set);
         }
     }
 
     if (pcmk_is_set(then->flags, pe_action_requires_any)) {
         if (last_flags == then->flags) {
             pcmk__clear_updated_flags(changed, then, pcmk__updated_then);
         } else {
             pcmk__set_updated_flags(changed, then, pcmk__updated_then);
         }
     }
 
     if (pcmk_is_set(changed, pcmk__updated_then)) {
         crm_trace("Re-processing %s and its 'after' actions because it changed",
                   then->uuid);
         if (pcmk_is_set(last_flags, pe_action_runnable)
             && !pcmk_is_set(then->flags, pe_action_runnable)) {
             pcmk__block_colocation_dependents(then);
         }
         pcmk__update_action_for_orderings(then, data_set);
         for (lpc = then->actions_after; lpc != NULL; lpc = lpc->next) {
             pe_action_wrapper_t *other = (pe_action_wrapper_t *) lpc->data;
 
             pcmk__update_action_for_orderings(other->action, data_set);
         }
     }
 }
 
 static inline bool
 is_primitive_action(const pe_action_t *action)
 {
     return (action != NULL) && (action->rsc != NULL)
            && (action->rsc->variant == pcmk_rsc_variant_primitive);
 }
 
 /*!
  * \internal
  * \brief Clear a single action flag and set reason text
  *
  * \param[in,out] action  Action whose flag should be cleared
  * \param[in]     flag    Action flag that should be cleared
  * \param[in]     reason  Action that is the reason why flag is being cleared
  */
 #define clear_action_flag_because(action, flag, reason) do {                \
         if (pcmk_is_set((action)->flags, (flag))) {                         \
             pe__clear_action_flags(action, flag);                           \
             if ((action)->rsc != (reason)->rsc) {                           \
                 char *reason_text = pe__action2reason((reason), (flag));    \
                 pe_action_set_reason((action), reason_text, false);         \
                 free(reason_text);                                          \
             }                                                               \
         }                                                                   \
     } while (0)
 
 /*!
  * \internal
  * \brief Update actions in an asymmetric ordering
  *
  * If the "first" action in an asymmetric ordering is unrunnable, make the
  * "second" action unrunnable as well, if appropriate.
  *
  * \param[in]     first  'First' action in an asymmetric ordering
  * \param[in,out] then   'Then' action in an asymmetric ordering
  */
 static void
 handle_asymmetric_ordering(const pe_action_t *first, pe_action_t *then)
 {
     /* Only resource actions after an unrunnable 'first' action need updates for
      * asymmetric ordering.
      */
     if ((then->rsc == NULL) || pcmk_is_set(first->flags, pe_action_runnable)) {
         return;
     }
 
     // Certain optional 'then' actions are unaffected by unrunnable 'first'
     if (pcmk_is_set(then->flags, pe_action_optional)) {
         enum rsc_role_e then_rsc_role = then->rsc->fns->state(then->rsc, TRUE);
 
         if ((then_rsc_role == pcmk_role_stopped)
             && pcmk__str_eq(then->task, PCMK_ACTION_STOP, pcmk__str_none)) {
             /* If 'then' should stop after 'first' but is already stopped, the
              * ordering is irrelevant.
              */
             return;
         } else if ((then_rsc_role >= pcmk_role_started)
             && pcmk__str_eq(then->task, PCMK_ACTION_START, pcmk__str_none)
             && pe__rsc_running_on_only(then->rsc, then->node)) {
             /* Similarly if 'then' should start after 'first' but is already
              * started on a single node.
              */
             return;
         }
     }
 
     // 'First' can't run, so 'then' can't either
     clear_action_flag_because(then, pe_action_optional, first);
     clear_action_flag_because(then, pe_action_runnable, first);
 }
 
 /*!
  * \internal
  * \brief Set action bits appropriately when pe_restart_order is used
  *
  * \param[in,out] first   'First' action in an ordering with pe_restart_order
  * \param[in,out] then    'Then' action in an ordering with pe_restart_order
  * \param[in]     filter  What action flags to care about
  *
  * \note pe_restart_order is set for "stop resource before starting it" and
  *       "stop later group member before stopping earlier group member"
  */
 static void
 handle_restart_ordering(pe_action_t *first, pe_action_t *then, uint32_t filter)
 {
     const char *reason = NULL;
 
     CRM_ASSERT(is_primitive_action(first));
     CRM_ASSERT(is_primitive_action(then));
 
     // We need to update the action in two cases:
 
     // ... if 'then' is required
     if (pcmk_is_set(filter, pe_action_optional)
         && !pcmk_is_set(then->flags, pe_action_optional)) {
         reason = "restart";
     }
 
     /* ... if 'then' is unrunnable action on same resource (if a resource
      * should restart but can't start, we still want to stop)
      */
     if (pcmk_is_set(filter, pe_action_runnable)
         && !pcmk_is_set(then->flags, pe_action_runnable)
         && pcmk_is_set(then->rsc->flags, pe_rsc_managed)
         && (first->rsc == then->rsc)) {
         reason = "stop";
     }
 
     if (reason == NULL) {
         return;
     }
 
     pe_rsc_trace(first->rsc, "Handling %s -> %s for %s",
                  first->uuid, then->uuid, reason);
 
     // Make 'first' required if it is runnable
     if (pcmk_is_set(first->flags, pe_action_runnable)) {
         clear_action_flag_because(first, pe_action_optional, then);
     }
 
     // Make 'first' required if 'then' is required
     if (!pcmk_is_set(then->flags, pe_action_optional)) {
         clear_action_flag_because(first, pe_action_optional, then);
     }
 
     // Make 'first' unmigratable if 'then' is unmigratable
     if (!pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
         clear_action_flag_because(first, pe_action_migrate_runnable, then);
     }
 
     // Make 'then' unrunnable if 'first' is required but unrunnable
     if (!pcmk_is_set(first->flags, pe_action_optional)
         && !pcmk_is_set(first->flags, pe_action_runnable)) {
         clear_action_flag_because(then, pe_action_runnable, first);
     }
 }
 
 /*!
  * \internal
  * \brief Update two actions according to an ordering between them
  *
  * Given information about an ordering of two actions, update the actions' flags
  * (and runnable_before members if appropriate) as appropriate for the ordering.
  * Effects may cascade to other orderings involving the actions as well.
  *
  * \param[in,out] first     'First' action in an ordering
  * \param[in,out] then      'Then' action in an ordering
  * \param[in]     node      If not NULL, limit scope of ordering to this node
  *                          (ignored)
  * \param[in]     flags     Action flags for \p first for ordering purposes
  * \param[in]     filter    Action flags to limit scope of certain updates (may
  *                          include pe_action_optional to affect only mandatory
  *                          actions, and pe_action_runnable to affect only
  *                          runnable actions)
  * \param[in]     type      Group of enum pe_ordering flags to apply
  * \param[in,out] data_set  Cluster working set
  *
  * \return Group of enum pcmk__updated flags indicating what was updated
  */
 uint32_t
 pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then,
                              const pe_node_t *node, uint32_t flags,
                              uint32_t filter, uint32_t type,
                              pe_working_set_t *data_set)
 {
     uint32_t changed = pcmk__updated_none;
     uint32_t then_flags = 0U;
     uint32_t first_flags = 0U;
 
     CRM_ASSERT((first != NULL) && (then != NULL) && (data_set != NULL));
 
     // Remember the original flags so that changes made below can be detected
     then_flags = then->flags;
     first_flags = first->flags;
     if (pcmk_is_set(type, pe_order_asymmetrical)) {
         handle_asymmetric_ordering(first, then);
     }
 
     if (pcmk_is_set(type, pe_order_implies_first)
         && !pcmk_is_set(then_flags, pe_action_optional)) {
         // Then is required, and implies first should be, too
 
         if (pcmk_is_set(filter, pe_action_optional)
             && !pcmk_is_set(flags, pe_action_optional)
             && pcmk_is_set(first_flags, pe_action_optional)) {
             clear_action_flag_because(first, pe_action_optional, then);
         }
 
         if (pcmk_is_set(flags, pe_action_migrate_runnable)
             && !pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
             clear_action_flag_because(first, pe_action_migrate_runnable, then);
         }
     }
 
     if (pcmk_is_set(type, pe_order_promoted_implies_first)
         && (then->rsc != NULL) && (then->rsc->role == pcmk_role_promoted)
         && pcmk_is_set(filter, pe_action_optional)
         && !pcmk_is_set(then->flags, pe_action_optional)) {
 
         clear_action_flag_because(first, pe_action_optional, then);
 
         if (pcmk_is_set(first->flags, pe_action_migrate_runnable)
             && !pcmk_is_set(then->flags, pe_action_migrate_runnable)) {
             clear_action_flag_because(first, pe_action_migrate_runnable,
                                       then);
         }
     }
 
     if (pcmk_is_set(type, pe_order_implies_first_migratable)
         && pcmk_is_set(filter, pe_action_optional)) {
 
         if (!pcmk_all_flags_set(then->flags, pe_action_migrate_runnable
                                              |pe_action_runnable)) {
             clear_action_flag_because(first, pe_action_runnable, then);
         }
 
         if (!pcmk_is_set(then->flags, pe_action_optional)) {
             clear_action_flag_because(first, pe_action_optional, then);
         }
     }
 
     if (pcmk_is_set(type, pe_order_pseudo_left)
         && pcmk_is_set(filter, pe_action_optional)
         && !pcmk_is_set(first->flags, pe_action_runnable)) {
 
         clear_action_flag_because(then, pe_action_migrate_runnable, first);
         pe__clear_action_flags(then, pe_action_pseudo);
     }
 
     if (pcmk_is_set(type, pe_order_runnable_left)
         && pcmk_is_set(filter, pe_action_runnable)
         && pcmk_is_set(then->flags, pe_action_runnable)
         && !pcmk_is_set(flags, pe_action_runnable)) {
 
         clear_action_flag_because(then, pe_action_runnable, first);
         clear_action_flag_because(then, pe_action_migrate_runnable, first);
     }
 
     if (pcmk_is_set(type, pe_order_implies_then)
         && pcmk_is_set(filter, pe_action_optional)
         && pcmk_is_set(then->flags, pe_action_optional)
         && !pcmk_is_set(flags, pe_action_optional)
         && !pcmk_is_set(first->flags, pe_action_migrate_runnable)) {
 
         clear_action_flag_because(then, pe_action_optional, first);
     }
 
     if (pcmk_is_set(type, pe_order_restart)) {
         handle_restart_ordering(first, then, filter);
     }
 
     if (then_flags != then->flags) {
         pcmk__set_updated_flags(changed, first, pcmk__updated_then);
         pe_rsc_trace(then->rsc,
                      "%s on %s: flags are now %#.6x (was %#.6x) "
                      "because of 'first' %s (%#.6x)",
                      then->uuid, pe__node_name(then->node),
                      then->flags, then_flags, first->uuid, first->flags);
 
         if ((then->rsc != NULL) && (then->rsc->parent != NULL)) {
             // Required to handle "X_stop then X_start" for cloned groups
             pcmk__update_action_for_orderings(then, data_set);
         }
     }
 
     if (first_flags != first->flags) {
         pcmk__set_updated_flags(changed, first, pcmk__updated_first);
         pe_rsc_trace(first->rsc,
                      "%s on %s: flags are now %#.6x (was %#.6x) "
                      "because of 'then' %s (%#.6x)",
                      first->uuid, pe__node_name(first->node),
                      first->flags, first_flags, then->uuid, then->flags);
     }
 
     return changed;
 }
 
 /*!
  * \internal
  * \brief Trace-log an action (optionally with its dependent actions)
  *
  * \param[in] pre_text  If not NULL, prefix the log with this plus ": "
  * \param[in] action    Action to log
  * \param[in] details   If true, recursively log dependent actions
  */
 void
 pcmk__log_action(const char *pre_text, const pe_action_t *action, bool details)
 {
     const char *node_uname = NULL;
     const char *node_uuid = NULL;
     const char *desc = NULL;
 
     CRM_CHECK(action != NULL, return);
 
     if (!pcmk_is_set(action->flags, pe_action_pseudo)) {
         if (action->node != NULL) {
             node_uname = action->node->details->uname;
             node_uuid = action->node->details->id;
         } else {
             node_uname = "<none>";
         }
     }
 
     switch (text2task(action->task)) {
         case pcmk_action_fence:
         case pcmk_action_shutdown:
             if (pcmk_is_set(action->flags, pe_action_pseudo)) {
                 desc = "Pseudo ";
             } else if (pcmk_is_set(action->flags, pe_action_optional)) {
                 desc = "Optional ";
             } else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
                 desc = "!!Non-Startable!! ";
             } else if (pcmk_is_set(action->flags, pe_action_processed)) {
                desc = "";
             } else {
                desc = "(Provisional) ";
             }
             crm_trace("%s%s%sAction %d: %s%s%s%s%s%s",
                       ((pre_text == NULL)? "" : pre_text),
                       ((pre_text == NULL)? "" : ": "),
                       desc, action->id, action->uuid,
                       (node_uname? "\ton " : ""), (node_uname? node_uname : ""),
                       (node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""),
                       (node_uuid? ")" : ""));
             break;
         default:
             if (pcmk_is_set(action->flags, pe_action_optional)) {
                 desc = "Optional ";
             } else if (pcmk_is_set(action->flags, pe_action_pseudo)) {
                 desc = "Pseudo ";
             } else if (!pcmk_is_set(action->flags, pe_action_runnable)) {
                 desc = "!!Non-Startable!! ";
             } else if (pcmk_is_set(action->flags, pe_action_processed)) {
                desc = "";
             } else {
                desc = "(Provisional) ";
             }
             crm_trace("%s%s%sAction %d: %s %s%s%s%s%s%s",
                       ((pre_text == NULL)? "" : pre_text),
                       ((pre_text == NULL)? "" : ": "),
                       desc, action->id, action->uuid,
                       (action->rsc? action->rsc->id : "<none>"),
                       (node_uname? "\ton " : ""), (node_uname? node_uname : ""),
                       (node_uuid? "\t\t(" : ""), (node_uuid? node_uuid : ""),
                       (node_uuid? ")" : ""));
             break;
     }
 
     if (details) {
         const GList *iter = NULL;
         const pe_action_wrapper_t *other = NULL;
 
         crm_trace("\t\t====== Preceding Actions");
         for (iter = action->actions_before; iter != NULL; iter = iter->next) {
             other = (const pe_action_wrapper_t *) iter->data;
             pcmk__log_action("\t\t", other->action, false);
         }
         crm_trace("\t\t====== Subsequent Actions");
         for (iter = action->actions_after; iter != NULL; iter = iter->next) {
             other = (const pe_action_wrapper_t *) iter->data;
             pcmk__log_action("\t\t", other->action, false);
         }
         crm_trace("\t\t====== End");
 
     } else {
         crm_trace("\t\t(before=%d, after=%d)",
                   g_list_length(action->actions_before),
                   g_list_length(action->actions_after));
     }
 }
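 
 /* Rough usage sketch (names are illustrative): a call such as
  *     pcmk__log_action("update_action", action, false);
  * emits one trace line of roughly the form
  *     update_action: Optional Action 12: rsc1_monitor_10000 rsc1    on node1
  * followed by a "(before=N, after=M)" line, since details is false.
  */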
 
 /*!
  * \internal
  * \brief Create a new shutdown action for a node
  *
  * \param[in,out] node  Node being shut down
  *
  * \return Newly created shutdown action for \p node
  */
 pe_action_t *
 pcmk__new_shutdown_action(pe_node_t *node)
 {
     char *shutdown_id = NULL;
     pe_action_t *shutdown_op = NULL;
 
     CRM_ASSERT(node != NULL);
 
     shutdown_id = crm_strdup_printf("%s-%s", PCMK_ACTION_DO_SHUTDOWN,
                                     node->details->uname);
 
     shutdown_op = custom_action(NULL, shutdown_id, PCMK_ACTION_DO_SHUTDOWN,
                                 node, FALSE, TRUE, node->details->data_set);
 
     pcmk__order_stops_before_shutdown(node, shutdown_op);
     add_hash_param(shutdown_op->meta, XML_ATTR_TE_NOWAIT, XML_BOOLEAN_TRUE);
     return shutdown_op;
 }
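 
 /* For illustration: on a node named "node1", the shutdown_id built above
  * would be "do_shutdown-node1" (assuming PCMK_ACTION_DO_SHUTDOWN expands to
  * "do_shutdown"), and XML_ATTR_TE_NOWAIT is set in the action's
  * meta-attributes so the transition does not wait for the shutdown to be
  * confirmed.
  */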
 
 /*!
  * \internal
  * \brief Calculate and add an operation digest to XML
  *
  * Calculate an operation digest, which enables us to later determine when a
  * restart is needed due to the resource's parameters being changed, and add it
  * to the given XML.
  *
  * \param[in]     op      Operation result from executor
  * \param[in,out] update  XML to add digest to
  */
 static void
 add_op_digest_to_xml(const lrmd_event_data_t *op, xmlNode *update)
 {
     char *digest = NULL;
     xmlNode *args_xml = NULL;
 
     if (op->params == NULL) {
         return;
     }
     args_xml = create_xml_node(NULL, XML_TAG_PARAMS);
     g_hash_table_foreach(op->params, hash2field, args_xml);
     pcmk__filter_op_for_digest(args_xml);
     digest = calculate_operation_digest(args_xml, NULL);
     crm_xml_add(update, XML_LRM_ATTR_OP_DIGEST, digest);
     free_xml(args_xml);
     free(digest);
 }
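 
 /* Informal note: the digest is a hash of the operation's filtered parameter
  * list, recorded via XML_LRM_ATTR_OP_DIGEST on the history entry. Comparing
  * it against a freshly calculated digest is how the scheduler later detects
  * that the resource's configuration changed (see pcmk__check_action_config()
  * below).
  */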
 
 #define FAKE_TE_ID     "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx"
 
 /*!
  * \internal
  * \brief Create XML for resource operation history update
  *
  * \param[in,out] parent          Parent XML node to add to
  * \param[in,out] op              Operation event data
  * \param[in]     caller_version  DC feature set
  * \param[in]     target_rc       Expected result of operation
  * \param[in]     node            Name of node on which operation was performed
  * \param[in]     origin          Arbitrary description of update source
  *
  * \return Newly created XML node for history update
  */
 xmlNode *
 pcmk__create_history_xml(xmlNode *parent, lrmd_event_data_t *op,
                          const char *caller_version, int target_rc,
                          const char *node, const char *origin)
 {
     char *key = NULL;
     char *magic = NULL;
     char *op_id = NULL;
     char *op_id_additional = NULL;
     char *local_user_data = NULL;
     const char *exit_reason = NULL;
 
     xmlNode *xml_op = NULL;
     const char *task = NULL;
 
     CRM_CHECK(op != NULL, return NULL);
     crm_trace("Creating history XML for %s-interval %s action for %s on %s "
               "(DC version: %s, origin: %s)",
               pcmk__readable_interval(op->interval_ms), op->op_type, op->rsc_id,
               ((node == NULL)? "no node" : node), caller_version, origin);
 
     task = op->op_type;
 
     /* Record a successful agent reload as a start, and a failed one as a
      * monitor, to make life easier for the scheduler when determining the
      * current state.
      *
      * @COMPAT We should check "reload" here only if the operation was for a
      * pre-OCF-1.1 resource agent, but we don't know that here, and we should
      * only ever get results for actions scheduled by us, so we can reasonably
      * assume any "reload" is actually a pre-1.1 agent reload.
      */
     if (pcmk__str_any_of(task, PCMK_ACTION_RELOAD, PCMK_ACTION_RELOAD_AGENT,
                          NULL)) {
         if (op->op_status == PCMK_EXEC_DONE) {
             task = PCMK_ACTION_START;
         } else {
             task = PCMK_ACTION_MONITOR;
         }
     }
 
     key = pcmk__op_key(op->rsc_id, task, op->interval_ms);
     if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
         const char *n_type = crm_meta_value(op->params, "notify_type");
         const char *n_task = crm_meta_value(op->params, "notify_operation");
 
         CRM_LOG_ASSERT(n_type != NULL);
         CRM_LOG_ASSERT(n_task != NULL);
         op_id = pcmk__notify_key(op->rsc_id, n_type, n_task);
 
         if (op->op_status != PCMK_EXEC_PENDING) {
             /* Ignore notify errors.
              *
              * @TODO It might be better to keep the correct result here, and
              * ignore it in process_graph_event().
              */
             lrmd__set_result(op, PCMK_OCF_OK, PCMK_EXEC_DONE, NULL);
         }
 
     /* Migration history is preserved separately, which usually matters for
      * multiple nodes and is important for future cluster transitions.
      */
     } else if (pcmk__str_any_of(op->op_type, PCMK_ACTION_MIGRATE_TO,
                                 PCMK_ACTION_MIGRATE_FROM, NULL)) {
         op_id = strdup(key);
 
     } else if (did_rsc_op_fail(op, target_rc)) {
         op_id = pcmk__op_key(op->rsc_id, "last_failure", 0);
         if (op->interval_ms == 0) {
             // Ensure 'last' gets updated, in case record-pending is true
             op_id_additional = pcmk__op_key(op->rsc_id, "last", 0);
         }
         exit_reason = op->exit_reason;
 
     } else if (op->interval_ms > 0) {
         op_id = strdup(key);
 
     } else {
         op_id = pcmk__op_key(op->rsc_id, "last", 0);
     }
 
   again:
     xml_op = pcmk__xe_match(parent, XML_LRM_TAG_RSC_OP, XML_ATTR_ID, op_id);
     if (xml_op == NULL) {
         xml_op = create_xml_node(parent, XML_LRM_TAG_RSC_OP);
     }
 
     if (op->user_data == NULL) {
         crm_debug("Generating fake transition key for: " PCMK__OP_FMT
                   " %d from %s", op->rsc_id, op->op_type, op->interval_ms,
                   op->call_id, origin);
         local_user_data = pcmk__transition_key(-1, op->call_id, target_rc,
                                                FAKE_TE_ID);
         op->user_data = local_user_data;
     }
 
     if (magic == NULL) {
         magic = crm_strdup_printf("%d:%d;%s", op->op_status, op->rc,
                                   (const char *) op->user_data);
     }
 
     crm_xml_add(xml_op, XML_ATTR_ID, op_id);
     crm_xml_add(xml_op, XML_LRM_ATTR_TASK_KEY, key);
     crm_xml_add(xml_op, XML_LRM_ATTR_TASK, task);
     crm_xml_add(xml_op, XML_ATTR_ORIGIN, origin);
     crm_xml_add(xml_op, XML_ATTR_CRM_VERSION, caller_version);
     crm_xml_add(xml_op, XML_ATTR_TRANSITION_KEY, op->user_data);
     crm_xml_add(xml_op, XML_ATTR_TRANSITION_MAGIC, magic);
     crm_xml_add(xml_op, XML_LRM_ATTR_EXIT_REASON, pcmk__s(exit_reason, ""));
     crm_xml_add(xml_op, XML_LRM_ATTR_TARGET, node); // For context during triage
 
     crm_xml_add_int(xml_op, XML_LRM_ATTR_CALLID, op->call_id);
     crm_xml_add_int(xml_op, XML_LRM_ATTR_RC, op->rc);
     crm_xml_add_int(xml_op, XML_LRM_ATTR_OPSTATUS, op->op_status);
     crm_xml_add_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, op->interval_ms);
 
     if (compare_version("2.1", caller_version) <= 0) {
         if (op->t_run || op->t_rcchange || op->exec_time || op->queue_time) {
             crm_trace("Timing data (" PCMK__OP_FMT
                       "): last=%u change=%u exec=%u queue=%u",
                       op->rsc_id, op->op_type, op->interval_ms,
                       op->t_run, op->t_rcchange, op->exec_time, op->queue_time);
 
             if ((op->interval_ms != 0) && (op->t_rcchange != 0)) {
                 // Recurring ops may have changed rc after initial run
                 crm_xml_add_ll(xml_op, XML_RSC_OP_LAST_CHANGE,
                                (long long) op->t_rcchange);
             } else {
                 crm_xml_add_ll(xml_op, XML_RSC_OP_LAST_CHANGE,
                                (long long) op->t_run);
             }
 
             crm_xml_add_int(xml_op, XML_RSC_OP_T_EXEC, op->exec_time);
             crm_xml_add_int(xml_op, XML_RSC_OP_T_QUEUE, op->queue_time);
         }
     }
 
     if (pcmk__str_any_of(op->op_type, PCMK_ACTION_MIGRATE_TO,
                          PCMK_ACTION_MIGRATE_FROM, NULL)) {
         /*
          * Always record migrate_source and migrate_target for migrate ops.
          */
         const char *name = XML_LRM_ATTR_MIGRATE_SOURCE;
 
         crm_xml_add(xml_op, name, crm_meta_value(op->params, name));
 
         name = XML_LRM_ATTR_MIGRATE_TARGET;
         crm_xml_add(xml_op, name, crm_meta_value(op->params, name));
     }
 
     add_op_digest_to_xml(op, xml_op);
 
     if (op_id_additional) {
         free(op_id);
         op_id = op_id_additional;
         op_id_additional = NULL;
         goto again;
     }
 
     if (local_user_data) {
         free(local_user_data);
         op->user_data = NULL;
     }
     free(magic);
     free(op_id);
     free(key);
     return xml_op;
 }
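 
 /* Informal example of the op_id selection above, assuming pcmk__op_key()
  * joins its arguments with underscores (as in "rsc1_monitor_10000"):
  *   - failed non-recurring op:   "rsc1_last_failure_0", plus a second pass
  *     through the "again" label that also updates "rsc1_last_0"
  *   - successful recurring op:   "rsc1_monitor_10000"
  *   - successful non-recurring:  "rsc1_last_0"
  */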
 
 /*!
  * \internal
  * \brief Check whether an action shutdown-locks a resource to a node
  *
  * If the shutdown-lock cluster property is set, resources will not be recovered
  * on a different node if cleanly stopped, and may start only on that same node.
  * This function checks whether that applies to a given action, so that the
  * transition graph can be marked appropriately.
  *
  * \param[in] action  Action to check
  *
  * \return true if \p action locks its resource to the action's node,
  *         otherwise false
  */
 bool
 pcmk__action_locks_rsc_to_node(const pe_action_t *action)
 {
     // Only resource actions taking place on resource's lock node are locked
     if ((action == NULL) || (action->rsc == NULL)
         || !pe__same_node(action->node, action->rsc->lock_node)) {
         return false;
     }
 
     /* During shutdown, only stops are locked (otherwise, another action such as
      * a demote would cause the controller to clear the lock)
      */
     if (action->node->details->shutdown && (action->task != NULL)
         && (strcmp(action->task, PCMK_ACTION_STOP) != 0)) {
         return false;
     }
 
     return true;
 }
 
 /* lowest to highest */
 static gint
 sort_action_id(gconstpointer a, gconstpointer b)
 {
     const pe_action_wrapper_t *action_wrapper2 = (const pe_action_wrapper_t *)a;
     const pe_action_wrapper_t *action_wrapper1 = (const pe_action_wrapper_t *)b;
 
     if (a == NULL) {
         return 1;
     }
     if (b == NULL) {
         return -1;
     }
     if (action_wrapper1->action->id < action_wrapper2->action->id) {
         return 1;
     }
     if (action_wrapper1->action->id > action_wrapper2->action->id) {
         return -1;
     }
     return 0;
 }
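 
 /* Note: the wrappers above are deliberately read in reverse (a as "2", b as
  * "1"), so returning 1 when b's action ID is the smaller one still yields an
  * ascending sort, as the "lowest to highest" comment promises.
  */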
 
 /*!
  * \internal
  * \brief Remove any duplicate action inputs, merging action flags
  *
  * \param[in,out] action  Action whose inputs should be checked
  */
 void
 pcmk__deduplicate_action_inputs(pe_action_t *action)
 {
     GList *item = NULL;
     GList *next = NULL;
     pe_action_wrapper_t *last_input = NULL;
 
     action->actions_before = g_list_sort(action->actions_before,
                                          sort_action_id);
     for (item = action->actions_before; item != NULL; item = next) {
         pe_action_wrapper_t *input = (pe_action_wrapper_t *) item->data;
 
         next = item->next;
         if ((last_input != NULL)
             && (input->action->id == last_input->action->id)) {
             crm_trace("Input %s (%d) duplicate skipped for action %s (%d)",
                       input->action->uuid, input->action->id,
                       action->uuid, action->id);
 
             /* For the purposes of scheduling, the ordering flags no longer
              * matter, but crm_simulate looks at certain ones when creating a
              * dot graph. Combining the flags is sufficient for that purpose.
              */
             last_input->type |= input->type;
             if (input->state == pe_link_dumped) {
                 last_input->state = pe_link_dumped;
             }
 
             free(item->data);
             action->actions_before = g_list_delete_link(action->actions_before,
                                                         item);
         } else {
             last_input = input;
             input->state = pe_link_not_dumped;
         }
     }
 }
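 
 /* Informal example: if two orderings exist between the same pair of actions
  * (say, one implied by a colocation and one from an explicit constraint),
  * the sorted input list contains that action twice; the loop above drops the
  * duplicate entry and ORs its ordering-type flags into the surviving one.
  */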
 
 /*!
  * \internal
  * \brief Output all scheduled actions
  *
  * \param[in,out] data_set  Cluster working set
  */
 void
 pcmk__output_actions(pe_working_set_t *data_set)
 {
     pcmk__output_t *out = data_set->priv;
 
     // Output node (non-resource) actions
     for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
         char *node_name = NULL;
         char *task = NULL;
         pe_action_t *action = (pe_action_t *) iter->data;
 
         if (action->rsc != NULL) {
             continue; // Resource actions will be output later
 
         } else if (pcmk_is_set(action->flags, pe_action_optional)) {
             continue; // This action was not scheduled
         }
 
         if (pcmk__str_eq(action->task, PCMK_ACTION_DO_SHUTDOWN,
                          pcmk__str_none)) {
             task = strdup("Shutdown");
 
         } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
                                 pcmk__str_none)) {
             const char *op = g_hash_table_lookup(action->meta,
                                                  "stonith_action");
 
             task = crm_strdup_printf("Fence (%s)", op);
 
         } else {
             continue; // Don't display other node action types
         }
 
         if (pe__is_guest_node(action->node)) {
             const pe_resource_t *remote = action->node->details->remote_rsc;
 
             node_name = crm_strdup_printf("%s (resource: %s)",
                                           pe__node_name(action->node),
                                           remote->container->id);
         } else if (action->node != NULL) {
             node_name = crm_strdup_printf("%s", pe__node_name(action->node));
         }
 
         out->message(out, "node-action", task, node_name, action->reason);
 
         free(node_name);
         free(task);
     }
 
     // Output resource actions
     for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
         pe_resource_t *rsc = (pe_resource_t *) iter->data;
 
         rsc->cmds->output_actions(rsc);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether action from resource history is still in configuration
  *
  * \param[in] rsc          Resource that action is for
  * \param[in] task         Action's name
  * \param[in] interval_ms  Action's interval (in milliseconds)
  *
  * \return true if action is still in resource configuration, otherwise false
  */
 static bool
 action_in_config(const pe_resource_t *rsc, const char *task, guint interval_ms)
 {
     char *key = pcmk__op_key(rsc->id, task, interval_ms);
     bool config = (find_rsc_op_entry(rsc, key) != NULL);
 
     free(key);
     return config;
 }
 
 /*!
  * \internal
  * \brief Get action name needed to compare digest for configuration changes
  *
  * \param[in] task         Action name from history
  * \param[in] interval_ms  Action interval (in milliseconds)
  *
  * \return Action name whose digest should be compared
  */
 static const char *
 task_for_digest(const char *task, guint interval_ms)
 {
     /* Certain actions need to be compared against the parameters used to start
      * the resource.
      */
     if ((interval_ms == 0)
         && pcmk__str_any_of(task, PCMK_ACTION_MONITOR, PCMK_ACTION_MIGRATE_FROM,
                             PCMK_ACTION_PROMOTE, NULL)) {
         task = PCMK_ACTION_START;
     }
     return task;
 }
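 
 /* For illustration: the digest of a non-recurring monitor (probe), promote,
  * or migrate_from is compared against the parameters the resource was
  * started with, while a 10s-interval monitor is compared against its own
  * recurring-operation definition.
  */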
 
 /*!
  * \internal
  * \brief Check whether only sanitized parameters to an action changed
  *
  * When collecting CIB files for troubleshooting, crm_report will mask
  * sensitive resource parameters. If simulations were run using that, affected
  * resources would appear to need a restart, which would complicate
  * troubleshooting. To avoid that, we save a "secure digest" of non-sensitive
  * parameters. This function uses that digest to check whether only masked
  * parameters are different.
  *
  * \param[in] xml_op       Resource history entry with secure digest
  * \param[in] digest_data  Operation digest information being compared
  * \param[in] data_set     Cluster working set
  *
  * \return true if only sanitized parameters changed, otherwise false
  */
 static bool
 only_sanitized_changed(const xmlNode *xml_op,
                        const op_digest_cache_t *digest_data,
                        const pe_working_set_t *data_set)
 {
     const char *digest_secure = NULL;
 
     if (!pcmk_is_set(data_set->flags, pe_flag_sanitized)) {
         // The scheduler is not being run as a simulation
         return false;
     }
 
     digest_secure = crm_element_value(xml_op, XML_LRM_ATTR_SECURE_DIGEST);
 
     return (digest_data->rc != RSC_DIGEST_MATCH) && (digest_secure != NULL)
            && (digest_data->digest_secure_calc != NULL)
            && (strcmp(digest_data->digest_secure_calc, digest_secure) == 0);
 }
 
 /*!
  * \internal
  * \brief Force a restart due to a configuration change
  *
  * \param[in,out] rsc          Resource that action is for
  * \param[in]     task         Name of action whose configuration changed
  * \param[in]     interval_ms  Action interval (in milliseconds)
  * \param[in,out] node         Node where resource should be restarted
  */
 static void
 force_restart(pe_resource_t *rsc, const char *task, guint interval_ms,
               pe_node_t *node)
 {
     char *key = pcmk__op_key(rsc->id, task, interval_ms);
     pe_action_t *required = custom_action(rsc, key, task, NULL, FALSE, TRUE,
                                           rsc->cluster);
 
     pe_action_set_reason(required, "resource definition change", true);
     trigger_unfencing(rsc, node, "Device parameters changed", NULL,
                       rsc->cluster);
 }
 
 /*!
  * \internal
  * \brief Schedule a reload of a resource on a node
  *
  * \param[in,out] data       Resource to reload
  * \param[in]     user_data  Where resource should be reloaded
  */
 static void
 schedule_reload(gpointer data, gpointer user_data)
 {
     pe_resource_t *rsc = data;
     const pe_node_t *node = user_data;
     pe_action_t *reload = NULL;
 
     // For collective resources, just call recursively for children
     if (rsc->variant > pcmk_rsc_variant_primitive) {
         g_list_foreach(rsc->children, schedule_reload, user_data);
         return;
     }
 
     // Skip the reload in certain situations
     if ((node == NULL)
         || !pcmk_is_set(rsc->flags, pe_rsc_managed)
         || pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         pe_rsc_trace(rsc, "Skip reload of %s:%s%s %s",
                      rsc->id,
                      pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : " unmanaged",
                      pcmk_is_set(rsc->flags, pe_rsc_failed)? " failed" : "",
                      (node == NULL)? "inactive" : node->details->uname);
         return;
     }
 
     /* If a resource's configuration changed while a start was pending,
      * force a full restart instead of a reload.
      */
     if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
         pe_rsc_trace(rsc, "%s: preventing agent reload because start pending",
                      rsc->id);
         custom_action(rsc, stop_key(rsc), PCMK_ACTION_STOP, node, FALSE, TRUE,
                       rsc->cluster);
         return;
     }
 
     // Schedule the reload
     pe__set_resource_flags(rsc, pe_rsc_reload);
     reload = custom_action(rsc, reload_key(rsc), PCMK_ACTION_RELOAD_AGENT, node,
                            FALSE, TRUE, rsc->cluster);
     pe_action_set_reason(reload, "resource definition change", FALSE);
 
     // Set orderings so that a required stop or demote cancels the reload
     pcmk__new_ordering(NULL, NULL, reload, rsc, stop_key(rsc), NULL,
                        pe_order_optional|pe_order_then_cancels_first,
                        rsc->cluster);
     pcmk__new_ordering(NULL, NULL, reload, rsc, demote_key(rsc), NULL,
                        pe_order_optional|pe_order_then_cancels_first,
                        rsc->cluster);
 }
 
 /*!
  * \internal
  * \brief Handle any configuration change for an action
  *
  * Given an action from resource history, if the resource's configuration
  * changed since the action was done, schedule any actions needed (restart,
  * reload, unfencing, rescheduling recurring actions, etc.).
  *
  * \param[in,out] rsc     Resource that action is for
  * \param[in,out] node    Node that action was on
  * \param[in]     xml_op  Action XML from resource history
  *
  * \return true if action configuration changed, otherwise false
  */
 bool
 pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
                           const xmlNode *xml_op)
 {
     guint interval_ms = 0;
     const char *task = NULL;
     const op_digest_cache_t *digest_data = NULL;
 
     CRM_CHECK((rsc != NULL) && (node != NULL) && (xml_op != NULL),
               return false);
 
     task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
     CRM_CHECK(task != NULL, return false);
 
     crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
 
     // If this is a recurring action, check whether it has been orphaned
     if (interval_ms > 0) {
         if (action_in_config(rsc, task, interval_ms)) {
             pe_rsc_trace(rsc, "%s-interval %s for %s on %s is in configuration",
                          pcmk__readable_interval(interval_ms), task, rsc->id,
                          pe__node_name(node));
         } else if (pcmk_is_set(rsc->cluster->flags,
                                pe_flag_stop_action_orphans)) {
             pcmk__schedule_cancel(rsc,
                                   crm_element_value(xml_op,
                                                     XML_LRM_ATTR_CALLID),
                                   task, interval_ms, node, "orphan");
             return true;
         } else {
             pe_rsc_debug(rsc, "%s-interval %s for %s on %s is orphaned",
                          pcmk__readable_interval(interval_ms), task, rsc->id,
                          pe__node_name(node));
             return true;
         }
     }
 
     crm_trace("Checking %s-interval %s for %s on %s for configuration changes",
               pcmk__readable_interval(interval_ms), task, rsc->id,
               pe__node_name(node));
     task = task_for_digest(task, interval_ms);
     digest_data = rsc_action_digest_cmp(rsc, xml_op, node, rsc->cluster);
 
     if (only_sanitized_changed(xml_op, digest_data, rsc->cluster)) {
         if (!pcmk__is_daemon && (rsc->cluster->priv != NULL)) {
             pcmk__output_t *out = rsc->cluster->priv;
 
             out->info(out,
                       "Only 'private' parameters to %s-interval %s for %s "
                       "on %s changed: %s",
                       pcmk__readable_interval(interval_ms), task, rsc->id,
                       pe__node_name(node),
                       crm_element_value(xml_op, XML_ATTR_TRANSITION_MAGIC));
         }
         return false;
     }
 
     switch (digest_data->rc) {
         case RSC_DIGEST_RESTART:
             crm_log_xml_debug(digest_data->params_restart, "params:restart");
             force_restart(rsc, task, interval_ms, node);
             return true;
 
         case RSC_DIGEST_ALL:
         case RSC_DIGEST_UNKNOWN:
             // Changes that can potentially be handled by an agent reload
 
             if (interval_ms > 0) {
                 /* Recurring actions aren't reloaded per se; they are just
                  * re-scheduled so the next run uses the new parameters.
                  * The old instance will be cancelled automatically.
                  */
                 crm_log_xml_debug(digest_data->params_all, "params:reschedule");
                 pcmk__reschedule_recurring(rsc, task, interval_ms, node);
 
             } else if (crm_element_value(xml_op,
                                          XML_LRM_ATTR_RESTART_DIGEST) != NULL) {
                 // Agent supports reload, so use it
                 trigger_unfencing(rsc, node,
                                   "Device parameters changed (reload)", NULL,
                                   rsc->cluster);
                 crm_log_xml_debug(digest_data->params_all, "params:reload");
                 schedule_reload((gpointer) rsc, (gpointer) node);
 
             } else {
                 pe_rsc_trace(rsc,
                              "Restarting %s "
                              "because agent doesn't support reload", rsc->id);
                 crm_log_xml_debug(digest_data->params_restart,
                                   "params:restart");
                 force_restart(rsc, task, interval_ms, node);
             }
             return true;
 
         default:
             break;
     }
     return false;
 }
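 
 /* Informal summary of the digest handling above:
  *   RSC_DIGEST_RESTART                 -> force_restart()
  *   RSC_DIGEST_ALL or RSC_DIGEST_UNKNOWN:
  *     recurring action                 -> pcmk__reschedule_recurring()
  *     restart digest in history        -> schedule_reload() (agent reloads)
  *     otherwise                        -> force_restart()
  *   anything else (digests match)      -> no change needed
  */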
 
 /*!
  * \internal
  * \brief Create a list of resource's action history entries, sorted by call ID
  *
  * \param[in]  rsc_entry    Resource's <lrm_resource> status XML
  * \param[out] start_index  Where to store index of start-like action, if any
  * \param[out] stop_index   Where to store index of stop action, if any
  *
  * \return Newly allocated list of the resource's <lrm_rsc_op> history
  *         entries, sorted by call ID
  */
 static GList *
 rsc_history_as_list(const xmlNode *rsc_entry, int *start_index, int *stop_index)
 {
     GList *ops = NULL;
 
     for (xmlNode *rsc_op = first_named_child(rsc_entry, XML_LRM_TAG_RSC_OP);
          rsc_op != NULL; rsc_op = crm_next_same_xml(rsc_op)) {
         ops = g_list_prepend(ops, rsc_op);
     }
     ops = g_list_sort(ops, sort_op_by_callid);
     calculate_active_ops(ops, start_index, stop_index);
     return ops;
 }
 
 /*!
  * \internal
  * \brief Process a resource's action history from the CIB status
  *
  * Given a resource's action history, if the resource's configuration
  * changed since the actions were done, schedule any actions needed (restart,
  * reload, unfencing, rescheduling recurring actions, clean-up, etc.).
  * (This also cancels recurring actions for maintenance mode, which is not
  * entirely related but convenient to do here.)
  *
  * \param[in]     rsc_entry  Resource's <lrm_resource> status XML
  * \param[in,out] rsc        Resource whose history is being processed
  * \param[in,out] node       Node whose history is being processed
  */
 static void
 process_rsc_history(const xmlNode *rsc_entry, pe_resource_t *rsc,
                     pe_node_t *node)
 {
     int offset = -1;
     int stop_index = 0;
     int start_index = 0;
     GList *sorted_op_list = NULL;
 
     if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         if (pe_rsc_is_anon_clone(pe__const_top_resource(rsc, false))) {
             pe_rsc_trace(rsc,
                          "Skipping configuration check "
                          "for orphaned clone instance %s",
                          rsc->id);
         } else {
             pe_rsc_trace(rsc,
                          "Skipping configuration check and scheduling clean-up "
                          "for orphaned resource %s", rsc->id);
             pcmk__schedule_cleanup(rsc, node, false);
         }
         return;
     }
 
     if (pe_find_node_id(rsc->running_on, node->details->id) == NULL) {
         if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, false)) {
             pcmk__schedule_cleanup(rsc, node, false);
         }
         pe_rsc_trace(rsc,
                      "Skipping configuration check for %s "
                      "because no longer active on %s",
                      rsc->id, pe__node_name(node));
         return;
     }
 
     pe_rsc_trace(rsc, "Checking for configuration changes for %s on %s",
                  rsc->id, pe__node_name(node));
 
     if (pcmk__rsc_agent_changed(rsc, node, rsc_entry, true)) {
         pcmk__schedule_cleanup(rsc, node, false);
     }
 
     sorted_op_list = rsc_history_as_list(rsc_entry, &start_index, &stop_index);
     if (start_index < stop_index) {
         return; // Resource is stopped
     }
 
     for (GList *iter = sorted_op_list; iter != NULL; iter = iter->next) {
         xmlNode *rsc_op = (xmlNode *) iter->data;
         const char *task = NULL;
         guint interval_ms = 0;
 
         if (++offset < start_index) {
             // Skip actions that happened before a start
             continue;
         }
 
         task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
         crm_element_value_ms(rsc_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
 
         if ((interval_ms > 0)
             && (pcmk_is_set(rsc->flags, pe_rsc_maintenance)
                 || node->details->maintenance)) {
             // Maintenance mode cancels recurring operations
             pcmk__schedule_cancel(rsc,
                                   crm_element_value(rsc_op,
                                                     XML_LRM_ATTR_CALLID),
                                   task, interval_ms, node, "maintenance mode");
 
         } else if ((interval_ms > 0)
                    || pcmk__strcase_any_of(task, PCMK_ACTION_MONITOR,
                                            PCMK_ACTION_START,
                                            PCMK_ACTION_PROMOTE,
                                            PCMK_ACTION_MIGRATE_FROM, NULL)) {
             /* If a resource operation failed, and the operation's definition
              * has changed, clear any fail count so it can be retried fresh.
              */
 
             if (pe__bundle_needs_remote_name(rsc)) {
                 /* We haven't assigned resources to nodes yet, so if the
                  * REMOTE_CONTAINER_HACK is used, we may calculate the digest
                  * based on the literal "#uname" value rather than the properly
                  * substituted value. That would mistakenly make the action
                  * definition appear to have been changed. Defer the check until
                  * later in this case.
                  */
                 pe__add_param_check(rsc_op, rsc, node, pe_check_active,
                                     rsc->cluster);
 
             } else if (pcmk__check_action_config(rsc, node, rsc_op)
                        && (pe_get_failcount(node, rsc, NULL, pe_fc_effective,
                                             NULL) != 0)) {
                 pe__clear_failcount(rsc, node, "action definition changed",
                                     rsc->cluster);
             }
         }
     }
     g_list_free(sorted_op_list);
 }
 
 /*!
  * \internal
  * \brief Process a node's action history from the CIB status
  *
  * Given a node's resource history, if the resource's configuration changed
  * since the actions were done, schedule any actions needed (restart,
  * reload, unfencing, rescheduling recurring actions, clean-up, etc.).
  * (This also cancels recurring actions for maintenance mode, which is not
  * entirely related but convenient to do here.)
  *
  * \param[in,out] node      Node whose history is being processed
  * \param[in]     lrm_rscs  Node's <lrm_resources> from CIB status XML
  */
 static void
 process_node_history(pe_node_t *node, const xmlNode *lrm_rscs)
 {
     crm_trace("Processing node history for %s", pe__node_name(node));
     for (const xmlNode *rsc_entry = first_named_child(lrm_rscs,
                                                       XML_LRM_TAG_RESOURCE);
          rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
 
         if (rsc_entry->children != NULL) {
             GList *result = pcmk__rscs_matching_id(ID(rsc_entry),
                                                    node->details->data_set);
 
             for (GList *iter = result; iter != NULL; iter = iter->next) {
                 pe_resource_t *rsc = (pe_resource_t *) iter->data;
 
                 if (rsc->variant == pcmk_rsc_variant_primitive) {
                     process_rsc_history(rsc_entry, rsc, node);
                 }
             }
             g_list_free(result);
         }
     }
 }
 
 // XPath to find a node's resource history
 #define XPATH_NODE_HISTORY "/" XML_TAG_CIB "/" XML_CIB_TAG_STATUS             \
                            "/" XML_CIB_TAG_STATE "[@" XML_ATTR_UNAME "='%s']" \
                            "/" XML_CIB_TAG_LRM "/" XML_LRM_TAG_RESOURCES
 
 /*!
  * \internal
  * \brief Process any resource configuration changes in the CIB status
  *
  * Go through all nodes' resource history, and if a resource's configuration
  * changed since its actions were done, schedule any actions needed (restart,
  * reload, unfencing, rescheduling recurring actions, clean-up, etc.).
  * (This also cancels recurring actions for maintenance mode, which is not
  * entirely related but convenient to do here.)
  *
  * \param[in,out] data_set  Cluster working set
  */
 void
 pcmk__handle_rsc_config_changes(pe_working_set_t *data_set)
 {
     crm_trace("Check resource and action configuration for changes");
 
     /* Rather than iterate through the status section, iterate through the nodes
      * and search for the appropriate status subsection for each. This skips
      * orphaned nodes and lets us eliminate some cases before searching the XML.
      */
     for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
         pe_node_t *node = (pe_node_t *) iter->data;
 
         /* Don't bother checking actions for a node that can't run actions ...
          * unless it's in maintenance mode, in which case we still need to
          * cancel any existing recurring monitors.
          */
         if (node->details->maintenance
             || pcmk__node_available(node, false, false)) {
 
             char *xpath = NULL;
             xmlNode *history = NULL;
 
             xpath = crm_strdup_printf(XPATH_NODE_HISTORY, node->details->uname);
             history = get_xpath_object(xpath, data_set->input, LOG_NEVER);
             free(xpath);
 
             process_node_history(node, history);
         }
     }
 }
diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c
index d1e4eba146..0d77a97e74 100644
--- a/lib/pacemaker/pcmk_sched_bundle.c
+++ b/lib/pacemaker/pcmk_sched_bundle.c
@@ -1,1051 +1,1051 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdbool.h>
 
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 struct assign_data {
     const pe_node_t *prefer;
     bool stop_if_fail;
 };
 
 /*!
  * \internal
  * \brief Assign a single bundle replica's resources (other than container)
  *
  * \param[in,out] replica    Replica to assign
  * \param[in]     user_data  Preferred node, if any
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 assign_replica(pe__bundle_replica_t *replica, void *user_data)
 {
     pe_node_t *container_host = NULL;
 
     struct assign_data *assign_data = user_data;
     const pe_node_t *prefer = assign_data->prefer;
     bool stop_if_fail = assign_data->stop_if_fail;
 
     const pe_resource_t *bundle = pe__const_top_resource(replica->container,
                                                          true);
 
     if (replica->ip != NULL) {
         pe_rsc_trace(bundle, "Assigning bundle %s IP %s",
                      bundle->id, replica->ip->id);
         replica->ip->cmds->assign(replica->ip, prefer, stop_if_fail);
     }
 
     container_host = replica->container->allocated_to;
     if (replica->remote != NULL) {
         if (pe__is_guest_or_remote_node(container_host)) {
             /* REMOTE_CONTAINER_HACK: "Nested" connection resources must be on
              * the same host because Pacemaker Remote only supports a single
              * active connection.
              */
             pcmk__new_colocation("#replica-remote-with-host-remote", NULL,
                                  INFINITY, replica->remote,
                                  container_host->details->remote_rsc, NULL,
                                  NULL, pcmk__coloc_influence);
         }
         pe_rsc_trace(bundle, "Assigning bundle %s connection %s",
                      bundle->id, replica->remote->id);
         replica->remote->cmds->assign(replica->remote, prefer, stop_if_fail);
     }
 
     if (replica->child != NULL) {
         pe_node_t *node = NULL;
         GHashTableIter iter;
 
         g_hash_table_iter_init(&iter, replica->child->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if (!pe__same_node(node, replica->node)) {
                 node->weight = -INFINITY;
             } else if (!pcmk__threshold_reached(replica->child, node, NULL)) {
                 node->weight = INFINITY;
             }
         }
 
         pe__set_resource_flags(replica->child->parent, pe_rsc_allocating);
         pe_rsc_trace(bundle, "Assigning bundle %s replica child %s",
                      bundle->id, replica->child->id);
         replica->child->cmds->assign(replica->child, replica->node,
                                      stop_if_fail);
         pe__clear_resource_flags(replica->child->parent, pe_rsc_allocating);
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Assign a bundle resource to a node
  *
  * \param[in,out] rsc           Resource to assign to a node
  * \param[in]     prefer        Node to prefer, if all else is equal
  * \param[in]     stop_if_fail  If \c true and a primitive descendant of \p rsc
  *                              can't be assigned to a node, set the
  *                              descendant's next role to stopped and update
  *                              existing actions
  *
  * \return Node that \p rsc is assigned to, if assigned entirely to one node
  *
  * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
  *       completely undo the assignment. A successful assignment can be either
  *       undone or left alone as final. A failed assignment has the same effect
  *       as calling pcmk__unassign_resource(); there are no side effects on
  *       roles or actions.
  */
 pe_node_t *
 pcmk__bundle_assign(pe_resource_t *rsc, const pe_node_t *prefer,
                     bool stop_if_fail)
 {
     GList *containers = NULL;
     pe_resource_t *bundled_resource = NULL;
     struct assign_data assign_data = { prefer, stop_if_fail };
 
     CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container));
 
     pe_rsc_trace(rsc, "Assigning bundle %s", rsc->id);
     pe__set_resource_flags(rsc, pe_rsc_allocating);
 
     pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
                          rsc, __func__, rsc->allowed_nodes, rsc->cluster);
 
     // Assign all containers first, so we know what nodes the bundle will be on
     containers = g_list_sort(pe__bundle_containers(rsc), pcmk__cmp_instance);
     pcmk__assign_instances(rsc, containers, pe__bundle_max(rsc),
                            rsc->fns->max_per_node(rsc));
     g_list_free(containers);
 
     // Then assign remaining replica resources
     pe__foreach_bundle_replica(rsc, assign_replica, (void *) &assign_data);
 
     // Finally, assign the bundled resources to each bundle node
     bundled_resource = pe__bundled_resource(rsc);
     if (bundled_resource != NULL) {
         pe_node_t *node = NULL;
         GHashTableIter iter;
 
         g_hash_table_iter_init(&iter, bundled_resource->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
             if (pe__node_is_bundle_instance(rsc, node)) {
                 node->weight = 0;
             } else {
                 node->weight = -INFINITY;
             }
         }
         bundled_resource->cmds->assign(bundled_resource, prefer, stop_if_fail);
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Create actions for a bundle replica's resources (other than child)
  *
  * \param[in,out] replica    Replica to create actions for
  * \param[in]     user_data  Unused
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 create_replica_actions(pe__bundle_replica_t *replica, void *user_data)
 {
     if (replica->ip != NULL) {
         replica->ip->cmds->create_actions(replica->ip);
     }
     if (replica->container != NULL) {
         replica->container->cmds->create_actions(replica->container);
     }
     if (replica->remote != NULL) {
         replica->remote->cmds->create_actions(replica->remote);
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Create all actions needed for a given bundle resource
  *
  * \param[in,out] rsc  Bundle resource to create actions for
  */
 void
 pcmk__bundle_create_actions(pe_resource_t *rsc)
 {
     pe_action_t *action = NULL;
     GList *containers = NULL;
     pe_resource_t *bundled_resource = NULL;
 
     CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container));
 
     pe__foreach_bundle_replica(rsc, create_replica_actions, NULL);
 
     containers = pe__bundle_containers(rsc);
     pcmk__create_instance_actions(rsc, containers);
     g_list_free(containers);
 
     bundled_resource = pe__bundled_resource(rsc);
     if (bundled_resource != NULL) {
         bundled_resource->cmds->create_actions(bundled_resource);
 
         if (pcmk_is_set(bundled_resource->flags, pe_rsc_promotable)) {
             pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_PROMOTE, true, true);
             action = pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_PROMOTED,
                                                true, true);
             action->priority = INFINITY;
 
             pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_DEMOTE, true, true);
             action = pe__new_rsc_pseudo_action(rsc, PCMK_ACTION_DEMOTED,
                                                true, true);
             action->priority = INFINITY;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Create internal constraints for a bundle replica's resources
  *
  * \param[in,out] replica    Replica to create internal constraints for
  * \param[in,out] user_data  Replica's parent bundle
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 replica_internal_constraints(pe__bundle_replica_t *replica, void *user_data)
 {
     pe_resource_t *bundle = user_data;
 
     replica->container->cmds->internal_constraints(replica->container);
 
     // Start bundle -> start replica container
     pcmk__order_starts(bundle, replica->container,
                        pe_order_runnable_left|pe_order_implies_first_printed);
 
     // Stop bundle -> stop replica child and container
     if (replica->child != NULL) {
         pcmk__order_stops(bundle, replica->child,
                           pe_order_implies_first_printed);
     }
     pcmk__order_stops(bundle, replica->container,
                       pe_order_implies_first_printed);
 
     // Start replica container -> bundle is started
     pcmk__order_resource_actions(replica->container, PCMK_ACTION_START, bundle,
                                  PCMK_ACTION_RUNNING,
                                  pe_order_implies_then_printed);
 
     // Stop replica container -> bundle is stopped
     pcmk__order_resource_actions(replica->container, PCMK_ACTION_STOP, bundle,
                                  PCMK_ACTION_STOPPED,
                                  pe_order_implies_then_printed);
 
     if (replica->ip != NULL) {
         replica->ip->cmds->internal_constraints(replica->ip);
 
         // Replica IP address -> replica container (symmetric)
         pcmk__order_starts(replica->ip, replica->container,
                            pe_order_runnable_left|pe_order_preserve);
         pcmk__order_stops(replica->container, replica->ip,
                           pe_order_implies_first|pe_order_preserve);
 
         pcmk__new_colocation("#ip-with-container", NULL, INFINITY, replica->ip,
                              replica->container, NULL, NULL,
                              pcmk__coloc_influence);
     }
 
     if (replica->remote != NULL) {
         /* This handles ordering and colocating remote relative to container
          * (via "#resource-with-container"). Since IP is also ordered and
          * colocated relative to the container, we don't need to do anything
          * explicit here with IP.
          */
         replica->remote->cmds->internal_constraints(replica->remote);
     }
 
     if (replica->child != NULL) {
         CRM_ASSERT(replica->remote != NULL);
         // "Start remote then child" is implicit in scheduler's remote logic
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Create implicit constraints needed for a bundle resource
  *
  * \param[in,out] rsc  Bundle resource to create implicit constraints for
  */
 void
 pcmk__bundle_internal_constraints(pe_resource_t *rsc)
 {
     pe_resource_t *bundled_resource = NULL;
 
     CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container));
 
     pe__foreach_bundle_replica(rsc, replica_internal_constraints, rsc);
 
     bundled_resource = pe__bundled_resource(rsc);
     if (bundled_resource == NULL) {
         return;
     }
 
     // Start bundle -> start bundled clone
     pcmk__order_resource_actions(rsc, PCMK_ACTION_START, bundled_resource,
                                  PCMK_ACTION_START,
                                  pe_order_implies_first_printed);
 
     // Bundled clone is started -> bundle is started
     pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_RUNNING,
                                  rsc, PCMK_ACTION_RUNNING,
                                  pe_order_implies_then_printed);
 
     // Stop bundle -> stop bundled clone
     pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP, bundled_resource,
                                  PCMK_ACTION_STOP,
                                  pe_order_implies_first_printed);
 
     // Bundled clone is stopped -> bundle is stopped
     pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_STOPPED,
                                  rsc, PCMK_ACTION_STOPPED,
                                  pe_order_implies_then_printed);
 
     bundled_resource->cmds->internal_constraints(bundled_resource);
 
     if (!pcmk_is_set(bundled_resource->flags, pe_rsc_promotable)) {
         return;
     }
     pcmk__promotable_restart_ordering(rsc);
 
     // Demote bundle -> demote bundled clone
     pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTE, bundled_resource,
                                  PCMK_ACTION_DEMOTE,
                                  pe_order_implies_first_printed);
 
     // Bundled clone is demoted -> bundle is demoted
     pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_DEMOTED,
                                  rsc, PCMK_ACTION_DEMOTED,
                                  pe_order_implies_then_printed);
 
     // Promote bundle -> promote bundled clone
     pcmk__order_resource_actions(rsc, PCMK_ACTION_PROMOTE,
                                  bundled_resource, PCMK_ACTION_PROMOTE,
                                  pe_order_implies_first_printed);
 
     // Bundled clone is promoted -> bundle is promoted
     pcmk__order_resource_actions(bundled_resource, PCMK_ACTION_PROMOTED,
                                  rsc, PCMK_ACTION_PROMOTED,
                                  pe_order_implies_then_printed);
 }
 
 struct match_data {
     const pe_node_t *node;     // Node to compare against replica
     pe_resource_t *container;  // Replica container corresponding to node
 };
 
 /*!
  * \internal
  * \brief Check whether a replica container is assigned to a given node
  *
  * \param[in]     replica    Replica to check
  * \param[in,out] user_data  struct match_data with node to compare against
  *
  * \return true if the replica does not match (to indicate further replicas
  *         should be processed), otherwise false
  */
 static bool
 match_replica_container(const pe__bundle_replica_t *replica, void *user_data)
 {
     struct match_data *match_data = user_data;
 
     if (pcmk__instance_matches(replica->container, match_data->node,
                                pcmk_role_unknown, false)) {
         match_data->container = replica->container;
         return false; // Match found, don't bother searching further replicas
     }
     return true; // No match, keep searching
 }
 
 /*!
  * \internal
  * \brief Get the host to which a bundle node is assigned
  *
  * \param[in] node  Possible bundle node to check
  *
  * \return Node to which the container for \p node is assigned if \p node is a
  *         bundle node, otherwise \p node itself
  */
 static const pe_node_t *
 get_bundle_node_host(const pe_node_t *node)
 {
     if (pe__is_bundle_node(node)) {
         const pe_resource_t *container = node->details->remote_rsc->container;
 
         return container->fns->location(container, NULL, 0);
     }
     return node;
 }
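 
 /* For illustration: given a guest node created for a bundle replica (say
  * "httpd-bundle-0"), this returns the cluster node to which that replica's
  * container is assigned; an ordinary cluster or remote node is returned
  * unchanged.
  */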
 
 /*!
  * \internal
  * \brief Find a bundle container compatible with a dependent resource
  *
  * \param[in] dependent  Dependent resource in colocation with bundle
  * \param[in] bundle     Bundle that \p dependent is colocated with
  *
  * \return A container from \p bundle assigned to the same node as
  *         \p dependent if \p dependent is assigned to a node, otherwise a
  *         container assigned to any of \p dependent's allowed nodes,
  *         otherwise NULL.
  */
 static pe_resource_t *
 compatible_container(const pe_resource_t *dependent,
                      const pe_resource_t *bundle)
 {
     GList *scratch = NULL;
     struct match_data match_data = { NULL, NULL };
 
     // If dependent is assigned, only check there
     match_data.node = dependent->fns->location(dependent, NULL, 0);
     match_data.node = get_bundle_node_host(match_data.node);
     if (match_data.node != NULL) {
         pe__foreach_const_bundle_replica(bundle, match_replica_container,
                                          &match_data);
         return match_data.container;
     }
 
     // Otherwise, check for any of the dependent's allowed nodes
     scratch = g_hash_table_get_values(dependent->allowed_nodes);
     scratch = pcmk__sort_nodes(scratch, NULL);
     for (const GList *iter = scratch; iter != NULL; iter = iter->next) {
         match_data.node = iter->data;
         match_data.node = get_bundle_node_host(match_data.node);
         if (match_data.node == NULL) {
             continue;
         }
 
         pe__foreach_const_bundle_replica(bundle, match_replica_container,
                                          &match_data);
         if (match_data.container != NULL) {
             break;
         }
     }
     g_list_free(scratch);
     return match_data.container;
 }
 
 struct coloc_data {
     const pcmk__colocation_t *colocation;
     pe_resource_t *dependent;
     GList *container_hosts;
 };
 
 /*!
  * \internal
  * \brief Apply a colocation score to replica node scores or resource priority
  *
  * \param[in]     replica    Replica of primary bundle resource in colocation
  * \param[in,out] user_data  struct coloc_data for colocation being applied
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 replica_apply_coloc_score(const pe__bundle_replica_t *replica, void *user_data)
 {
     struct coloc_data *coloc_data = user_data;
     pe_node_t *chosen = NULL;
 
     if (coloc_data->colocation->score < INFINITY) {
         replica->container->cmds->apply_coloc_score(coloc_data->dependent,
                                                     replica->container,
                                                     coloc_data->colocation,
                                                     false);
         return true;
     }
 
     chosen = replica->container->fns->location(replica->container, NULL, 0);
     if ((chosen == NULL)
         || is_set_recursive(replica->container, pe_rsc_block, true)) {
         return true;
     }
 
     if ((coloc_data->colocation->primary_role >= pcmk_role_promoted)
         && ((replica->child == NULL)
             || (replica->child->next_role < pcmk_role_promoted))) {
         return true;
     }
 
     pe_rsc_trace(pe__const_top_resource(replica->container, true),
                  "Allowing mandatory colocation %s using %s @%d",
                  coloc_data->colocation->id, pe__node_name(chosen),
                  chosen->weight);
     coloc_data->container_hosts = g_list_prepend(coloc_data->container_hosts,
                                                  chosen);
     return true;
 }
 
 /*!
  * \internal
  * \brief Apply a colocation's score to node scores or resource priority
  *
  * Given a colocation constraint, apply its score to the dependent's
  * allowed node scores (if we are still placing resources) or priority (if
  * we are choosing promotable clone instance roles).
  *
  * \param[in,out] dependent      Dependent resource in colocation
  * \param[in]     primary        Primary resource in colocation
  * \param[in]     colocation     Colocation constraint to apply
  * \param[in]     for_dependent  true if called on behalf of dependent
  */
 void
 pcmk__bundle_apply_coloc_score(pe_resource_t *dependent,
                                const pe_resource_t *primary,
                                const pcmk__colocation_t *colocation,
                                bool for_dependent)
 {
     struct coloc_data coloc_data = { colocation, dependent, NULL };
 
     /* This should never be called for the bundle itself as a dependent.
      * Instead, we add its colocation constraints to its containers and bundled
      * primitive and call the apply_coloc_score() method for them as dependents.
      */
     CRM_ASSERT((primary != NULL) && (primary->variant == pe_container)
                && (dependent != NULL)
                && (dependent->variant == pcmk_rsc_variant_primitive)
                && (colocation != NULL) && !for_dependent);
 
     if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
         pe_rsc_trace(primary,
                      "Skipping applying colocation %s "
                      "because %s is still provisional",
                      colocation->id, primary->id);
         return;
     }
     pe_rsc_trace(primary, "Applying colocation %s (%s with %s at %s)",
                  colocation->id, dependent->id, primary->id,
                  pcmk_readable_score(colocation->score));
 
     /* If the constraint dependent is a clone or bundle, "dependent" here is one
      * of its instances. Look for a compatible instance of this bundle.
      */
-    if (colocation->dependent->variant > pe_group) {
+    if (colocation->dependent->variant > pcmk_rsc_variant_group) {
         const pe_resource_t *primary_container = compatible_container(dependent,
                                                                       primary);
 
         if (primary_container != NULL) { // Success, we found one
             pe_rsc_debug(primary, "Pairing %s with %s",
                          dependent->id, primary_container->id);
             dependent->cmds->apply_coloc_score(dependent, primary_container,
                                                colocation, true);
 
         } else if (colocation->score >= INFINITY) { // Failure, and it's fatal
             crm_notice("%s cannot run because there is no compatible "
                        "instance of %s to colocate with",
                        dependent->id, primary->id);
             pcmk__assign_resource(dependent, NULL, true, true);
 
         } else { // Failure, but we can ignore it
             pe_rsc_debug(primary,
                          "%s cannot be colocated with any instance of %s",
                          dependent->id, primary->id);
         }
         return;
     }
 
     pe__foreach_const_bundle_replica(primary, replica_apply_coloc_score,
                                      &coloc_data);
 
     if (colocation->score >= INFINITY) {
         pcmk__colocation_intersect_nodes(dependent, primary, colocation,
                                          coloc_data.container_hosts, false);
     }
     g_list_free(coloc_data.container_hosts);
 }
 
 // Bundle implementation of resource_alloc_functions_t:with_this_colocations()
 void
 pcmk__with_bundle_colocations(const pe_resource_t *rsc,
                               const pe_resource_t *orig_rsc, GList **list)
 {
     const pe_resource_t *bundled_rsc = NULL;
 
     CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container)
                && (orig_rsc != NULL) && (list != NULL));
 
     // The bundle itself and its containers always get its colocations
     if ((orig_rsc == rsc)
         || pcmk_is_set(orig_rsc->flags, pe_rsc_replica_container)) {
 
         pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
         return;
     }
 
     /* The bundled resource gets the colocations if it's promotable and we've
      * begun choosing roles
      */
     bundled_rsc = pe__bundled_resource(rsc);
     if ((bundled_rsc == NULL)
         || !pcmk_is_set(bundled_rsc->flags, pe_rsc_promotable)
         || (pe__const_top_resource(orig_rsc, false) != bundled_rsc)) {
         return;
     }
 
     if (orig_rsc == bundled_rsc) {
         if (pe__clone_flag_is_set(orig_rsc, pe__clone_promotion_constrained)) {
             /* orig_rsc is the clone and we're setting roles (or have already
              * done so)
              */
             pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
         }
 
     } else if (!pcmk_is_set(orig_rsc->flags, pe_rsc_provisional)) {
         /* orig_rsc is an instance and is already assigned. If something
          * requests colocations for orig_rsc now, it's for setting roles.
          */
         pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
     }
 }
 
 // Bundle implementation of resource_alloc_functions_t:this_with_colocations()
 void
 pcmk__bundle_with_colocations(const pe_resource_t *rsc,
                               const pe_resource_t *orig_rsc, GList **list)
 {
     const pe_resource_t *bundled_rsc = NULL;
 
     CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container)
                && (orig_rsc != NULL) && (list != NULL));
 
     // The bundle itself and its containers always get its colocations
     if ((orig_rsc == rsc)
         || pcmk_is_set(orig_rsc->flags, pe_rsc_replica_container)) {
 
         pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
         return;
     }
 
     /* The bundled resource gets the colocations if it's promotable and we've
      * begun choosing roles
      */
     bundled_rsc = pe__bundled_resource(rsc);
     if ((bundled_rsc == NULL)
         || !pcmk_is_set(bundled_rsc->flags, pe_rsc_promotable)
         || (pe__const_top_resource(orig_rsc, false) != bundled_rsc)) {
         return;
     }
 
     if (orig_rsc == bundled_rsc) {
         if (pe__clone_flag_is_set(orig_rsc, pe__clone_promotion_constrained)) {
             /* orig_rsc is the clone and we're setting roles (or have already
              * done so)
              */
             pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
         }
 
     } else if (!pcmk_is_set(orig_rsc->flags, pe_rsc_provisional)) {
         /* orig_rsc is an instance and is already assigned. If something
          * requests colocations for orig_rsc now, it's for setting roles.
          */
         pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
     }
 }
 
 /*!
  * \internal
  * \brief Return action flags for a given bundle resource action
  *
  * \param[in,out] action  Bundle resource action to get flags for
  * \param[in]     node    If not NULL, limit effects to this node
  *
  * \return Flags appropriate to \p action on \p node
  */
 uint32_t
 pcmk__bundle_action_flags(pe_action_t *action, const pe_node_t *node)
 {
     GList *containers = NULL;
     uint32_t flags = 0;
     pe_resource_t *bundled_resource = NULL;
 
     CRM_ASSERT((action != NULL) && (action->rsc != NULL)
                && (action->rsc->variant == pe_container));
 
     bundled_resource = pe__bundled_resource(action->rsc);
     if (bundled_resource != NULL) {
         // Clone actions are done on the bundled clone resource, not container
         switch (get_complex_task(bundled_resource, action->task)) {
             case pcmk_action_unspecified:
             case pcmk_action_notify:
             case pcmk_action_notified:
             case pcmk_action_promote:
             case pcmk_action_promoted:
             case pcmk_action_demote:
             case pcmk_action_demoted:
                 return pcmk__collective_action_flags(action,
                                                      bundled_resource->children,
                                                      node);
             default:
                 break;
         }
     }
 
     containers = pe__bundle_containers(action->rsc);
     flags = pcmk__collective_action_flags(action, containers, node);
     g_list_free(containers);
     return flags;
 }
 
 /*!
  * \internal
  * \brief Apply a location constraint to a bundle replica
  *
  * \param[in,out] replica    Replica to apply constraint to
  * \param[in,out] user_data  Location constraint to apply
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 apply_location_to_replica(pe__bundle_replica_t *replica, void *user_data)
 {
     pe__location_t *location = user_data;
 
     if (replica->container != NULL) {
         replica->container->cmds->apply_location(replica->container, location);
     }
     if (replica->ip != NULL) {
         replica->ip->cmds->apply_location(replica->ip, location);
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Apply a location constraint to a bundle resource's allowed node scores
  *
  * \param[in,out] rsc       Bundle resource to apply constraint to
  * \param[in,out] location  Location constraint to apply
  */
 void
 pcmk__bundle_apply_location(pe_resource_t *rsc, pe__location_t *location)
 {
     pe_resource_t *bundled_resource = NULL;
 
     CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container)
                && (location != NULL));
 
     pcmk__apply_location(rsc, location);
     pe__foreach_bundle_replica(rsc, apply_location_to_replica, location);
 
     bundled_resource = pe__bundled_resource(rsc);
     if ((bundled_resource != NULL)
         && ((location->role_filter == pcmk_role_unpromoted)
             || (location->role_filter == pcmk_role_promoted))) {
         bundled_resource->cmds->apply_location(bundled_resource, location);
         bundled_resource->rsc_location = g_list_prepend(
             bundled_resource->rsc_location, location);
     }
 }
 
 #define XPATH_REMOTE "//nvpair[@name='" XML_RSC_ATTR_REMOTE_RA_ADDR "']"
 
 /*!
  * \internal
  * \brief Add a bundle replica's actions to transition graph
  *
  * \param[in,out] replica    Replica to add to graph
  * \param[in]     user_data  Bundle that replica belongs to (for logging only)
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 add_replica_actions_to_graph(pe__bundle_replica_t *replica, void *user_data)
 {
     if ((replica->remote != NULL) && (replica->container != NULL)
         && pe__bundle_needs_remote_name(replica->remote)) {
 
         /* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that
          * run pacemaker-remoted inside, without needing a separate IP for
          * the container. This is done by configuring the inner remote's
          * connection host as the magic string "#uname", then
          * replacing it with the underlying host when needed.
          */
         xmlNode *nvpair = get_xpath_object(XPATH_REMOTE, replica->remote->xml,
                                            LOG_ERR);
         const char *calculated_addr = NULL;
 
         // Replace the value in replica->remote->xml (if appropriate)
         calculated_addr = pe__add_bundle_remote_name(replica->remote,
                                                      replica->remote->cluster,
                                                      nvpair, "value");
         if (calculated_addr != NULL) {
             /* Since this is for the bundle as a resource, and not any
              * particular action, replace the value in the default
              * parameters (not evaluated for node). create_graph_action()
              * will grab it from there to replace it in node-evaluated
              * parameters.
              */
             GHashTable *params = pe_rsc_params(replica->remote,
                                                NULL, replica->remote->cluster);
 
             g_hash_table_replace(params,
                                  strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
                                  strdup(calculated_addr));
         } else {
             pe_resource_t *bundle = user_data;
 
             /* The only way to get here is if the remote connection is
              * neither currently running nor scheduled to run. That means we
              * won't be doing any operations that require addr (only start
              * requires it; we additionally use it to compare digests when
              * unpacking status, promote, and migrate_from history, but
              * that's already happened by this point).
              */
             pe_rsc_info(bundle,
                         "Unable to determine address for bundle %s "
                         "remote connection", bundle->id);
         }
     }
     if (replica->ip != NULL) {
         replica->ip->cmds->add_actions_to_graph(replica->ip);
     }
     if (replica->container != NULL) {
         replica->container->cmds->add_actions_to_graph(replica->container);
     }
     if (replica->remote != NULL) {
         replica->remote->cmds->add_actions_to_graph(replica->remote);
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Add a bundle resource's actions to the transition graph
  *
  * \param[in,out] rsc  Bundle resource whose actions should be added
  */
 void
 pcmk__bundle_add_actions_to_graph(pe_resource_t *rsc)
 {
     pe_resource_t *bundled_resource = NULL;
 
     CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container));
 
     bundled_resource = pe__bundled_resource(rsc);
     if (bundled_resource != NULL) {
         bundled_resource->cmds->add_actions_to_graph(bundled_resource);
     }
     pe__foreach_bundle_replica(rsc, add_replica_actions_to_graph, rsc);
 }
 
 struct probe_data {
     pe_resource_t *bundle;  // Bundle being probed
     pe_node_t *node;        // Node to create probes on
     bool any_created;       // Whether any probes have been created
 };
 
 /*!
  * \internal
  * \brief Order a bundle replica's start after another replica's probe
  *
  * \param[in,out] replica    Replica to order start for
  * \param[in,out] user_data  Replica with probe to order after
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 order_replica_start_after(pe__bundle_replica_t *replica, void *user_data)
 {
     pe__bundle_replica_t *probed_replica = user_data;
 
     if ((replica == probed_replica) || (replica->container == NULL)) {
         return true;
     }
     pcmk__new_ordering(probed_replica->container,
                        pcmk__op_key(probed_replica->container->id,
                                     PCMK_ACTION_MONITOR, 0),
                        NULL, replica->container,
                        pcmk__op_key(replica->container->id, PCMK_ACTION_START,
                                     0),
                        NULL, pe_order_optional|pe_order_same_node,
                        replica->container->cluster);
     return true;
 }
 
 /*!
  * \internal
  * \brief Create probes for a bundle replica's resources
  *
  * \param[in,out] replica    Replica to create probes for
  * \param[in,out] user_data  struct probe_data
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 create_replica_probes(pe__bundle_replica_t *replica, void *user_data)
 {
     struct probe_data *probe_data = user_data;
 
     if ((replica->ip != NULL)
         && replica->ip->cmds->create_probe(replica->ip, probe_data->node)) {
         probe_data->any_created = true;
     }
     if ((replica->child != NULL)
         && pe__same_node(probe_data->node, replica->node)
         && replica->child->cmds->create_probe(replica->child,
                                               probe_data->node)) {
         probe_data->any_created = true;
     }
     if ((replica->container != NULL)
         && replica->container->cmds->create_probe(replica->container,
                                                   probe_data->node)) {
         probe_data->any_created = true;
 
         /* If we're limited to one replica per host (probably due to the lack
          * of an IP range), then we don't want any of our peer containers
          * starting until we've established that no other copies are already
          * running.
          *
          * Partly this is to ensure that the maximum number of replicas per
          * host is observed, but also to ensure that the containers don't fail
          * to start because the necessary port mappings (which won't include
          * an IP for uniqueness) are already taken.
          */
         if (probe_data->bundle->fns->max_per_node(probe_data->bundle) == 1) {
             pe__foreach_bundle_replica(probe_data->bundle,
                                        order_replica_start_after, replica);
         }
     }
     if ((replica->container != NULL) && (replica->remote != NULL)
         && replica->remote->cmds->create_probe(replica->remote,
                                                probe_data->node)) {
         /* Do not probe the remote resource until we know where the container is
          * running. This is required for REMOTE_CONTAINER_HACK to correctly
          * probe remote resources.
          */
         char *probe_uuid = pcmk__op_key(replica->remote->id,
                                         PCMK_ACTION_MONITOR, 0);
         pe_action_t *probe = find_first_action(replica->remote->actions,
                                                probe_uuid, NULL,
                                                probe_data->node);
 
         free(probe_uuid);
         if (probe != NULL) {
             probe_data->any_created = true;
             pe_rsc_trace(probe_data->bundle, "Ordering %s probe on %s",
                          replica->remote->id, pe__node_name(probe_data->node));
             pcmk__new_ordering(replica->container,
                                pcmk__op_key(replica->container->id,
                                             PCMK_ACTION_START, 0),
                                NULL, replica->remote, NULL, probe,
                                pe_order_probe, probe_data->bundle->cluster);
         }
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Schedule any probes needed for a bundle resource on a node
  *
  * \param[in,out] rsc   Bundle resource to create probes for
  * \param[in,out] node  Node to create probe on
  *
  * \return true if any probe was created, otherwise false
  */
 bool
 pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node)
 {
     struct probe_data probe_data = { rsc, node, false };
 
     CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container));
     pe__foreach_bundle_replica(rsc, create_replica_probes, &probe_data);
     return probe_data.any_created;
 }
 
 /*!
  * \internal
  * \brief Output actions for one bundle replica
  *
  * \param[in,out] replica    Replica to output actions for
  * \param[in]     user_data  Unused
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 output_replica_actions(pe__bundle_replica_t *replica, void *user_data)
 {
     if (replica->ip != NULL) {
         replica->ip->cmds->output_actions(replica->ip);
     }
     if (replica->container != NULL) {
         replica->container->cmds->output_actions(replica->container);
     }
     if (replica->remote != NULL) {
         replica->remote->cmds->output_actions(replica->remote);
     }
     if (replica->child != NULL) {
         replica->child->cmds->output_actions(replica->child);
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Output a summary of scheduled actions for a bundle resource
  *
  * \param[in,out] rsc  Bundle resource to output actions for
  */
 void
 pcmk__output_bundle_actions(pe_resource_t *rsc)
 {
     CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container));
     pe__foreach_bundle_replica(rsc, output_replica_actions, NULL);
 }
 
 // Bundle implementation of resource_alloc_functions_t:add_utilization()
 void
 pcmk__bundle_add_utilization(const pe_resource_t *rsc,
                              const pe_resource_t *orig_rsc, GList *all_rscs,
                              GHashTable *utilization)
 {
     pe_resource_t *container = NULL;
 
     CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container));
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return;
     }
 
     /* All bundle replicas are identical, so using the utilization of the first
      * is sufficient for any. Only the implicit container resource can have
      * utilization values.
      */
     container = pe__first_container(rsc);
     if (container != NULL) {
         container->cmds->add_utilization(container, orig_rsc, all_rscs,
                                          utilization);
     }
 }
 
 // Bundle implementation of resource_alloc_functions_t:shutdown_lock()
 void
 pcmk__bundle_shutdown_lock(pe_resource_t *rsc)
 {
     CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_container));
     // Bundles currently don't support shutdown locks
 }
diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c
index 7c427be09b..be8c008db0 100644
--- a/lib/pacemaker/pcmk_sched_clone.c
+++ b/lib/pacemaker/pcmk_sched_clone.c
@@ -1,708 +1,708 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 /*!
  * \internal
  * \brief Assign a clone resource's instances to nodes
  *
  * \param[in,out] rsc           Clone resource to assign
  * \param[in]     prefer        Node to prefer, if all else is equal
  * \param[in]     stop_if_fail  If \c true and a primitive descendant of \p rsc
  *                              can't be assigned to a node, set the
  *                              descendant's next role to stopped and update
  *                              existing actions
  *
  * \return NULL (clones are not assigned to a single node)
  *
  * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
  *       completely undo the assignment. A successful assignment can be either
  *       undone or left alone as final. A failed assignment has the same effect
  *       as calling pcmk__unassign_resource(); there are no side effects on
  *       roles or actions.
  */
 pe_node_t *
 pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer,
                    bool stop_if_fail)
 {
     GList *colocations = NULL;
 
     CRM_ASSERT(pe_rsc_is_clone(rsc));
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return NULL; // Assignment has already been done
     }
 
     // Detect assignment loops
     if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
         pe_rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id);
         return NULL;
     }
     pe__set_resource_flags(rsc, pe_rsc_allocating);
 
     // If this clone is promotable, consider nodes' promotion scores
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__add_promotion_scores(rsc);
     }
 
     // If this clone is colocated with any other resources, assign those first
     colocations = pcmk__this_with_colocations(rsc);
     for (GList *iter = colocations; iter != NULL; iter = iter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) iter->data;
 
         pe_rsc_trace(rsc, "%s: Assigning colocation %s primary %s first",
                      rsc->id, constraint->id, constraint->primary->id);
         constraint->primary->cmds->assign(constraint->primary, prefer,
                                           stop_if_fail);
     }
     g_list_free(colocations);
 
     // If any resources are colocated with this one, consider their preferences
     colocations = pcmk__with_this_colocations(rsc);
     g_list_foreach(colocations, pcmk__add_dependent_scores, rsc);
     g_list_free(colocations);
 
     pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
                          rsc, __func__, rsc->allowed_nodes, rsc->cluster);
 
     rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance);
     pcmk__assign_instances(rsc, rsc->children, pe__clone_max(rsc),
                            pe__clone_node_max(rsc));
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__set_instance_roles(rsc);
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_provisional|pe_rsc_allocating);
     pe_rsc_trace(rsc, "Assigned clone %s", rsc->id);
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Create all actions needed for a given clone resource
  *
  * \param[in,out] rsc  Clone resource to create actions for
  */
 void
 pcmk__clone_create_actions(pe_resource_t *rsc)
 {
     CRM_ASSERT(pe_rsc_is_clone(rsc));
 
     pe_rsc_trace(rsc, "Creating actions for clone %s", rsc->id);
     pcmk__create_instance_actions(rsc, rsc->children);
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__create_promotable_actions(rsc);
     }
 }
 
 /*!
  * \internal
  * \brief Create implicit constraints needed for a clone resource
  *
  * \param[in,out] rsc  Clone resource to create implicit constraints for
  */
 void
 pcmk__clone_internal_constraints(pe_resource_t *rsc)
 {
     bool ordered = false;
 
     CRM_ASSERT(pe_rsc_is_clone(rsc));
 
     pe_rsc_trace(rsc, "Creating internal constraints for clone %s", rsc->id);
 
     // Restart ordering: Stop -> stopped -> start -> started
     pcmk__order_resource_actions(rsc, PCMK_ACTION_STOPPED,
                                  rsc, PCMK_ACTION_START,
                                  pe_order_optional);
     pcmk__order_resource_actions(rsc, PCMK_ACTION_START,
                                  rsc, PCMK_ACTION_RUNNING,
                                  pe_order_runnable_left);
     pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP,
                                  rsc, PCMK_ACTION_STOPPED,
                                  pe_order_runnable_left);
 
     // Demoted -> stop and started -> promote
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__order_resource_actions(rsc, PCMK_ACTION_DEMOTED,
                                      rsc, PCMK_ACTION_STOP,
                                      pe_order_optional);
         pcmk__order_resource_actions(rsc, PCMK_ACTION_RUNNING,
                                      rsc, PCMK_ACTION_PROMOTE,
                                      pe_order_runnable_left);
     }
 
     ordered = pe__clone_is_ordered(rsc);
     if (ordered) {
         /* Ordered clone instances must start and stop by instance number. The
          * instances might have been previously shuffled for assignment or
          * promotion purposes, so re-sort them.
          */
         rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
     }
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         pe_resource_t *instance = (pe_resource_t *) iter->data;
 
         instance->cmds->internal_constraints(instance);
 
         // Start clone -> start instance -> clone started
         pcmk__order_starts(rsc, instance, pe_order_runnable_left
                                           |pe_order_implies_first_printed);
         pcmk__order_resource_actions(instance, PCMK_ACTION_START,
                                      rsc, PCMK_ACTION_RUNNING,
                                      pe_order_implies_then_printed);
 
         // Stop clone -> stop instance -> clone stopped
         pcmk__order_stops(rsc, instance, pe_order_implies_first_printed);
         pcmk__order_resource_actions(instance, PCMK_ACTION_STOP,
                                      rsc, PCMK_ACTION_STOPPED,
                                      pe_order_implies_then_printed);
 
         /* Instances of ordered clones must be started and stopped by instance
          * number. Since only some instances may be starting or stopping, order
          * each instance relative to every later instance.
          */
         if (ordered) {
             for (GList *later = iter->next;
                  later != NULL; later = later->next) {
                 pcmk__order_starts(instance, (pe_resource_t *) later->data,
                                    pe_order_optional);
                 pcmk__order_stops((pe_resource_t *) later->data, instance,
                                   pe_order_optional);
             }
         }
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__order_promotable_instances(rsc);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether colocated resources can be interleaved
  *
  * \param[in] colocation  Colocation constraint with clone as primary
  *
  * \return true if colocated resources can be interleaved, otherwise false
  */
 static bool
 can_interleave(const pcmk__colocation_t *colocation)
 {
     const pe_resource_t *dependent = colocation->dependent;
 
     // Only colocations between clone or bundle resources use interleaving
-    if (dependent->variant <= pe_group) {
+    if (dependent->variant <= pcmk_rsc_variant_group) {
         return false;
     }
 
     // Only the dependent needs to be marked for interleaving
     if (!crm_is_true(g_hash_table_lookup(dependent->meta,
                                          XML_RSC_ATTR_INTERLEAVE))) {
         return false;
     }
 
     /* @TODO Do we actually care about multiple primary instances sharing a
      * dependent instance?
      */
     if (dependent->fns->max_per_node(dependent)
         != colocation->primary->fns->max_per_node(colocation->primary)) {
         pcmk__config_err("Cannot interleave %s and %s because they do not "
                          "support the same number of instances per node",
                          dependent->id, colocation->primary->id);
         return false;
     }
 
     return true;
 }
 
 /*!
  * \internal
  * \brief Apply a colocation's score to node scores or resource priority
  *
  * Given a colocation constraint, apply its score to the dependent's
  * allowed node scores (if we are still placing resources) or priority (if
  * we are choosing promotable clone instance roles).
  *
  * \param[in,out] dependent      Dependent resource in colocation
  * \param[in]     primary        Primary resource in colocation
  * \param[in]     colocation     Colocation constraint to apply
  * \param[in]     for_dependent  true if called on behalf of dependent
  */
 void
 pcmk__clone_apply_coloc_score(pe_resource_t *dependent,
                               const pe_resource_t *primary,
                               const pcmk__colocation_t *colocation,
                               bool for_dependent)
 {
     const GList *iter = NULL;
 
     /* This should never be called for the clone itself as a dependent. Instead,
      * we add its colocation constraints to its instances and call the
      * apply_coloc_score() method for the instances as dependents.
      */
     CRM_ASSERT(!for_dependent);
 
     CRM_ASSERT((colocation != NULL) && pe_rsc_is_clone(primary)
                && (dependent != NULL)
                && (dependent->variant == pcmk_rsc_variant_primitive));
 
     if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
         pe_rsc_trace(primary,
                      "Delaying processing colocation %s "
                      "because cloned primary %s is still provisional",
                      colocation->id, primary->id);
         return;
     }
 
     pe_rsc_trace(primary, "Processing colocation %s (%s with clone %s @%s)",
                  colocation->id, dependent->id, primary->id,
                  pcmk_readable_score(colocation->score));
 
     // Apply role-specific colocations
     if (pcmk_is_set(primary->flags, pe_rsc_promotable)
         && (colocation->primary_role != pcmk_role_unknown)) {
 
         if (pcmk_is_set(dependent->flags, pe_rsc_provisional)) {
             // We're assigning the dependent to a node
             pcmk__update_dependent_with_promotable(primary, dependent,
                                                    colocation);
             return;
         }
 
         if (colocation->dependent_role == pcmk_role_promoted) {
             // We're choosing a role for the dependent
             pcmk__update_promotable_dependent_priority(primary, dependent,
                                                        colocation);
             return;
         }
     }
 
     // Apply interleaved colocations
     if (can_interleave(colocation)) {
         const pe_resource_t *primary_instance = NULL;
 
         primary_instance = pcmk__find_compatible_instance(dependent, primary,
                                                           pcmk_role_unknown,
                                                           false);
         if (primary_instance != NULL) {
             pe_rsc_debug(primary, "Interleaving %s with %s",
                          dependent->id, primary_instance->id);
             dependent->cmds->apply_coloc_score(dependent, primary_instance,
                                                colocation, true);
 
         } else if (colocation->score >= INFINITY) {
             crm_notice("%s cannot run because it cannot interleave with "
                        "any instance of %s", dependent->id, primary->id);
             pcmk__assign_resource(dependent, NULL, true, true);
 
         } else {
             pe_rsc_debug(primary,
                          "%s will not colocate with %s "
                          "because no instance can interleave with it",
                          dependent->id, primary->id);
         }
 
         return;
     }
 
     // Apply mandatory colocations
     if (colocation->score >= INFINITY) {
         GList *primary_nodes = NULL;
 
         // Dependent can run only where primary will have unblocked instances
         for (iter = primary->children; iter != NULL; iter = iter->next) {
             const pe_resource_t *instance = iter->data;
             pe_node_t *chosen = instance->fns->location(instance, NULL, 0);
 
             if ((chosen != NULL)
                 && !is_set_recursive(instance, pe_rsc_block, TRUE)) {
                 pe_rsc_trace(primary, "Allowing %s: %s %d",
                              colocation->id, pe__node_name(chosen),
                              chosen->weight);
                 primary_nodes = g_list_prepend(primary_nodes, chosen);
             }
         }
         pcmk__colocation_intersect_nodes(dependent, primary, colocation,
                                          primary_nodes, false);
         g_list_free(primary_nodes);
         return;
     }
 
     // Apply optional colocations
     for (iter = primary->children; iter != NULL; iter = iter->next) {
         const pe_resource_t *instance = iter->data;
 
         instance->cmds->apply_coloc_score(dependent, instance, colocation,
                                           false);
     }
 }
 
 // Clone implementation of resource_alloc_functions_t:with_this_colocations()
 void
 pcmk__with_clone_colocations(const pe_resource_t *rsc,
                              const pe_resource_t *orig_rsc, GList **list)
 {
     CRM_CHECK((rsc != NULL) && (orig_rsc != NULL) && (list != NULL), return);
 
     pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
 
     if (rsc->parent != NULL) {
         rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc, list);
     }
 }
 
 // Clone implementation of resource_alloc_functions_t:this_with_colocations()
 void
 pcmk__clone_with_colocations(const pe_resource_t *rsc,
                              const pe_resource_t *orig_rsc, GList **list)
 {
     CRM_CHECK((rsc != NULL) && (orig_rsc != NULL) && (list != NULL), return);
 
     pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
 
     if (rsc->parent != NULL) {
         rsc->parent->cmds->this_with_colocations(rsc->parent, orig_rsc, list);
     }
 }
 
 /*!
  * \internal
  * \brief Return action flags for a given clone resource action
  *
  * \param[in,out] action  Action to get flags for
  * \param[in]     node    If not NULL, limit effects to this node
  *
  * \return Flags appropriate to \p action on \p node
  */
 uint32_t
 pcmk__clone_action_flags(pe_action_t *action, const pe_node_t *node)
 {
     CRM_ASSERT((action != NULL) && pe_rsc_is_clone(action->rsc));
 
     return pcmk__collective_action_flags(action, action->rsc->children, node);
 }
 
 /*!
  * \internal
  * \brief Apply a location constraint to a clone resource's allowed node scores
  *
  * \param[in,out] rsc       Clone resource to apply constraint to
  * \param[in,out] location  Location constraint to apply
  */
 void
 pcmk__clone_apply_location(pe_resource_t *rsc, pe__location_t *location)
 {
     CRM_CHECK((location != NULL) && pe_rsc_is_clone(rsc), return);
 
     pcmk__apply_location(rsc, location);
 
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         pe_resource_t *instance = (pe_resource_t *) iter->data;
 
         instance->cmds->apply_location(instance, location);
     }
 }
 
 // GFunc wrapper for calling the action_flags() resource method
 static void
 call_action_flags(gpointer data, gpointer user_data)
 {
     pe_resource_t *rsc = user_data;
 
     rsc->cmds->action_flags((pe_action_t *) data, NULL);
 }
 
 /*!
  * \internal
  * \brief Add a clone resource's actions to the transition graph
  *
  * \param[in,out] rsc  Resource whose actions should be added
  */
 void
 pcmk__clone_add_actions_to_graph(pe_resource_t *rsc)
 {
     CRM_ASSERT(pe_rsc_is_clone(rsc));
 
     g_list_foreach(rsc->actions, call_action_flags, rsc);
     pe__create_clone_notifications(rsc);
 
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) iter->data;
 
         child_rsc->cmds->add_actions_to_graph(child_rsc);
     }
 
     pcmk__add_rsc_actions_to_graph(rsc);
     pe__free_clone_notification_data(rsc);
 }
 
 /*!
  * \internal
  * \brief Check whether a resource or any children have been probed on a node
  *
  * \param[in] rsc   Resource to check
  * \param[in] node  Node to check
  *
  * \return true if \p node is in the known_on table of \p rsc or any of its
  *         children, otherwise false
  */
 static bool
 rsc_probed_on(const pe_resource_t *rsc, const pe_node_t *node)
 {
     if (rsc->children != NULL) {
         for (GList *child_iter = rsc->children; child_iter != NULL;
              child_iter = child_iter->next) {
 
             pe_resource_t *child = (pe_resource_t *) child_iter->data;
 
             if (rsc_probed_on(child, node)) {
                 return true;
             }
         }
         return false;
     }
 
     if (rsc->known_on != NULL) {
         GHashTableIter iter;
         pe_node_t *known_node = NULL;
 
         g_hash_table_iter_init(&iter, rsc->known_on);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &known_node)) {
             if (pe__same_node(node, known_node)) {
                 return true;
             }
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Find clone instance that has been probed on given node
  *
  * \param[in] clone  Clone resource to check
  * \param[in] node   Node to check
  *
  * \return Instance of \p clone that has been probed on \p node if any,
  *         otherwise NULL
  */
 static pe_resource_t *
 find_probed_instance_on(const pe_resource_t *clone, const pe_node_t *node)
 {
     for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
         pe_resource_t *instance = (pe_resource_t *) iter->data;
 
         if (rsc_probed_on(instance, node)) {
             return instance;
         }
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Probe an anonymous clone on a node
  *
  * \param[in,out] clone  Anonymous clone to probe
  * \param[in,out] node   Node to probe \p clone on
  *
  * \return true if any probe was created, otherwise false
  */
 static bool
 probe_anonymous_clone(pe_resource_t *clone, pe_node_t *node)
 {
     // Check whether we already probed an instance on this node
     pe_resource_t *child = find_probed_instance_on(clone, node);
 
     // Otherwise, check if we plan to start an instance on this node
     for (GList *iter = clone->children; (iter != NULL) && (child == NULL);
          iter = iter->next) {
         pe_resource_t *instance = (pe_resource_t *) iter->data;
         const pe_node_t *instance_node = NULL;
 
         instance_node = instance->fns->location(instance, NULL, 0);
         if (pe__same_node(instance_node, node)) {
             child = instance;
         }
     }
 
     // Otherwise, use the first clone instance
     if (child == NULL) {
         child = clone->children->data;
     }
 
     // Anonymous clones only need to probe a single instance
     return child->cmds->create_probe(child, node);
 }
 
 /*!
  * \internal
  * \brief Schedule any probes needed for a resource on a node
  *
  * \param[in,out] rsc   Resource to create probe for
  * \param[in,out] node  Node to create probe on
  *
  * \return true if any probe was created, otherwise false
  */
 bool
 pcmk__clone_create_probe(pe_resource_t *rsc, pe_node_t *node)
 {
     CRM_ASSERT((node != NULL) && pe_rsc_is_clone(rsc));
 
     if (rsc->exclusive_discover) {
         /* The clone is configured to be probed only where a location constraint
          * exists with resource-discovery set to exclusive.
          *
          * This check is not strictly necessary here since the instance's
          * create_probe() method would also check, but doing it here is more
          * efficient (especially for unique clones with a large number of
          * instances), and affects the CRM_meta_notify_available_uname variable
          * passed with notify actions.
          */
         pe_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes,
                                                  node->details->id);
 
         if ((allowed == NULL)
             || (allowed->rsc_discover_mode != pe_discover_exclusive)) {
             /* This node is not marked for resource discovery. Remove it from
              * allowed_nodes so that notifications contain only nodes that the
              * clone can possibly run on.
              */
             pe_rsc_trace(rsc,
                          "Skipping probe for %s on %s because resource has "
                          "exclusive discovery but is not allowed on node",
                          rsc->id, pe__node_name(node));
             g_hash_table_remove(rsc->allowed_nodes, node->details->id);
             return false;
         }
     }
 
     rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
     if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
         return pcmk__probe_resource_list(rsc->children, node);
     } else {
         return probe_anonymous_clone(rsc, node);
     }
 }
 
 /*!
  * \internal
  * \brief Add meta-attributes relevant to transition graph actions to XML
  *
  * Add clone-specific meta-attributes needed for transition graph actions.
  *
  * \param[in]     rsc  Clone resource whose meta-attributes should be added
  * \param[in,out] xml  Transition graph action attributes XML to add to
  */
 void
 pcmk__clone_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml)
 {
     char *name = NULL;
 
     CRM_ASSERT(pe_rsc_is_clone(rsc) && (xml != NULL));
 
     name = crm_meta_name(XML_RSC_ATTR_UNIQUE);
     crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_unique));
     free(name);
 
     name = crm_meta_name(XML_RSC_ATTR_NOTIFY);
     crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_notify));
     free(name);
 
     name = crm_meta_name(XML_RSC_ATTR_INCARNATION_MAX);
     crm_xml_add_int(xml, name, pe__clone_max(rsc));
     free(name);
 
     name = crm_meta_name(XML_RSC_ATTR_INCARNATION_NODEMAX);
     crm_xml_add_int(xml, name, pe__clone_node_max(rsc));
     free(name);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         int promoted_max = pe__clone_promoted_max(rsc);
         int promoted_node_max = pe__clone_promoted_node_max(rsc);
 
         name = crm_meta_name(XML_RSC_ATTR_PROMOTED_MAX);
         crm_xml_add_int(xml, name, promoted_max);
         free(name);
 
         name = crm_meta_name(XML_RSC_ATTR_PROMOTED_NODEMAX);
         crm_xml_add_int(xml, name, promoted_node_max);
         free(name);
 
         /* @COMPAT Maintain backward compatibility with resource agents that
          * expect the old names (deprecated since 2.0.0).
          */
         name = crm_meta_name(PCMK_XA_PROMOTED_MAX_LEGACY);
         crm_xml_add_int(xml, name, promoted_max);
         free(name);
 
         name = crm_meta_name(PCMK_XA_PROMOTED_NODE_MAX_LEGACY);
         crm_xml_add_int(xml, name, promoted_node_max);
         free(name);
     }
 }
 
 // Clone implementation of resource_alloc_functions_t:add_utilization()
 void
 pcmk__clone_add_utilization(const pe_resource_t *rsc,
                             const pe_resource_t *orig_rsc, GList *all_rscs,
                             GHashTable *utilization)
 {
     bool existing = false;
     pe_resource_t *child = NULL;
 
     CRM_ASSERT(pe_rsc_is_clone(rsc) && (orig_rsc != NULL)
                && (utilization != NULL));
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return;
     }
 
     // Look for any child already existing in the list
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         child = (pe_resource_t *) iter->data;
         if (g_list_find(all_rscs, child)) {
             existing = true; // Keep checking remaining children
         } else {
             // If this is a clone of a group, look for group's members
             for (GList *member_iter = child->children; member_iter != NULL;
                  member_iter = member_iter->next) {
 
                 pe_resource_t *member = (pe_resource_t *) member_iter->data;
 
                 if (g_list_find(all_rscs, member) != NULL) {
                     // Add *child's* utilization, not group member's
                     child->cmds->add_utilization(child, orig_rsc, all_rscs,
                                                  utilization);
                     existing = true;
                     break;
                 }
             }
         }
     }
 
     if (!existing && (rsc->children != NULL)) {
         // If nothing was found, still add first child's utilization
         child = (pe_resource_t *) rsc->children->data;
 
         child->cmds->add_utilization(child, orig_rsc, all_rscs, utilization);
     }
 }
 
 // Clone implementation of resource_alloc_functions_t:shutdown_lock()
 void
 pcmk__clone_shutdown_lock(pe_resource_t *rsc)
 {
     CRM_ASSERT(pe_rsc_is_clone(rsc));
     return; // Clones currently don't support shutdown locks
 }
diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c
index 05d727ef19..e363af0a8a 100644
--- a/lib/pacemaker/pcmk_sched_group.c
+++ b/lib/pacemaker/pcmk_sched_group.c
@@ -1,946 +1,947 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdbool.h>
 
 #include <crm/msg_xml.h>
 
 #include <pacemaker-internal.h>
 #include "libpacemaker_private.h"
 
 /*!
  * \internal
  * \brief Assign a group resource to a node
  *
  * \param[in,out] rsc           Group resource to assign to a node
  * \param[in]     prefer        Node to prefer, if all else is equal
  * \param[in]     stop_if_fail  If \c true and a child of \p rsc can't be
  *                              assigned to a node, set the child's next role to
  *                              stopped and update existing actions
  *
  * \return Node that \p rsc is assigned to, if assigned entirely to one node
  *
  * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
  *       completely undo the assignment. A successful assignment can be either
  *       undone or left alone as final. A failed assignment has the same effect
  *       as calling pcmk__unassign_resource(); there are no side effects on
  *       roles or actions.
  */
 pe_node_t *
 pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer,
                    bool stop_if_fail)
 {
     pe_node_t *first_assigned_node = NULL;
     pe_resource_t *first_member = NULL;
 
-    CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group));
+    CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return rsc->allocated_to; // Assignment already done
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
         pe_rsc_debug(rsc, "Assignment dependency loop detected involving %s",
                      rsc->id);
         return NULL;
     }
 
     if (rsc->children == NULL) {
         // No members to assign
         pe__clear_resource_flags(rsc, pe_rsc_provisional);
         return NULL;
     }
 
     pe__set_resource_flags(rsc, pe_rsc_allocating);
     first_member = (pe_resource_t *) rsc->children->data;
     rsc->role = first_member->role;
 
     pe__show_node_scores(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
                          rsc, __func__, rsc->allowed_nodes, rsc->cluster);
 
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         pe_resource_t *member = (pe_resource_t *) iter->data;
         pe_node_t *node = NULL;
 
         pe_rsc_trace(rsc, "Assigning group %s member %s",
                      rsc->id, member->id);
         node = member->cmds->assign(member, prefer, stop_if_fail);
         if (first_assigned_node == NULL) {
             first_assigned_node = node;
         }
     }
 
     pe__set_next_role(rsc, first_member->next_role, "first group member");
     pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
 
     if (!pe__group_flag_is_set(rsc, pe__group_colocated)) {
         return NULL;
     }
     return first_assigned_node;
 }
 
 /*!
  * \internal
  * \brief Create a pseudo-operation for a group as an ordering point
  *
  * \param[in,out] group   Group resource to create action for
  * \param[in]     action  Action name
  *
  * \return Newly created pseudo-operation
  */
 static pe_action_t *
 create_group_pseudo_op(pe_resource_t *group, const char *action)
 {
     pe_action_t *op = custom_action(group, pcmk__op_key(group->id, action, 0),
                                     action, NULL, TRUE, TRUE, group->cluster);
     pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
     return op;
 }
 
 /*!
  * \internal
  * \brief Create all actions needed for a given group resource
  *
  * \param[in,out] rsc  Group resource to create actions for
  */
 void
 pcmk__group_create_actions(pe_resource_t *rsc)
 {
-    CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group));
+    CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
 
     pe_rsc_trace(rsc, "Creating actions for group %s", rsc->id);
 
     // Create actions for individual group members
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         pe_resource_t *member = (pe_resource_t *) iter->data;
 
         member->cmds->create_actions(member);
     }
 
     // Create pseudo-actions for group itself to serve as ordering points
     create_group_pseudo_op(rsc, PCMK_ACTION_START);
     create_group_pseudo_op(rsc, PCMK_ACTION_RUNNING);
     create_group_pseudo_op(rsc, PCMK_ACTION_STOP);
     create_group_pseudo_op(rsc, PCMK_ACTION_STOPPED);
     if (crm_is_true(g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROMOTABLE))) {
         create_group_pseudo_op(rsc, PCMK_ACTION_DEMOTE);
         create_group_pseudo_op(rsc, PCMK_ACTION_DEMOTED);
         create_group_pseudo_op(rsc, PCMK_ACTION_PROMOTE);
         create_group_pseudo_op(rsc, PCMK_ACTION_PROMOTED);
     }
 }
 
 // User data for member_internal_constraints()
 struct member_data {
     // These could be derived from member but this avoids some function calls
     bool ordered;
     bool colocated;
     bool promotable;
 
     pe_resource_t *last_active;
     pe_resource_t *previous_member;
 };
 
 /*!
  * \internal
  * \brief Create implicit constraints needed for a group member
  *
  * \param[in,out] data       Group member to create implicit constraints for
  * \param[in,out] user_data  Member data (struct member_data *)
  */
 static void
 member_internal_constraints(gpointer data, gpointer user_data)
 {
     pe_resource_t *member = (pe_resource_t *) data;
     struct member_data *member_data = (struct member_data *) user_data;
 
     // For ordering demote vs demote or stop vs stop
     uint32_t down_flags = pe_order_implies_first_printed;
 
     // For ordering demote vs demoted or stop vs stopped
     uint32_t post_down_flags = pe_order_implies_then_printed;
 
     // Create the individual member's implicit constraints
     member->cmds->internal_constraints(member);
 
     if (member_data->previous_member == NULL) {
         // This is the first member
         if (member_data->ordered) {
             pe__set_order_flags(down_flags, pe_order_optional);
             post_down_flags = pe_order_implies_then;
         }
 
     } else if (member_data->colocated) {
         uint32_t flags = pcmk__coloc_none;
 
         if (pcmk_is_set(member->flags, pe_rsc_critical)) {
             flags |= pcmk__coloc_influence;
         }
 
         // Colocate this member with the previous one
         pcmk__new_colocation("#group-members", NULL, INFINITY, member,
                              member_data->previous_member, NULL, NULL, flags);
     }
 
     if (member_data->promotable) {
         // Demote group -> demote member -> group is demoted
         pcmk__order_resource_actions(member->parent, PCMK_ACTION_DEMOTE,
                                      member, PCMK_ACTION_DEMOTE, down_flags);
         pcmk__order_resource_actions(member, PCMK_ACTION_DEMOTE,
                                      member->parent, PCMK_ACTION_DEMOTED,
                                      post_down_flags);
 
         // Promote group -> promote member -> group is promoted
         pcmk__order_resource_actions(member, PCMK_ACTION_PROMOTE,
                                      member->parent, PCMK_ACTION_PROMOTED,
                                      pe_order_runnable_left
                                      |pe_order_implies_then
                                      |pe_order_implies_then_printed);
         pcmk__order_resource_actions(member->parent, PCMK_ACTION_PROMOTE,
                                      member, PCMK_ACTION_PROMOTE,
                                      pe_order_implies_first_printed);
     }
 
     // Stop group -> stop member -> group is stopped
     pcmk__order_stops(member->parent, member, down_flags);
     pcmk__order_resource_actions(member, PCMK_ACTION_STOP,
                                  member->parent, PCMK_ACTION_STOPPED,
                                  post_down_flags);
 
     // Start group -> start member -> group is started
     pcmk__order_starts(member->parent, member, pe_order_implies_first_printed);
     pcmk__order_resource_actions(member, PCMK_ACTION_START,
                                  member->parent, PCMK_ACTION_RUNNING,
                                  pe_order_runnable_left
                                  |pe_order_implies_then
                                  |pe_order_implies_then_printed);
 
     if (!member_data->ordered) {
         pcmk__order_starts(member->parent, member,
                            pe_order_implies_then
                            |pe_order_runnable_left
                            |pe_order_implies_first_printed);
         if (member_data->promotable) {
             pcmk__order_resource_actions(member->parent, PCMK_ACTION_PROMOTE,
                                          member, PCMK_ACTION_PROMOTE,
                                          pe_order_implies_then
                                          |pe_order_runnable_left
                                          |pe_order_implies_first_printed);
         }
 
     } else if (member_data->previous_member == NULL) {
         pcmk__order_starts(member->parent, member, pe_order_none);
         if (member_data->promotable) {
             pcmk__order_resource_actions(member->parent, PCMK_ACTION_PROMOTE,
                                          member, PCMK_ACTION_PROMOTE,
                                          pe_order_none);
         }
 
     } else {
         // Order this member relative to the previous one
 
         pcmk__order_starts(member_data->previous_member, member,
                            pe_order_implies_then|pe_order_runnable_left);
         pcmk__order_stops(member, member_data->previous_member,
                           pe_order_optional|pe_order_restart);
 
         /* In unusual circumstances (such as adding a new member to the middle
          * of a group with unmanaged later members), this member may be active
          * while the previous (new) member is inactive. In this situation, the
          * usual restart orderings will be irrelevant, so we need to order this
          * member's stop before the previous member's start.
          */
         if ((member->running_on != NULL)
             && (member_data->previous_member->running_on == NULL)) {
             pcmk__order_resource_actions(member, PCMK_ACTION_STOP,
                                          member_data->previous_member,
                                          PCMK_ACTION_START,
                                          pe_order_implies_first
                                          |pe_order_runnable_left);
         }
 
         if (member_data->promotable) {
             pcmk__order_resource_actions(member_data->previous_member,
                                          PCMK_ACTION_PROMOTE, member,
                                          PCMK_ACTION_PROMOTE,
                                          pe_order_implies_then
                                          |pe_order_runnable_left);
             pcmk__order_resource_actions(member, PCMK_ACTION_DEMOTE,
                                          member_data->previous_member,
                                          PCMK_ACTION_DEMOTE,
                                          pe_order_optional);
         }
     }
 
     // Make sure partially active groups shut down in sequence
     if (member->running_on != NULL) {
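         /* For ordered groups, if the previous member is inactive while an
          * earlier member is still active, the member-by-member stop chain is
          * broken there, so order this member's stop before the stop of the
          * most recent earlier member that is active.
          */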
         if (member_data->ordered && (member_data->previous_member != NULL)
             && (member_data->previous_member->running_on == NULL)
             && (member_data->last_active != NULL)
             && (member_data->last_active->running_on != NULL)) {
             pcmk__order_stops(member, member_data->last_active,
                               pe_order_optional);
         }
         member_data->last_active = member;
     }
 
     member_data->previous_member = member;
 }
 
 /*!
  * \internal
  * \brief Create implicit constraints needed for a group resource
  *
  * \param[in,out] rsc  Group resource to create implicit constraints for
  */
 void
 pcmk__group_internal_constraints(pe_resource_t *rsc)
 {
     struct member_data member_data = { false, };
     const pe_resource_t *top = NULL;
 
-    CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group));
+    CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
 
     /* Order group pseudo-actions relative to each other for restarting:
      * stop group -> group is stopped -> start group -> group is started
      */
     pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP,
                                  rsc, PCMK_ACTION_STOPPED,
                                  pe_order_runnable_left);
     pcmk__order_resource_actions(rsc, PCMK_ACTION_STOPPED,
                                  rsc, PCMK_ACTION_START,
                                  pe_order_optional);
     pcmk__order_resource_actions(rsc, PCMK_ACTION_START,
                                  rsc, PCMK_ACTION_RUNNING,
                                  pe_order_runnable_left);
 
     top = pe__const_top_resource(rsc, false);
 
     member_data.ordered = pe__group_flag_is_set(rsc, pe__group_ordered);
     member_data.colocated = pe__group_flag_is_set(rsc, pe__group_colocated);
     member_data.promotable = pcmk_is_set(top->flags, pe_rsc_promotable);
     g_list_foreach(rsc->children, member_internal_constraints, &member_data);
 }
 
 /*!
  * \internal
  * \brief Apply a colocation's score to node scores or resource priority
  *
  * Given a colocation constraint for a group with some other resource, apply the
  * score to the dependent's allowed node scores (if we are still placing
  * resources) or priority (if we are choosing promotable clone instance roles).
  *
  * \param[in,out] dependent      Dependent group resource in colocation
  * \param[in]     primary        Primary resource in colocation
  * \param[in]     colocation     Colocation constraint to apply
  */
 static void
 colocate_group_with(pe_resource_t *dependent, const pe_resource_t *primary,
                     const pcmk__colocation_t *colocation)
 {
     pe_resource_t *member = NULL;
 
     if (dependent->children == NULL) {
         return;
     }
 
     pe_rsc_trace(primary, "Processing %s (group %s with %s) for dependent",
                  colocation->id, dependent->id, primary->id);
 
     if (pe__group_flag_is_set(dependent, pe__group_colocated)) {
         // Colocate first member (internal colocations will handle the rest)
         member = (pe_resource_t *) dependent->children->data;
         member->cmds->apply_coloc_score(member, primary, colocation, true);
         return;
     }
 
     if (colocation->score >= INFINITY) {
         pcmk__config_err("%s: Cannot perform mandatory colocation between "
                          "non-colocated group and %s",
                          dependent->id, primary->id);
         return;
     }
 
     // Colocate each member individually
     for (GList *iter = dependent->children; iter != NULL; iter = iter->next) {
         member = (pe_resource_t *) iter->data;
         member->cmds->apply_coloc_score(member, primary, colocation, true);
     }
 }
 
 /*!
  * \internal
  * \brief Apply a colocation's score to node scores or resource priority
  *
  * Given a colocation constraint for some other resource with a group, apply the
  * score to the dependent's allowed node scores (if we are still placing
  * resources) or priority (if we are choosing promotable clone instance roles).
  *
  * \param[in,out] dependent      Dependent resource in colocation
  * \param[in]     primary        Primary group resource in colocation
  * \param[in]     colocation     Colocation constraint to apply
  */
 static void
 colocate_with_group(pe_resource_t *dependent, const pe_resource_t *primary,
                     const pcmk__colocation_t *colocation)
 {
     const pe_resource_t *member = NULL;
 
     pe_rsc_trace(primary,
                  "Processing colocation %s (%s with group %s) for primary",
                  colocation->id, dependent->id, primary->id);
 
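     // Nothing to do if the primary group has not been assigned to a node yet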
     if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
         return;
     }
 
     if (pe__group_flag_is_set(primary, pe__group_colocated)) {
 
         if (colocation->score >= INFINITY) {
             /* For mandatory colocations, the entire group must be assignable
              * (and in the specified role if any), so apply the colocation based
              * on the last member.
              */
             member = pe__last_group_member(primary);
         } else if (primary->children != NULL) {
             /* For optional colocations, whether the group is partially or fully
              * up doesn't matter, so apply the colocation based on the first
              * member.
              */
             member = (pe_resource_t *) primary->children->data;
         }
         if (member == NULL) {
             return; // Nothing to colocate with
         }
 
         member->cmds->apply_coloc_score(dependent, member, colocation, false);
         return;
     }
 
     if (colocation->score >= INFINITY) {
         pcmk__config_err("%s: Cannot perform mandatory colocation with"
                          " non-colocated group %s",
                          dependent->id, primary->id);
         return;
     }
 
     // Colocate dependent with each member individually
     for (const GList *iter = primary->children; iter != NULL;
          iter = iter->next) {
         member = iter->data;
         member->cmds->apply_coloc_score(dependent, member, colocation, false);
     }
 }
 
 /*!
  * \internal
  * \brief Apply a colocation's score to node scores or resource priority
  *
  * Given a colocation constraint, apply its score to the dependent's
  * allowed node scores (if we are still placing resources) or priority (if
  * we are choosing promotable clone instance roles).
  *
  * \param[in,out] dependent      Dependent resource in colocation
  * \param[in]     primary        Primary resource in colocation
  * \param[in]     colocation     Colocation constraint to apply
  * \param[in]     for_dependent  true if called on behalf of dependent
  */
 void
 pcmk__group_apply_coloc_score(pe_resource_t *dependent,
                               const pe_resource_t *primary,
                               const pcmk__colocation_t *colocation,
                               bool for_dependent)
 {
     CRM_ASSERT((dependent != NULL) && (primary != NULL)
                && (colocation != NULL));
 
     if (for_dependent) {
         colocate_group_with(dependent, primary, colocation);
 
     } else {
         // Method should only be called for primitive dependents
         CRM_ASSERT(dependent->variant == pcmk_rsc_variant_primitive);
 
         colocate_with_group(dependent, primary, colocation);
     }
 }
 
 /*!
  * \internal
  * \brief Return action flags for a given group resource action
  *
  * \param[in,out] action  Group action to get flags for
  * \param[in]     node    If not NULL, limit effects to this node
  *
  * \return Flags appropriate to \p action on \p node
  */
 uint32_t
 pcmk__group_action_flags(pe_action_t *action, const pe_node_t *node)
 {
     // Default flags for a group action
     uint32_t flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;
 
     CRM_ASSERT(action != NULL);
 
     // Update flags considering each member's own flags for same action
     for (GList *iter = action->rsc->children; iter != NULL; iter = iter->next) {
         pe_resource_t *member = (pe_resource_t *) iter->data;
 
         // Check whether member has the same action
         enum action_tasks task = get_complex_task(member, action->task);
         const char *task_s = task2text(task);
         pe_action_t *member_action = find_first_action(member->actions, NULL,
                                                        task_s, node);
 
         if (member_action != NULL) {
             uint32_t member_flags = member->cmds->action_flags(member_action,
                                                                node);
 
             // Group action is mandatory if any member action is
             if (pcmk_is_set(flags, pe_action_optional)
                 && !pcmk_is_set(member_flags, pe_action_optional)) {
                 pe_rsc_trace(action->rsc, "%s is mandatory because %s is",
                              action->uuid, member_action->uuid);
                 pe__clear_raw_action_flags(flags, "group action",
                                            pe_action_optional);
                 pe__clear_action_flags(action, pe_action_optional);
             }
 
             // Group action is unrunnable if any member action is
             if (!pcmk__str_eq(task_s, action->task, pcmk__str_none)
                 && pcmk_is_set(flags, pe_action_runnable)
                 && !pcmk_is_set(member_flags, pe_action_runnable)) {
 
                 pe_rsc_trace(action->rsc, "%s is unrunnable because %s is",
                              action->uuid, member_action->uuid);
                 pe__clear_raw_action_flags(flags, "group action",
                                            pe_action_runnable);
                 pe__clear_action_flags(action, pe_action_runnable);
             }
 
         /* Group (pseudo-)actions other than stop or demote are unrunnable
          * unless every member will do it.
          */
         } else if ((task != pcmk_action_stop) && (task != pcmk_action_demote)) {
             pe_rsc_trace(action->rsc,
                          "%s is not runnable because %s will not %s",
                          action->uuid, member->id, task_s);
             pe__clear_raw_action_flags(flags, "group action",
                                        pe_action_runnable);
         }
     }
 
     return flags;
 }
 
 /*!
  * \internal
  * \brief Update two actions according to an ordering between them
  *
  * Given information about an ordering of two actions, update the actions' flags
  * (and runnable_before members if appropriate) as appropriate for the ordering.
  * Effects may cascade to other orderings involving the actions as well.
  *
  * \param[in,out] first     'First' action in an ordering
  * \param[in,out] then      'Then' action in an ordering
  * \param[in]     node      If not NULL, limit scope of ordering to this node
  *                          (only used when interleaving instances)
  * \param[in]     flags     Action flags for \p first for ordering purposes
  * \param[in]     filter    Action flags to limit scope of certain updates (may
  *                          include pe_action_optional to affect only mandatory
  *                          actions, and pe_action_runnable to affect only
  *                          runnable actions)
  * \param[in]     type      Group of enum pe_ordering flags to apply
  * \param[in,out] data_set  Cluster working set
  *
  * \return Group of enum pcmk__updated flags indicating what was updated
  */
 uint32_t
 pcmk__group_update_ordered_actions(pe_action_t *first, pe_action_t *then,
                                    const pe_node_t *node, uint32_t flags,
                                    uint32_t filter, uint32_t type,
                                    pe_working_set_t *data_set)
 {
     uint32_t changed = pcmk__updated_none;
 
     // Group method can be called only on behalf of "then" action
     CRM_ASSERT((first != NULL) && (then != NULL) && (then->rsc != NULL)
                && (data_set != NULL));
 
     // Update the actions for the group itself
     changed |= pcmk__update_ordered_actions(first, then, node, flags, filter,
                                             type, data_set);
 
     // Update the actions for each group member
     for (GList *iter = then->rsc->children; iter != NULL; iter = iter->next) {
         pe_resource_t *member = (pe_resource_t *) iter->data;
 
         pe_action_t *member_action = find_first_action(member->actions, NULL,
                                                        then->task, node);
 
         if (member_action != NULL) {
             changed |= member->cmds->update_ordered_actions(first,
                                                             member_action, node,
                                                             flags, filter, type,
                                                             data_set);
         }
     }
     return changed;
 }
 
 /*!
  * \internal
  * \brief Apply a location constraint to a group's allowed node scores
  *
  * \param[in,out] rsc       Group resource to apply constraint to
  * \param[in,out] location  Location constraint to apply
  */
 void
 pcmk__group_apply_location(pe_resource_t *rsc, pe__location_t *location)
 {
     GList *node_list_orig = NULL;
     GList *node_list_copy = NULL;
     bool reset_scores = true;
 
-    CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group)
+    CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group)
                && (location != NULL));
 
     node_list_orig = location->node_list_rh;
     node_list_copy = pcmk__copy_node_list(node_list_orig, true);
     reset_scores = pe__group_flag_is_set(rsc, pe__group_colocated);
 
     // Apply the constraint for the group itself (updates node scores)
     pcmk__apply_location(rsc, location);
 
     // Apply the constraint for each member
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         pe_resource_t *member = (pe_resource_t *) iter->data;
 
         member->cmds->apply_location(member, location);
 
         if (reset_scores) {
             /* The first member of colocated groups needs to use the original
              * node scores, but subsequent members should work on a copy, since
              * the first member's scores already incorporate theirs.
              */
             reset_scores = false;
             location->node_list_rh = node_list_copy;
         }
     }
 
     location->node_list_rh = node_list_orig;
     g_list_free_full(node_list_copy, free);
 }
 
 // Group implementation of resource_alloc_functions_t:colocated_resources()
 GList *
 pcmk__group_colocated_resources(const pe_resource_t *rsc,
                                 const pe_resource_t *orig_rsc,
                                 GList *colocated_rscs)
 {
     const pe_resource_t *member = NULL;
 
-    CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group));
+    CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
 
     if (orig_rsc == NULL) {
         orig_rsc = rsc;
     }
 
     if (pe__group_flag_is_set(rsc, pe__group_colocated)
         || pe_rsc_is_clone(rsc->parent)) {
         /* This group has colocated members and/or is cloned -- either way,
          * add every child's colocated resources to the list. The first and last
          * members will include the group's own colocations.
          */
         colocated_rscs = g_list_prepend(colocated_rscs, (gpointer) rsc);
         for (const GList *iter = rsc->children;
              iter != NULL; iter = iter->next) {
 
             member = (const pe_resource_t *) iter->data;
             colocated_rscs = member->cmds->colocated_resources(member, orig_rsc,
                                                                colocated_rscs);
         }
 
     } else if (rsc->children != NULL) {
         /* This group's members are not colocated, and the group is not cloned,
          * so just add the group's own colocations to the list.
          */
         colocated_rscs = pcmk__colocated_resources(rsc, orig_rsc,
                                                    colocated_rscs);
     }
 
     return colocated_rscs;
 }
 
 // Group implementation of resource_alloc_functions_t:with_this_colocations()
 void
 pcmk__with_group_colocations(const pe_resource_t *rsc,
                              const pe_resource_t *orig_rsc, GList **list)
 {
-    CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group)
+    CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group)
                && (orig_rsc != NULL) && (list != NULL));
 
     // Ignore empty groups
     if (rsc->children == NULL) {
         return;
     }
 
     /* "With this" colocations are needed only for the group itself and for its
      * last member. (Previous members will chain via the group internal
      * colocations.)
      */
     if ((orig_rsc != rsc) && (orig_rsc != pe__last_group_member(rsc))) {
         return;
     }
 
     pe_rsc_trace(rsc, "Adding 'with %s' colocations to list for %s",
                  rsc->id, orig_rsc->id);
 
     // Add the group's own colocations
     pcmk__add_with_this_list(list, rsc->rsc_cons_lhs, orig_rsc);
 
     // If cloned, add any relevant colocations with the clone
     if (rsc->parent != NULL) {
         rsc->parent->cmds->with_this_colocations(rsc->parent, orig_rsc,
                                                  list);
     }
 
     if (!pe__group_flag_is_set(rsc, pe__group_colocated)) {
         // @COMPAT Non-colocated groups are deprecated
         return;
     }
 
     // Add explicit colocations with the group's (other) children
     for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         const pe_resource_t *member = iter->data;
 
         if (member != orig_rsc) {
             member->cmds->with_this_colocations(member, orig_rsc, list);
         }
     }
 }
 
 // Group implementation of resource_alloc_functions_t:this_with_colocations()
 void
 pcmk__group_with_colocations(const pe_resource_t *rsc,
                              const pe_resource_t *orig_rsc, GList **list)
 {
     const pe_resource_t *member = NULL;
 
-    CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group)
+    CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group)
                && (orig_rsc != NULL) && (list != NULL));
 
     // Ignore empty groups
     if (rsc->children == NULL) {
         return;
     }
 
     /* "This with" colocations are normally needed only for the group itself and
      * for its first member.
      */
     if ((rsc == orig_rsc)
         || (orig_rsc == (const pe_resource_t *) rsc->children->data)) {
         pe_rsc_trace(rsc, "Adding '%s with' colocations to list for %s",
                      rsc->id, orig_rsc->id);
 
         // Add the group's own colocations
         pcmk__add_this_with_list(list, rsc->rsc_cons, orig_rsc);
 
         // If cloned, add any relevant colocations involving the clone
         if (rsc->parent != NULL) {
             rsc->parent->cmds->this_with_colocations(rsc->parent, orig_rsc,
                                                      list);
         }
 
         if (!pe__group_flag_is_set(rsc, pe__group_colocated)) {
             // @COMPAT Non-colocated groups are deprecated
             return;
         }
 
         // Add explicit colocations involving the group's (other) children
         for (const GList *iter = rsc->children;
              iter != NULL; iter = iter->next) {
             member = iter->data;
             if (member != orig_rsc) {
                 member->cmds->this_with_colocations(member, orig_rsc, list);
             }
         }
         return;
     }
 
     /* Later group members honor the group's colocations indirectly, due to the
      * internal group colocations that chain everything from the first member.
      * However, if an earlier group member is unmanaged, this chaining will not
      * happen, so the group's mandatory colocations must be explicitly added.
      */
     for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         member = iter->data;
         if (orig_rsc == member) {
             break; // We've seen all earlier members, and none are unmanaged
         }
 
         if (!pcmk_is_set(member->flags, pe_rsc_managed)) {
             crm_trace("Adding mandatory '%s with' colocations to list for "
                       "member %s because earlier member %s is unmanaged",
                       rsc->id, orig_rsc->id, member->id);
             for (const GList *cons_iter = rsc->rsc_cons; cons_iter != NULL;
                  cons_iter = cons_iter->next) {
                 const pcmk__colocation_t *colocation = NULL;
 
                 colocation = (const pcmk__colocation_t *) cons_iter->data;
                 if (colocation->score == INFINITY) {
                     pcmk__add_this_with(list, colocation, orig_rsc);
                 }
             }
             // @TODO Add mandatory (or all?) clone constraints if cloned
             break;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Update nodes with scores of colocated resources' nodes
  *
  * Given a table of nodes and a resource, update the nodes' scores with the
  * scores of the best nodes matching the attribute used for each of the
  * resource's relevant colocations.
  *
  * \param[in,out] source_rsc  Group resource whose node scores to add
  * \param[in]     target_rsc  Resource on whose behalf to update \p *nodes
  * \param[in]     log_id      Resource ID for logs (if \c NULL, use
  *                            \p source_rsc ID)
  * \param[in,out] nodes       Nodes to update (set initial contents to \c NULL
  *                            to copy allowed nodes from \p source_rsc)
  * \param[in]     colocation  Original colocation constraint (used to get
  *                            configured primary resource's stickiness, and
  *                            to get colocation node attribute; if \c NULL,
  *                            <tt>source_rsc</tt>'s own matching node scores will
  *                            not be added, and \p *nodes must be \c NULL as
  *                            well)
  * \param[in]     factor      Incorporate scores multiplied by this factor
  * \param[in]     flags       Bitmask of enum pcmk__coloc_select values
  *
  * \note \c NULL \p target_rsc, \c NULL \p *nodes, \c NULL \p colocation, and
  *       the \c pcmk__coloc_select_this_with flag are used together (and only by
  *       \c cmp_resources()).
  * \note The caller remains responsible for freeing \p *nodes.
  * \note This is the group implementation of
  *       \c resource_alloc_functions_t:add_colocated_node_scores().
  */
 void
 pcmk__group_add_colocated_node_scores(pe_resource_t *source_rsc,
                                       const pe_resource_t *target_rsc,
                                       const char *log_id, GHashTable **nodes,
                                       const pcmk__colocation_t *colocation,
                                       float factor, uint32_t flags)
 {
     pe_resource_t *member = NULL;
 
-    CRM_ASSERT((source_rsc != NULL) && (source_rsc->variant == pe_group)
+    CRM_ASSERT((source_rsc != NULL)
+               && (source_rsc->variant == pcmk_rsc_variant_group)
                && (nodes != NULL)
                && ((colocation != NULL)
                    || ((target_rsc == NULL) && (*nodes == NULL))));
 
     if (log_id == NULL) {
         log_id = source_rsc->id;
     }
 
     // Avoid infinite recursion
     if (pcmk_is_set(source_rsc->flags, pe_rsc_merging)) {
         pe_rsc_info(source_rsc, "%s: Breaking dependency loop at %s",
                     log_id, source_rsc->id);
         return;
     }
     pe__set_resource_flags(source_rsc, pe_rsc_merging);
 
     // Ignore empty groups (only possible with schema validation disabled)
     if (source_rsc->children == NULL) {
         return;
     }
 
     /* Refer the operation to the first or last member as appropriate.
      *
      * cmp_resources() is the only caller that passes a NULL nodes table,
      * and is also the only caller using pcmk__coloc_select_this_with.
      * For "this with" colocations, the last member will recursively incorporate
      * all the other members' "this with" colocations via the internal group
      * colocations (and via the first member, the group's own colocations).
      *
      * For "with this" colocations, the first member works similarly.
      */
     if (*nodes == NULL) {
         member = pe__last_group_member(source_rsc);
     } else {
         member = source_rsc->children->data;
     }
     pe_rsc_trace(source_rsc, "%s: Merging scores from group %s using member %s "
                  "(at %.6f)", log_id, source_rsc->id, member->id, factor);
     member->cmds->add_colocated_node_scores(member, target_rsc, log_id, nodes,
                                             colocation, factor, flags);
     pe__clear_resource_flags(source_rsc, pe_rsc_merging);
 }
 
 // Group implementation of resource_alloc_functions_t:add_utilization()
 void
 pcmk__group_add_utilization(const pe_resource_t *rsc,
                             const pe_resource_t *orig_rsc, GList *all_rscs,
                             GHashTable *utilization)
 {
     pe_resource_t *member = NULL;
 
-    CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group)
+    CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group)
                && (orig_rsc != NULL) && (utilization != NULL));
 
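     // Nothing to add if the group has already been assigned to a node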
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return;
     }
 
     pe_rsc_trace(orig_rsc, "%s: Adding group %s as colocated utilization",
                  orig_rsc->id, rsc->id);
     if (pe__group_flag_is_set(rsc, pe__group_colocated)
         || pe_rsc_is_clone(rsc->parent)) {
         // Every group member will be on the same node, so sum all members
         for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
             member = (pe_resource_t *) iter->data;
 
             if (pcmk_is_set(member->flags, pe_rsc_provisional)
                 && (g_list_find(all_rscs, member) == NULL)) {
                 member->cmds->add_utilization(member, orig_rsc, all_rscs,
                                               utilization);
             }
         }
 
     } else if (rsc->children != NULL) {
         // Just add first member's utilization
         member = (pe_resource_t *) rsc->children->data;
         if ((member != NULL)
             && pcmk_is_set(member->flags, pe_rsc_provisional)
             && (g_list_find(all_rscs, member) == NULL)) {
 
             member->cmds->add_utilization(member, orig_rsc, all_rscs,
                                           utilization);
         }
     }
 }
 
 // Group implementation of resource_alloc_functions_t:shutdown_lock()
 void
 pcmk__group_shutdown_lock(pe_resource_t *rsc)
 {
-    CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group));
+    CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
 
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         pe_resource_t *member = (pe_resource_t *) iter->data;
 
         member->cmds->shutdown_lock(member);
     }
 }
diff --git a/lib/pacemaker/pcmk_sched_probes.c b/lib/pacemaker/pcmk_sched_probes.c
index 357326733b..b421587f97 100644
--- a/lib/pacemaker/pcmk_sched_probes.c
+++ b/lib/pacemaker/pcmk_sched_probes.c
@@ -1,903 +1,903 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <glib.h>
 
 #include <crm/crm.h>
 #include <crm/pengine/status.h>
 #include <pacemaker-internal.h>
 #include "libpacemaker_private.h"
 
 /*!
  * \internal
  * \brief Add the expected result to a newly created probe
  *
  * \param[in,out] probe  Probe action to add expected result to
  * \param[in]     rsc    Resource that probe is for
  * \param[in]     node   Node that probe will run on
  */
 static void
 add_expected_result(pe_action_t *probe, const pe_resource_t *rsc,
                     const pe_node_t *node)
 {
     // Check whether resource is currently active on node
     pe_node_t *running = pe_find_node_id(rsc->running_on, node->details->id);
 
     // The expected result is what we think the resource's current state is
     if (running == NULL) {
         pe__add_action_expected_result(probe, CRM_EX_NOT_RUNNING);
 
     } else if (rsc->role == pcmk_role_promoted) {
         pe__add_action_expected_result(probe, CRM_EX_PROMOTED);
     }
 }
 
 /*!
  * \internal
  * \brief Create any needed probes on a node for a list of resources
  *
  * \param[in,out] rscs  List of resources to create probes for
  * \param[in,out] node  Node to create probes on
  *
  * \return true if any probe was created, otherwise false
  */
 bool
 pcmk__probe_resource_list(GList *rscs, pe_node_t *node)
 {
     bool any_created = false;
 
     for (GList *iter = rscs; iter != NULL; iter = iter->next) {
         pe_resource_t *rsc = (pe_resource_t *) iter->data;
 
         if (rsc->cmds->create_probe(rsc, node)) {
             any_created = true;
         }
     }
     return any_created;
 }
 
 /*!
  * \internal
  * \brief Order one resource's start after another's start-up probe
  *
  * \param[in,out] rsc1  Resource that might get start-up probe
  * \param[in]     rsc2  Resource that might be started
  */
 static void
 probe_then_start(pe_resource_t *rsc1, pe_resource_t *rsc2)
 {
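     /* Add the ordering only if rsc1 has been assigned to a node where its
      * state is not already known (that is, where it would get a start-up
      * probe)
      */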
     if ((rsc1->allocated_to != NULL)
         && (g_hash_table_lookup(rsc1->known_on,
                                 rsc1->allocated_to->details->id) == NULL)) {
 
         pcmk__new_ordering(rsc1,
                            pcmk__op_key(rsc1->id, PCMK_ACTION_MONITOR, 0),
                            NULL,
                            rsc2, pcmk__op_key(rsc2->id, PCMK_ACTION_START, 0),
                            NULL,
                            pe_order_optional, rsc1->cluster);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a guest resource will stop
  *
  * \param[in] node  Guest node to check
  *
  * \return true if guest resource will likely stop, otherwise false
  */
 static bool
 guest_resource_will_stop(const pe_node_t *node)
 {
     const pe_resource_t *guest_rsc = node->details->remote_rsc->container;
 
     /* Ideally, we'd check whether the guest has a required stop, but that
      * information doesn't exist yet, so approximate it ...
      */
     return node->details->remote_requires_reset
            || node->details->unclean
            || pcmk_is_set(guest_rsc->flags, pe_rsc_failed)
            || (guest_rsc->next_role == pcmk_role_stopped)
 
            // Guest is moving
            || ((guest_rsc->role > pcmk_role_stopped)
                && (guest_rsc->allocated_to != NULL)
                && (pe_find_node(guest_rsc->running_on,
                    guest_rsc->allocated_to->details->uname) == NULL));
 }
 
 /*!
  * \internal
  * \brief Create a probe action for a resource on a node
  *
  * \param[in,out] rsc   Resource to create probe for
  * \param[in,out] node  Node to create probe on
  *
  * \return Newly created probe action
  */
 static pe_action_t *
 probe_action(pe_resource_t *rsc, pe_node_t *node)
 {
     pe_action_t *probe = NULL;
     char *key = pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0);
 
     crm_debug("Scheduling probe of %s %s on %s",
               role2text(rsc->role), rsc->id, pe__node_name(node));
 
     probe = custom_action(rsc, key, PCMK_ACTION_MONITOR, node, FALSE, TRUE,
                           rsc->cluster);
     pe__clear_action_flags(probe, pe_action_optional);
 
     pcmk__order_vs_unfence(rsc, node, probe, pe_order_optional);
     add_expected_result(probe, rsc, node);
     return probe;
 }
 
 /*!
  * \internal
  * \brief Create any needed probes for a resource on a node
  *
  * \param[in,out] rsc   Resource to create probe for
  * \param[in,out] node  Node to create probe on
  *
  * \return true if any probe was created, otherwise false
  */
 bool
 pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node)
 {
     uint32_t flags = pe_order_optional;
     pe_action_t *probe = NULL;
     pe_node_t *allowed = NULL;
     pe_resource_t *top = uber_parent(rsc);
     const char *reason = NULL;
 
     CRM_ASSERT((rsc != NULL) && (node != NULL));
 
     if (!pcmk_is_set(rsc->cluster->flags, pe_flag_startup_probes)) {
         reason = "start-up probes are disabled";
         goto no_probe;
     }
 
     if (pe__is_guest_or_remote_node(node)) {
         const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
 
         if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_none)) {
             reason = "Pacemaker Remote nodes cannot run stonith agents";
             goto no_probe;
 
         } else if (pe__is_guest_node(node)
                    && pe__resource_contains_guest_node(rsc->cluster, rsc)) {
             reason = "guest nodes cannot run resources containing guest nodes";
             goto no_probe;
 
         } else if (rsc->is_remote_node) {
             reason = "Pacemaker Remote nodes cannot host remote connections";
             goto no_probe;
         }
     }
 
     // If this is a collective resource, probes are created for its children
     if (rsc->children != NULL) {
         return pcmk__probe_resource_list(rsc->children, node);
     }
 
     if ((rsc->container != NULL) && !rsc->is_remote_node) {
         reason = "resource is inside a container";
         goto no_probe;
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         reason = "resource is orphaned";
         goto no_probe;
 
     } else if (g_hash_table_lookup(rsc->known_on, node->details->id) != NULL) {
         reason = "resource state is already known";
         goto no_probe;
     }
 
     allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
 
     if (rsc->exclusive_discover || top->exclusive_discover) {
         // Exclusive discovery is enabled ...
 
         if (allowed == NULL) {
             // ... but this node is not allowed to run the resource
             reason = "resource has exclusive discovery but is not allowed "
                      "on node";
             goto no_probe;
 
         } else if (allowed->rsc_discover_mode != pe_discover_exclusive) {
             // ... but no constraint marks this node for discovery of resource
             reason = "resource has exclusive discovery but is not enabled "
                      "on node";
             goto no_probe;
         }
     }
 
     if (allowed == NULL) {
         allowed = node;
     }
     if (allowed->rsc_discover_mode == pe_discover_never) {
         reason = "node has discovery disabled";
         goto no_probe;
     }
 
     if (pe__is_guest_node(node)) {
         pe_resource_t *guest = node->details->remote_rsc->container;
 
         if (guest->role == pcmk_role_stopped) {
             // The guest is stopped, so we know no resource is active there
             reason = "node's guest is stopped";
             probe_then_start(guest, top);
             goto no_probe;
 
         } else if (guest_resource_will_stop(node)) {
             reason = "node's guest will stop";
 
             // Order resource start after guest stop (in case it's restarting)
             pcmk__new_ordering(guest,
                                pcmk__op_key(guest->id, PCMK_ACTION_STOP, 0),
                                NULL, top,
                                pcmk__op_key(top->id, PCMK_ACTION_START, 0),
                                NULL, pe_order_optional, rsc->cluster);
             goto no_probe;
         }
     }
 
     // We've eliminated all cases where a probe is not needed, so now it is
     probe = probe_action(rsc, node);
 
     /* Below, we will order the probe relative to start or reload. If this is a
      * clone instance, the start or reload is for the entire clone rather than
      * just the instance. Otherwise, the start or reload is for the resource
      * itself.
      */
     if (!pe_rsc_is_clone(top)) {
         top = rsc;
     }
 
     /* Prevent a start if the resource can't be probed, but don't cause the
      * resource or entire clone to stop if already active.
      */
     if (!pcmk_is_set(probe->flags, pe_action_runnable)
         && (top->running_on == NULL)) {
         pe__set_order_flags(flags, pe_order_runnable_left);
     }
 
     // Start or reload after probing the resource
     pcmk__new_ordering(rsc, NULL, probe,
                        top, pcmk__op_key(top->id, PCMK_ACTION_START, 0), NULL,
                        flags, rsc->cluster);
     pcmk__new_ordering(rsc, NULL, probe, top, reload_key(rsc), NULL,
                        pe_order_optional, rsc->cluster);
 
     return true;
 
 no_probe:
     pe_rsc_trace(rsc,
                  "Skipping probe for %s on %s because %s",
                  rsc->id, node->details->id, reason);
     return false;
 }
 
 /*!
  * \internal
  * \brief Check whether a probe should be ordered before another action
  *
  * \param[in] probe  Probe action to check
  * \param[in] then   Other action to check
  *
  * \return true if \p probe should be ordered before \p then, otherwise false
  */
 static bool
 probe_needed_before_action(const pe_action_t *probe, const pe_action_t *then)
 {
     // Probes on a node are performed after unfencing it, not before
     if (pcmk__str_eq(then->task, PCMK_ACTION_STONITH, pcmk__str_none)
         && pe__same_node(probe->node, then->node)) {
         const char *op = g_hash_table_lookup(then->meta, "stonith_action");
 
         if (pcmk__str_eq(op, PCMK_ACTION_ON, pcmk__str_casei)) {
             return false;
         }
     }
 
     // Probes should be done on a node before shutting it down
     if (pcmk__str_eq(then->task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_none)
         && (probe->node != NULL) && (then->node != NULL)
         && !pe__same_node(probe->node, then->node)) {
         return false;
     }
 
     // Otherwise probes should always be done before any other action
     return true;
 }
 
 /*!
  * \internal
  * \brief Add implicit "probe then X" orderings for "stop then X" orderings
  *
  * If the state of a resource is not known yet, a probe will be scheduled,
  * expecting a "not running" result. If the probe fails, a stop will not be
  * scheduled until the next transition. Thus, if there are ordering constraints
  * like "stop this resource then do something else that's not for the same
  * resource", add implicit "probe this resource then do something" equivalents
  * so the relation is upheld until we know whether a stop is needed.
  *
  * \param[in,out] data_set  Cluster working set
  */
 static void
 add_probe_orderings_for_stops(pe_working_set_t *data_set)
 {
     for (GList *iter = data_set->ordering_constraints; iter != NULL;
          iter = iter->next) {
 
         pe__ordering_t *order = iter->data;
         uint32_t order_flags = pe_order_optional;
         GList *probes = NULL;
         GList *then_actions = NULL;
         pe_action_t *first = NULL;
         pe_action_t *then = NULL;
 
         // Skip disabled orderings
         if (order->flags == pe_order_none) {
             continue;
         }
 
         // Skip non-resource orderings, and orderings for the same resource
         if ((order->lh_rsc == NULL) || (order->lh_rsc == order->rh_rsc)) {
             continue;
         }
 
         // Skip invalid orderings (shouldn't be possible)
         first = order->lh_action;
         then = order->rh_action;
         if (((first == NULL) && (order->lh_action_task == NULL))
             || ((then == NULL) && (order->rh_action_task == NULL))) {
             continue;
         }
 
         // Skip orderings for first actions other than stop
         if ((first != NULL) && !pcmk__str_eq(first->task, PCMK_ACTION_STOP,
                                              pcmk__str_none)) {
             continue;
         } else if ((first == NULL)
                    && !pcmk__ends_with(order->lh_action_task,
                                        "_" PCMK_ACTION_STOP "_0")) {
             continue;
         }
 
         /* Do not imply a probe ordering for a resource inside of a stopping
          * container. Otherwise, it might introduce a transition loop, since a
          * probe could be scheduled after the container starts again.
          */
         if ((order->rh_rsc != NULL)
             && (order->lh_rsc->container == order->rh_rsc)) {
 
             if ((then != NULL) && pcmk__str_eq(then->task, PCMK_ACTION_STOP,
                                                pcmk__str_none)) {
                 continue;
             } else if ((then == NULL)
                        && pcmk__ends_with(order->rh_action_task,
                                           "_" PCMK_ACTION_STOP "_0")) {
                 continue;
             }
         }
 
         // Preserve certain order options for future filtering
         if (pcmk_is_set(order->flags, pe_order_apply_first_non_migratable)) {
             pe__set_order_flags(order_flags,
                                 pe_order_apply_first_non_migratable);
         }
         if (pcmk_is_set(order->flags, pe_order_same_node)) {
             pe__set_order_flags(order_flags, pe_order_same_node);
         }
 
         // Preserve certain order types for future filtering
         if ((order->flags == pe_order_anti_colocation)
             || (order->flags == pe_order_load)) {
             order_flags = order->flags;
         }
 
         // List all scheduled probes for the first resource
         probes = pe__resource_actions(order->lh_rsc, NULL, PCMK_ACTION_MONITOR,
                                       FALSE);
         if (probes == NULL) { // There aren't any
             continue;
         }
 
         // List all relevant "then" actions
         if (then != NULL) {
             then_actions = g_list_prepend(NULL, then);
 
         } else if (order->rh_rsc != NULL) {
             then_actions = find_actions(order->rh_rsc->actions,
                                         order->rh_action_task, NULL);
             if (then_actions == NULL) { // There aren't any
                 g_list_free(probes);
                 continue;
             }
         }
 
         crm_trace("Implying 'probe then' orderings for '%s then %s' "
                   "(id=%d, type=%.6x)",
                   ((first == NULL)? order->lh_action_task : first->uuid),
                   ((then == NULL)? order->rh_action_task : then->uuid),
                   order->id, order->flags);
 
         for (GList *probe_iter = probes; probe_iter != NULL;
              probe_iter = probe_iter->next) {
 
             pe_action_t *probe = (pe_action_t *) probe_iter->data;
 
             for (GList *then_iter = then_actions; then_iter != NULL;
                  then_iter = then_iter->next) {
 
                 pe_action_t *then = (pe_action_t *) then_iter->data;
 
                 if (probe_needed_before_action(probe, then)) {
                     order_actions(probe, then, order_flags);
                 }
             }
         }
 
         g_list_free(then_actions);
         g_list_free(probes);
     }
 }
 
 /*!
  * \internal
  * \brief Add necessary orderings between a probe and starts of clone instances
  *
  * This is in addition to the ordering with the parent resource added upon
  * creating the probe.
  *
  * \param[in,out] probe     Probe as 'first' action in an ordering
  * \param[in,out] after     'then' action wrapper in the ordering
  */
 static void
 add_start_orderings_for_probe(pe_action_t *probe, pe_action_wrapper_t *after)
 {
     uint32_t flags = pe_order_optional|pe_order_runnable_left;
 
     /* Although the ordering between the probe of the clone instance and the
      * start of its parent has been added in pcmk__probe_rsc_on_node(), we
      * avoided enforcing `pe_order_runnable_left` order type for that as long as
      * any of the clone instances are running to prevent them from being
      * unexpectedly stopped.
      *
      * On the other hand, we still need to prevent any inactive instances from
      * starting unless the probe is runnable so that we don't risk starting too
      * many instances before we know the state on all nodes.
      */
-    if ((after->action->rsc->variant <= pe_group)
+    if ((after->action->rsc->variant <= pcmk_rsc_variant_group)
         || pcmk_is_set(probe->flags, pe_action_runnable)
         // The order type is already enforced for its parent.
         || pcmk_is_set(after->type, pe_order_runnable_left)
         || (pe__const_top_resource(probe->rsc, false) != after->action->rsc)
         || !pcmk__str_eq(after->action->task, PCMK_ACTION_START,
                          pcmk__str_none)) {
         return;
     }
 
     crm_trace("Adding probe start orderings for 'unrunnable %s@%s "
               "then instances of %s@%s'",
               probe->uuid, pe__node_name(probe->node),
               after->action->uuid, pe__node_name(after->action->node));
 
     for (GList *then_iter = after->action->actions_after; then_iter != NULL;
          then_iter = then_iter->next) {
 
         pe_action_wrapper_t *then = (pe_action_wrapper_t *) then_iter->data;
 
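         // Skip instances that are already active, actions belonging to other
         // resources, and actions other than start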
         if (then->action->rsc->running_on
             || (pe__const_top_resource(then->action->rsc, false)
                 != after->action->rsc)
             || !pcmk__str_eq(then->action->task, PCMK_ACTION_START,
                              pcmk__str_none)) {
             continue;
         }
 
         crm_trace("Adding probe start ordering for 'unrunnable %s@%s "
                   "then %s@%s' (type=%#.6x)",
                   probe->uuid, pe__node_name(probe->node),
                   then->action->uuid, pe__node_name(then->action->node), flags);
 
         /* Prevent the instance from starting if the instance can't, but don't
          * cause any other instances to stop if already active.
          */
         order_actions(probe, then->action, flags);
     }
 
     return;
 }
 
 /*!
  * \internal
  * \brief Order probes before restarts and re-promotes
  *
  * If a given ordering is a "probe then start" or "probe then promote" ordering,
  * add an implicit "probe then stop/demote" ordering in case the action is part
  * of a restart/re-promote, and do the same recursively for all actions ordered
  * after the "then" action.
  *
  * \param[in,out] probe     Probe as 'first' action in an ordering
  * \param[in,out] after     'then' action in the ordering
  */
 static void
 add_restart_orderings_for_probe(pe_action_t *probe, pe_action_t *after)
 {
     GList *iter = NULL;
     bool interleave = false;
     pe_resource_t *compatible_rsc = NULL;
 
     // Validate that this is a resource probe followed by some action
     if ((after == NULL) || (probe == NULL) || (probe->rsc == NULL)
         || (probe->rsc->variant != pcmk_rsc_variant_primitive)
         || !pcmk__str_eq(probe->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
         return;
     }
 
     // Avoid running into any possible loop
     if (pcmk_is_set(after->flags, pe_action_tracking)) {
         return;
     }
     pe__set_action_flags(after, pe_action_tracking);
 
     crm_trace("Adding probe restart orderings for '%s@%s then %s@%s'",
               probe->uuid, pe__node_name(probe->node),
               after->uuid, pe__node_name(after->node));
 
     /* Add restart orderings if "then" is for a different primitive.
      * Orderings for collective resources will be added later.
      */
     if ((after->rsc != NULL)
         && (after->rsc->variant == pcmk_rsc_variant_primitive)
         && (probe->rsc != after->rsc)) {
 
             GList *then_actions = NULL;
 
             if (pcmk__str_eq(after->task, PCMK_ACTION_START, pcmk__str_none)) {
                 then_actions = pe__resource_actions(after->rsc, NULL,
                                                     PCMK_ACTION_STOP, FALSE);
 
             } else if (pcmk__str_eq(after->task, PCMK_ACTION_PROMOTE,
                                     pcmk__str_none)) {
                 then_actions = pe__resource_actions(after->rsc, NULL,
                                                     PCMK_ACTION_DEMOTE, FALSE);
             }
 
             for (iter = then_actions; iter != NULL; iter = iter->next) {
                 pe_action_t *then = (pe_action_t *) iter->data;
 
                 // Skip pseudo-actions (for example, those implied by fencing)
                 if (!pcmk_is_set(then->flags, pe_action_pseudo)) {
                     order_actions(probe, then, pe_order_optional);
                 }
             }
             g_list_free(then_actions);
     }
 
     /* Detect whether "then" is an interleaved clone action. For these, we want
      * to add orderings only for the relevant instance.
      */
     if ((after->rsc != NULL)
-        && (after->rsc->variant > pe_group)) {
+        && (after->rsc->variant > pcmk_rsc_variant_group)) {
         const char *interleave_s = g_hash_table_lookup(after->rsc->meta,
                                                        XML_RSC_ATTR_INTERLEAVE);
 
         interleave = crm_is_true(interleave_s);
         if (interleave) {
             compatible_rsc = pcmk__find_compatible_instance(probe->rsc,
                                                             after->rsc,
                                                             pcmk_role_unknown,
                                                             false);
         }
     }
 
     /* Now recursively do the same for all actions ordered after "then". This
      * also handles collective resources since the collective action will be
      * ordered before its individual instances' actions.
      */
     for (iter = after->actions_after; iter != NULL; iter = iter->next) {
         pe_action_wrapper_t *after_wrapper = (pe_action_wrapper_t *) iter->data;
 
         /* pe_order_implies_then is the reason why a required A.start
          * implies/enforces B.start to be required too, which is the cause of
          * B.restart/re-promote.
          *
          * Not sure about pe_order_implies_then_on_node though. It's now only
          * used for the unfencing case, which tends to introduce transition
          * loops...
          */
         if (!pcmk_is_set(after_wrapper->type, pe_order_implies_then)) {
             /* The order type between a group/clone and its child such as
              * B.start-> B_child.start is:
              * pe_order_implies_first_printed | pe_order_runnable_left
              *
              * Proceed through the ordering chain and build dependencies with
              * its children.
              */
             if ((after->rsc == NULL)
-                || (after->rsc->variant < pe_group)
+                || (after->rsc->variant < pcmk_rsc_variant_group)
                 || (probe->rsc->parent == after->rsc)
                 || (after_wrapper->action->rsc == NULL)
-                || (after_wrapper->action->rsc->variant > pe_group)
+                || (after_wrapper->action->rsc->variant > pcmk_rsc_variant_group)
                 || (after->rsc != after_wrapper->action->rsc->parent)) {
                 continue;
             }
 
             /* Proceed to the children of a group or a non-interleaved clone.
              * For an interleaved clone, proceed only to the relevant child.
              */
-            if ((after->rsc->variant > pe_group) && interleave
+            if ((after->rsc->variant > pcmk_rsc_variant_group) && interleave
                 && ((compatible_rsc == NULL)
                     || (compatible_rsc != after_wrapper->action->rsc))) {
                 continue;
             }
         }
 
         crm_trace("Recursively adding probe restart orderings for "
                   "'%s@%s then %s@%s' (type=%#.6x)",
                   after->uuid, pe__node_name(after->node),
                   after_wrapper->action->uuid,
                   pe__node_name(after_wrapper->action->node),
                   after_wrapper->type);
 
         add_restart_orderings_for_probe(probe, after_wrapper->action);
     }
 }
 
 /*!
  * \internal
  * \brief Clear the tracking flag on all scheduled actions
  *
  * \param[in,out] data_set  Cluster working set
  */
 static void
 clear_actions_tracking_flag(pe_working_set_t *data_set)
 {
     for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
         pe_action_t *action = iter->data;
 
         pe__clear_action_flags(action, pe_action_tracking);
     }
 }
 
 /*!
  * \internal
  * \brief Add start and restart orderings for probes scheduled for a resource
  *
  * \param[in,out] data       Resource whose probes should be ordered
  * \param[in]     user_data  Unused
  */
 static void
 add_start_restart_orderings_for_rsc(gpointer data, gpointer user_data)
 {
     pe_resource_t *rsc = data;
     GList *probes = NULL;
 
     // For collective resources, order each instance recursively
     if (rsc->variant != pcmk_rsc_variant_primitive) {
         g_list_foreach(rsc->children, add_start_restart_orderings_for_rsc,
                        NULL);
         return;
     }
 
     // Find all probes for given resource
     probes = pe__resource_actions(rsc, NULL, PCMK_ACTION_MONITOR, FALSE);
 
     // Add probe restart orderings for each probe found
     for (GList *iter = probes; iter != NULL; iter = iter->next) {
         pe_action_t *probe = (pe_action_t *) iter->data;
 
         for (GList *then_iter = probe->actions_after; then_iter != NULL;
              then_iter = then_iter->next) {
 
             pe_action_wrapper_t *then = (pe_action_wrapper_t *) then_iter->data;
 
             add_start_orderings_for_probe(probe, then);
             add_restart_orderings_for_probe(probe, then->action);
             clear_actions_tracking_flag(rsc->cluster);
         }
     }
 
     g_list_free(probes);
 }
 
 /*!
  * \internal
  * \brief Add "A then probe B" orderings for "A then B" orderings
  *
  * \param[in,out] data_set  Cluster working set
  *
  * \note This function is currently disabled (see next comment).
  */
 static void
 order_then_probes(pe_working_set_t *data_set)
 {
 #if 0
     /* Given an ordering "A then B", we would prefer to wait for A to be started
      * before probing B.
      *
      * For example, if A is a filesystem which B can't even run without, it
      * would be helpful if the author of B's agent could assume that A is
      * running before B.monitor will be called.
      *
      * However, we can't _only_ probe after A is running, otherwise we wouldn't
      * detect the state of B if A could not be started. We can't even do an
      * opportunistic version of this, because B may be moving:
      *
      *   A.stop -> A.start -> B.probe -> B.stop -> B.start
      *
      * and if we add B.stop -> A.stop here, we get a loop:
      *
      *   A.stop -> A.start -> B.probe -> B.stop -> A.stop
      *
      * We could kill the "B.probe -> B.stop" dependency, but that could mean
      * stopping B "too" soon, because B.start must wait for the probe, and
      * we don't want to stop B if we can't start it.
      *
      * We could add the ordering only if A is an anonymous clone with
      * clone-max == node-max (since we'll never be moving it). However, we could
      * still be stopping one instance at the same time as starting another.
      *
      * The complexity of checking for allowed conditions combined with the ever
      * narrowing use case suggests that this code should remain disabled until
      * someone gets smarter.
      */
     for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
         pe_resource_t *rsc = (pe_resource_t *) iter->data;
 
         pe_action_t *start = NULL;
         GList *actions = NULL;
         GList *probes = NULL;
 
         actions = pe__resource_actions(rsc, NULL, PCMK_ACTION_START, FALSE);
 
         if (actions) {
             start = actions->data;
             g_list_free(actions);
         }
 
         if (start == NULL) {
             crm_err("No start action for %s", rsc->id);
             continue;
         }
 
         probes = pe__resource_actions(rsc, NULL, PCMK_ACTION_MONITOR, FALSE);
 
         for (actions = start->actions_before; actions != NULL;
              actions = actions->next) {
 
             pe_action_wrapper_t *before = (pe_action_wrapper_t *) actions->data;
 
             pe_action_t *first = before->action;
             pe_resource_t *first_rsc = first->rsc;
 
             if (first->required_runnable_before) {
                 for (GList *clone_actions = first->actions_before;
                      clone_actions != NULL;
                      clone_actions = clone_actions->next) {
 
                     before = (pe_action_wrapper_t *) clone_actions->data;
 
                     crm_trace("Testing '%s then %s' for %s",
                               first->uuid, before->action->uuid, start->uuid);
 
                     CRM_ASSERT(before->action->rsc != NULL);
                     first_rsc = before->action->rsc;
                     break;
                 }
 
             } else if (!pcmk__str_eq(first->task, PCMK_ACTION_START,
                                      pcmk__str_none)) {
                 crm_trace("Not a start op %s for %s", first->uuid, start->uuid);
             }
 
             if (first_rsc == NULL) {
                 continue;
 
             } else if (pe__const_top_resource(first_rsc, false)
                        == pe__const_top_resource(start->rsc, false)) {
                 crm_trace("Same parent %s for %s", first_rsc->id, start->uuid);
                 continue;
 
             } else if (!pe_rsc_is_clone(pe__const_top_resource(first_rsc,
                                                                false))) {
                 crm_trace("Not a clone %s for %s", first_rsc->id, start->uuid);
                 continue;
             }
 
             crm_err("Applying %s before %s %d", first->uuid, start->uuid,
                     pe__const_top_resource(first_rsc, false)->variant);
 
             for (GList *probe_iter = probes; probe_iter != NULL;
                  probe_iter = probe_iter->next) {
 
                 pe_action_t *probe = (pe_action_t *) probe_iter->data;
 
                 crm_err("Ordering %s before %s", first->uuid, probe->uuid);
                 order_actions(first, probe, pe_order_optional);
             }
         }
     }
 #endif
 }
 
 void
 pcmk__order_probes(pe_working_set_t *data_set)
 {
     // Add orderings for "probe then X"
     g_list_foreach(data_set->resources, add_start_restart_orderings_for_rsc,
                    NULL);
     add_probe_orderings_for_stops(data_set);
 
     order_then_probes(data_set);
 }
 
 /*!
  * \internal
  * \brief Schedule any probes needed
  *
  * \param[in,out] data_set  Cluster working set
  *
  * \note This may also schedule fencing of failed remote nodes.
  */
 void
 pcmk__schedule_probes(pe_working_set_t *data_set)
 {
     // Schedule probes on each node in the cluster as needed
     for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
         pe_node_t *node = (pe_node_t *) iter->data;
         const char *probed = NULL;
 
         if (!node->details->online) { // Don't probe offline nodes
             if (pcmk__is_failed_remote_node(node)) {
                 pe_fence_node(data_set, node,
                               "the connection is unrecoverable", FALSE);
             }
             continue;
 
         } else if (node->details->unclean) { // ... or nodes that need fencing
             continue;
 
         } else if (!node->details->rsc_discovery_enabled) {
             // The user requested that probes not be done on this node
             continue;
         }
 
         /* This is no longer needed for live clusters, since the probe_complete
          * node attribute will never be in the CIB. However, this is still useful
          * for processing old saved CIBs (< 1.1.14), including the
          * reprobe-target_rc regression test.
          */
         probed = pe_node_attribute_raw(node, CRM_OP_PROBED);
         if (probed != NULL && crm_is_true(probed) == FALSE) {
             pe_action_t *probe_op = NULL;
 
             probe_op = custom_action(NULL,
                                      crm_strdup_printf("%s-%s", CRM_OP_REPROBE,
                                                        node->details->uname),
                                      CRM_OP_REPROBE, node, FALSE, TRUE,
                                      data_set);
             add_hash_param(probe_op->meta, XML_ATTR_TE_NOWAIT,
                            XML_BOOLEAN_TRUE);
             continue;
         }
 
         // Probe each resource in the cluster on this node, as needed
         pcmk__probe_resource_list(data_set->resources, node);
     }
 }
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index ce78aef608..d544c3fe2b 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -1,1178 +1,1178 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml_internal.h>
 #include <crm/common/scheduler_internal.h>
 
 #include "pe_status_private.h"
 
 void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length);
 
 static pe_node_t *active_node(const pe_resource_t *rsc, unsigned int *count_all,
                               unsigned int *count_clean);
 
 resource_object_functions_t resource_class_functions[] = {
     {
          native_unpack,
          native_find_rsc,
          native_parameter,
          native_print,
          native_active,
          native_resource_state,
          native_location,
          native_free,
          pe__count_common,
          pe__native_is_filtered,
          active_node,
          pe__primitive_max_per_node,
     },
     {
          group_unpack,
          native_find_rsc,
          native_parameter,
          group_print,
          group_active,
          group_resource_state,
          native_location,
          group_free,
          pe__count_common,
          pe__group_is_filtered,
          active_node,
          pe__group_max_per_node,
     },
     {
          clone_unpack,
          native_find_rsc,
          native_parameter,
          clone_print,
          clone_active,
          clone_resource_state,
          native_location,
          clone_free,
          pe__count_common,
          pe__clone_is_filtered,
          active_node,
          pe__clone_max_per_node,
     },
     {
          pe__unpack_bundle,
          native_find_rsc,
          native_parameter,
          pe__print_bundle,
          pe__bundle_active,
          pe__bundle_resource_state,
          native_location,
          pe__free_bundle,
          pe__count_bundle,
          pe__bundle_is_filtered,
          pe__bundle_active_node,
          pe__bundle_max_per_node,
     }
 };
 
 static enum pe_obj_types
 get_resource_type(const char *name)
 {
     if (pcmk__str_eq(name, XML_CIB_TAG_RESOURCE, pcmk__str_casei)) {
         return pcmk_rsc_variant_primitive;
 
     } else if (pcmk__str_eq(name, XML_CIB_TAG_GROUP, pcmk__str_casei)) {
-        return pe_group;
+        return pcmk_rsc_variant_group;
 
     } else if (pcmk__str_eq(name, XML_CIB_TAG_INCARNATION, pcmk__str_casei)) {
         return pe_clone;
 
     } else if (pcmk__str_eq(name, PCMK_XE_PROMOTABLE_LEGACY, pcmk__str_casei)) {
         // @COMPAT deprecated since 2.0.0
         return pe_clone;
 
     } else if (pcmk__str_eq(name, XML_CIB_TAG_CONTAINER, pcmk__str_casei)) {
         return pe_container;
     }
 
     return pcmk_rsc_variant_unknown;
 }
 
 static void
 dup_attr(gpointer key, gpointer value, gpointer user_data)
 {
     add_hash_param(user_data, key, value);
 }
 
 static void
 expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_data, GHashTable * meta_hash, pe_working_set_t * data_set)
 {
     GHashTable *parent_orig_meta = pcmk__strkey_table(free, free);
     pe_resource_t *p = rsc->parent;
 
     if (p == NULL) {
         return ;
     }
 
     /* Search all parent resources and collect the "meta_attributes" values set
      * directly in each parent's original XML, stacking them in the hash table.
      * Values from closer (lower) parent resources take precedence and are not
      * overwritten.
      */
     while(p != NULL) {
         /* Build a hash table for comparison, including sets referenced via id-ref. */
         pe__unpack_dataset_nvpairs(p->xml, XML_TAG_META_SETS,
                                rule_data, parent_orig_meta, NULL, FALSE, data_set);
         p = p->parent; 
     }
 
     /* If any parent resource has directly set "meta_attributes" values,
      * process them now.
      */
     if (parent_orig_meta != NULL) {
         GHashTableIter iter;
         char *key = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, parent_orig_meta);
         while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
             /* Parameters set in the parent resource's original XML are applied
              * to the child resource as well, but attributes that already exist
              * in the child resource are not updated.
              */
             dup_attr(key, value, meta_hash);
         }
     }
 
     if (parent_orig_meta != NULL) {
         g_hash_table_destroy(parent_orig_meta);
     }
     
     return ;
 
 }
 void
 get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
                     pe_node_t * node, pe_working_set_t * data_set)
 {
     pe_rsc_eval_data_t rsc_rule_data = {
         .standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
         .provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER),
         .agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE)
     };
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .role = pcmk_role_unknown,
         .now = data_set->now,
         .match_data = NULL,
         .rsc_data = &rsc_rule_data,
         .op_data = NULL
     };
 
     if (node) {
         rule_data.node_hash = node->details->attrs;
     }
 
     for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->xml); a != NULL; a = a->next) {
         const char *prop_name = (const char *) a->name;
         const char *prop_value = pcmk__xml_attr_value(a);
 
         add_hash_param(meta_hash, prop_name, prop_value);
     }
 
     pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data,
                                meta_hash, NULL, FALSE, data_set);
 
     /* Add any "meta_attributes" explicitly set in the parent resources to the
      * child resource's hash table. Values already explicitly set in the child
      * are not overwritten.
      */
     if (rsc->parent != NULL) {
         expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, data_set);
     }
 
     /* check the defaults */
     pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS,
                                &rule_data, meta_hash, NULL, FALSE, data_set);
 
     /* Finally, inherit the parent resource's meta-attributes for anything still
      * unset here, covering values the parent did not set explicitly (such as
      * ones taken from rsc_defaults). Values already set up to this point are
      * not overwritten.
      */
     if (rsc->parent) {
         g_hash_table_foreach(rsc->parent->meta, dup_attr, meta_hash);
     }
 }
 
 void
 get_rsc_attributes(GHashTable *meta_hash, const pe_resource_t *rsc,
                    const pe_node_t *node, pe_working_set_t *data_set)
 {
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .role = pcmk_role_unknown,
         .now = data_set->now,
         .match_data = NULL,
         .rsc_data = NULL,
         .op_data = NULL
     };
 
     if (node) {
         rule_data.node_hash = node->details->attrs;
     }
 
     pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data,
                                meta_hash, NULL, FALSE, data_set);
 
     /* set anything else based on the parent */
     if (rsc->parent != NULL) {
         get_rsc_attributes(meta_hash, rsc->parent, node, data_set);
 
     } else {
         /* and finally check the defaults */
         pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS,
                                    &rule_data, meta_hash, NULL, FALSE, data_set);
     }
 }
 
 static char *
 template_op_key(xmlNode * op)
 {
     const char *name = crm_element_value(op, "name");
     const char *role = crm_element_value(op, "role");
     char *key = NULL;
 
     if ((role == NULL)
         || pcmk__strcase_any_of(role, PCMK__ROLE_STARTED, PCMK__ROLE_UNPROMOTED,
                                 PCMK__ROLE_UNPROMOTED_LEGACY, NULL)) {
         role = PCMK__ROLE_UNKNOWN;
     }
 
     key = crm_strdup_printf("%s-%s", name, role);
     return key;
 }
 
 static gboolean
 unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
 {
     xmlNode *cib_resources = NULL;
     xmlNode *template = NULL;
     xmlNode *new_xml = NULL;
     xmlNode *child_xml = NULL;
     xmlNode *rsc_ops = NULL;
     xmlNode *template_ops = NULL;
     const char *template_ref = NULL;
     const char *clone = NULL;
     const char *id = NULL;
 
     if (xml_obj == NULL) {
         pe_err("No resource object for template unpacking");
         return FALSE;
     }
 
     template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
     if (template_ref == NULL) {
         return TRUE;
     }
 
     id = ID(xml_obj);
     if (id == NULL) {
         pe_err("'%s' object must have a id", xml_obj->name);
         return FALSE;
     }
 
     if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
         pe_err("The resource object '%s' should not reference itself", id);
         return FALSE;
     }
 
     cib_resources = get_xpath_object("//"XML_CIB_TAG_RESOURCES, data_set->input, LOG_TRACE);
     if (cib_resources == NULL) {
         pe_err("No resources configured");
         return FALSE;
     }
 
     template = pcmk__xe_match(cib_resources, XML_CIB_TAG_RSC_TEMPLATE,
                               XML_ATTR_ID, template_ref);
     if (template == NULL) {
         pe_err("No template named '%s'", template_ref);
         return FALSE;
     }
 
     new_xml = copy_xml(template);
     xmlNodeSetName(new_xml, xml_obj->name);
     crm_xml_replace(new_xml, XML_ATTR_ID, id);
 
     clone = crm_element_value(xml_obj, XML_RSC_ATTR_INCARNATION);
     if(clone) {
         crm_xml_add(new_xml, XML_RSC_ATTR_INCARNATION, clone);
     }
 
     template_ops = find_xml_node(new_xml, "operations", FALSE);
 
     for (child_xml = pcmk__xe_first_child(xml_obj); child_xml != NULL;
          child_xml = pcmk__xe_next(child_xml)) {
         xmlNode *new_child = NULL;
 
         new_child = add_node_copy(new_xml, child_xml);
 
         if (pcmk__str_eq((const char *)new_child->name, "operations", pcmk__str_none)) {
             rsc_ops = new_child;
         }
     }
 
     if (template_ops && rsc_ops) {
         xmlNode *op = NULL;
         GHashTable *rsc_ops_hash = pcmk__strkey_table(free, NULL);
 
         for (op = pcmk__xe_first_child(rsc_ops); op != NULL;
              op = pcmk__xe_next(op)) {
 
             char *key = template_op_key(op);
 
             g_hash_table_insert(rsc_ops_hash, key, op);
         }
 
         for (op = pcmk__xe_first_child(template_ops); op != NULL;
              op = pcmk__xe_next(op)) {
 
             char *key = template_op_key(op);
 
             if (g_hash_table_lookup(rsc_ops_hash, key) == NULL) {
                 add_node_copy(rsc_ops, op);
             }
 
             free(key);
         }
 
         if (rsc_ops_hash) {
             g_hash_table_destroy(rsc_ops_hash);
         }
 
         free_xml(template_ops);
     }
 
     /*free_xml(*expanded_xml); */
     *expanded_xml = new_xml;
 
     /* Disable multi-level templates for now */
     /*if(unpack_template(new_xml, expanded_xml, data_set) == FALSE) {
        free_xml(*expanded_xml);
        *expanded_xml = NULL;
 
        return FALSE;
        } */
 
     return TRUE;
 }
 
 static gboolean
 add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
 {
     const char *template_ref = NULL;
     const char *id = NULL;
 
     if (xml_obj == NULL) {
         pe_err("No resource object for processing resource list of template");
         return FALSE;
     }
 
     template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
     if (template_ref == NULL) {
         return TRUE;
     }
 
     id = ID(xml_obj);
     if (id == NULL) {
         pe_err("'%s' object must have a id", xml_obj->name);
         return FALSE;
     }
 
     if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
         pe_err("The resource object '%s' should not reference itself", id);
         return FALSE;
     }
 
     if (add_tag_ref(data_set->template_rsc_sets, template_ref, id) == FALSE) {
         return FALSE;
     }
 
     return TRUE;
 }
 
 static bool
 detect_promotable(pe_resource_t *rsc)
 {
     const char *promotable = g_hash_table_lookup(rsc->meta,
                                                  XML_RSC_ATTR_PROMOTABLE);
 
     if (crm_is_true(promotable)) {
         return TRUE;
     }
 
     // @COMPAT deprecated since 2.0.0
     if (pcmk__xe_is(rsc->xml, PCMK_XE_PROMOTABLE_LEGACY)) {
         /* @TODO in some future version, pe_warn_once() here,
          *       then drop support in even later version
          */
         g_hash_table_insert(rsc->meta, strdup(XML_RSC_ATTR_PROMOTABLE),
                             strdup(XML_BOOLEAN_TRUE));
         return TRUE;
     }
     return FALSE;
 }
 
 static void
 free_params_table(gpointer data)
 {
     g_hash_table_destroy((GHashTable *) data);
 }
 
 /*!
  * \brief Get a table of resource parameters
  *
  * \param[in,out] rsc       Resource to query
  * \param[in]     node      Node for evaluating rules (NULL for defaults)
  * \param[in,out] data_set  Cluster working set
  *
  * \return Hash table containing resource parameter names and values
  *         (or NULL if \p rsc or \p data_set is NULL)
  * \note The returned table will be destroyed when the resource is freed, so
  *       callers should not destroy it.
  */
 GHashTable *
 pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
               pe_working_set_t *data_set)
 {
     GHashTable *params_on_node = NULL;
 
     /* A NULL node is used to request the resource's default parameters
      * (not evaluated for node), but we always want something non-NULL
      * as a hash table key.
      */
     const char *node_name = "";
 
     // Sanity check
     if ((rsc == NULL) || (data_set == NULL)) {
         return NULL;
     }
     if ((node != NULL) && (node->details->uname != NULL)) {
         node_name = node->details->uname;
     }
 
     // Find the parameter table for given node
     if (rsc->parameter_cache == NULL) {
         rsc->parameter_cache = pcmk__strikey_table(free, free_params_table);
     } else {
         params_on_node = g_hash_table_lookup(rsc->parameter_cache, node_name);
     }
 
     // If none exists yet, create one with parameters evaluated for node
     if (params_on_node == NULL) {
         params_on_node = pcmk__strkey_table(free, free);
         get_rsc_attributes(params_on_node, rsc, node, data_set);
         g_hash_table_insert(rsc->parameter_cache, strdup(node_name),
                             params_on_node);
     }
     return params_on_node;
 }
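 
 /* Illustrative sketch (not part of the original patch): how a caller might use
  * pe_rsc_params() to look up a node-evaluated parameter value. The helper name
  * and the parameter name "ip" are assumptions for the example only. The
  * returned table is cached on the resource and freed with it, so the caller
  * must not destroy it.
  */
 static const char *
 example_lookup_param(pe_resource_t *rsc, const pe_node_t *node,
                      pe_working_set_t *data_set)
 {
     GHashTable *params = pe_rsc_params(rsc, node, data_set);
 
     if (params == NULL) {
         return NULL; // rsc or data_set was NULL
     }
     // Hypothetical parameter name, for illustration only
     return g_hash_table_lookup(params, "ip");
 }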
 
 /*!
  * \internal
  * \brief Unpack a resource's "requires" meta-attribute
  *
  * \param[in,out] rsc         Resource being unpacked
  * \param[in]     value       Value of "requires" meta-attribute
  * \param[in]     is_default  Whether \p value was selected by default
  */
 static void
 unpack_requires(pe_resource_t *rsc, const char *value, bool is_default)
 {
     if (pcmk__str_eq(value, PCMK__VALUE_NOTHING, pcmk__str_casei)) {
 
     } else if (pcmk__str_eq(value, PCMK__VALUE_QUORUM, pcmk__str_casei)) {
         pe__set_resource_flags(rsc, pe_rsc_needs_quorum);
 
     } else if (pcmk__str_eq(value, PCMK__VALUE_FENCING, pcmk__str_casei)) {
         pe__set_resource_flags(rsc, pe_rsc_needs_fencing);
         if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
             pcmk__config_warn("%s requires fencing but fencing is disabled",
                               rsc->id);
         }
 
     } else if (pcmk__str_eq(value, PCMK__VALUE_UNFENCING, pcmk__str_casei)) {
         if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
             pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
                               "to \"" PCMK__VALUE_QUORUM "\" because fencing "
                               "devices cannot require unfencing", rsc->id);
             unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
             return;
 
         } else if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
             pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
                               "to \"" PCMK__VALUE_QUORUM "\" because fencing "
                               "is disabled", rsc->id);
             unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
             return;
 
         } else {
             pe__set_resource_flags(rsc,
                                    pe_rsc_needs_fencing|pe_rsc_needs_unfencing);
         }
 
     } else {
         const char *orig_value = value;
 
         if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
             value = PCMK__VALUE_QUORUM;
 
         } else if ((rsc->variant == pcmk_rsc_variant_primitive)
                    && xml_contains_remote_node(rsc->xml)) {
             value = PCMK__VALUE_QUORUM;
 
         } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing)) {
             value = PCMK__VALUE_UNFENCING;
 
         } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
             value = PCMK__VALUE_FENCING;
 
         } else if (rsc->cluster->no_quorum_policy == no_quorum_ignore) {
             value = PCMK__VALUE_NOTHING;
 
         } else {
             value = PCMK__VALUE_QUORUM;
         }
 
         if (orig_value != NULL) {
             pcmk__config_err("Resetting '" XML_RSC_ATTR_REQUIRES "' for %s "
                              "to '%s' because '%s' is not valid",
                               rsc->id, value, orig_value);
         }
         unpack_requires(rsc, value, true);
         return;
     }
 
     pe_rsc_trace(rsc, "\tRequired to start: %s%s", value,
                  (is_default? " (default)" : ""));
 }
 
 #ifndef PCMK__COMPAT_2_0
 static void
 warn_about_deprecated_classes(pe_resource_t *rsc)
 {
     const char *std = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
 
     if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_UPSTART, pcmk__str_none)) {
         pe_warn_once(pe_wo_upstart,
                      "Support for Upstart resources (such as %s) is deprecated "
                      "and will be removed in a future release of Pacemaker",
                      rsc->id);
 
     } else if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_none)) {
         pe_warn_once(pe_wo_nagios,
                      "Support for Nagios resources (such as %s) is deprecated "
                      "and will be removed in a future release of Pacemaker",
                      rsc->id);
     }
 }
 #endif
 
 /*!
  * \internal
  * \brief Unpack configuration XML for a given resource
  *
  * Unpack the XML object containing a resource's configuration into a new
  * \c pe_resource_t object.
  *
  * \param[in]     xml_obj   XML node containing the resource's configuration
  * \param[out]    rsc       Where to store the unpacked resource information
  * \param[in]     parent    Resource's parent, if any
  * \param[in,out] data_set  Cluster working set
  *
  * \return Standard Pacemaker return code
  * \note If pcmk_rc_ok is returned, \p *rsc is guaranteed to be non-NULL, and
  *       the caller is responsible for freeing it using its variant-specific
  *       free() method. Otherwise, \p *rsc is guaranteed to be NULL.
  */
 int
 pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
                     pe_resource_t *parent, pe_working_set_t *data_set)
 {
     xmlNode *expanded_xml = NULL;
     xmlNode *ops = NULL;
     const char *value = NULL;
     const char *id = NULL;
     bool guest_node = false;
     bool remote_node = false;
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .role = pcmk_role_unknown,
         .now = NULL,
         .match_data = NULL,
         .rsc_data = NULL,
         .op_data = NULL
     };
 
     CRM_CHECK(rsc != NULL, return EINVAL);
     CRM_CHECK((xml_obj != NULL) && (data_set != NULL),
               *rsc = NULL;
               return EINVAL);
 
     rule_data.now = data_set->now;
 
     crm_log_xml_trace(xml_obj, "[raw XML]");
 
     id = crm_element_value(xml_obj, XML_ATTR_ID);
     if (id == NULL) {
         pe_err("Ignoring <%s> configuration without " XML_ATTR_ID,
                xml_obj->name);
         return pcmk_rc_unpack_error;
     }
 
     if (unpack_template(xml_obj, &expanded_xml, data_set) == FALSE) {
         return pcmk_rc_unpack_error;
     }
 
     *rsc = calloc(1, sizeof(pe_resource_t));
     if (*rsc == NULL) {
         crm_crit("Unable to allocate memory for resource '%s'", id);
         return ENOMEM;
     }
     (*rsc)->cluster = data_set;
 
     if (expanded_xml) {
         crm_log_xml_trace(expanded_xml, "[expanded XML]");
         (*rsc)->xml = expanded_xml;
         (*rsc)->orig_xml = xml_obj;
 
     } else {
         (*rsc)->xml = xml_obj;
         (*rsc)->orig_xml = NULL;
     }
 
     /* Do not use xml_obj from here on, use (*rsc)->xml in case templates are involved */
 
     (*rsc)->parent = parent;
 
     ops = find_xml_node((*rsc)->xml, "operations", FALSE);
     (*rsc)->ops_xml = expand_idref(ops, data_set->input);
 
     (*rsc)->variant = get_resource_type((const char *) (*rsc)->xml->name);
     if ((*rsc)->variant == pcmk_rsc_variant_unknown) {
         pe_err("Ignoring resource '%s' of unknown type '%s'",
                id, (*rsc)->xml->name);
         common_free(*rsc);
         *rsc = NULL;
         return pcmk_rc_unpack_error;
     }
 
 #ifndef PCMK__COMPAT_2_0
     warn_about_deprecated_classes(*rsc);
 #endif
 
     (*rsc)->meta = pcmk__strkey_table(free, free);
     (*rsc)->allowed_nodes = pcmk__strkey_table(NULL, free);
     (*rsc)->known_on = pcmk__strkey_table(NULL, free);
 
     value = crm_element_value((*rsc)->xml, XML_RSC_ATTR_INCARNATION);
     if (value) {
         (*rsc)->id = crm_strdup_printf("%s:%s", id, value);
         add_hash_param((*rsc)->meta, XML_RSC_ATTR_INCARNATION, value);
 
     } else {
         (*rsc)->id = strdup(id);
     }
 
     (*rsc)->fns = &resource_class_functions[(*rsc)->variant];
 
     get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set);
     (*rsc)->parameters = pe_rsc_params(*rsc, NULL, data_set); // \deprecated
 
     (*rsc)->flags = 0;
     pe__set_resource_flags(*rsc, pe_rsc_runnable|pe_rsc_provisional);
 
     if (!pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
         pe__set_resource_flags(*rsc, pe_rsc_managed);
     }
 
     (*rsc)->rsc_cons = NULL;
     (*rsc)->rsc_tickets = NULL;
     (*rsc)->actions = NULL;
     (*rsc)->role = pcmk_role_stopped;
     (*rsc)->next_role = pcmk_role_unknown;
 
     (*rsc)->recovery_type = pcmk_multiply_active_restart;
     (*rsc)->stickiness = 0;
     (*rsc)->migration_threshold = INFINITY;
     (*rsc)->failure_timeout = 0;
 
     value = g_hash_table_lookup((*rsc)->meta, XML_CIB_ATTR_PRIORITY);
     (*rsc)->priority = char2score(value);
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CRITICAL);
     if ((value == NULL) || crm_is_true(value)) {
         pe__set_resource_flags(*rsc, pe_rsc_critical);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY);
     if (crm_is_true(value)) {
         pe__set_resource_flags(*rsc, pe_rsc_notify);
     }
 
     if (xml_contains_remote_node((*rsc)->xml)) {
         (*rsc)->is_remote_node = TRUE;
         if (g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CONTAINER)) {
             guest_node = true;
         } else {
             remote_node = true;
         }
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_OP_ATTR_ALLOW_MIGRATE);
     if (crm_is_true(value)) {
         pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
     } else if ((value == NULL) && remote_node) {
         /* By default, we want remote nodes to be able
          * to float around the cluster without having to stop all the
          * resources within the remote-node before moving. Allowing
          * migration support enables this feature. If this ever causes
          * problems, migration support can be explicitly turned off with
          * allow-migrate=false.
          */
         pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MANAGED);
     if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
         if (crm_is_true(value)) {
             pe__set_resource_flags(*rsc, pe_rsc_managed);
         } else {
             pe__clear_resource_flags(*rsc, pe_rsc_managed);
         }
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MAINTENANCE);
     if (crm_is_true(value)) {
         pe__clear_resource_flags(*rsc, pe_rsc_managed);
         pe__set_resource_flags(*rsc, pe_rsc_maintenance);
     }
     if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
         pe__clear_resource_flags(*rsc, pe_rsc_managed);
         pe__set_resource_flags(*rsc, pe_rsc_maintenance);
     }
 
     if (pe_rsc_is_clone(pe__const_top_resource(*rsc, false))) {
         value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_UNIQUE);
         if (crm_is_true(value)) {
             pe__set_resource_flags(*rsc, pe_rsc_unique);
         }
         if (detect_promotable(*rsc)) {
             pe__set_resource_flags(*rsc, pe_rsc_promotable);
         }
     } else {
         pe__set_resource_flags(*rsc, pe_rsc_unique);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_RESTART);
     if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
         (*rsc)->restart_type = pe_restart_restart;
         pe_rsc_trace((*rsc), "%s dependency restart handling: restart",
                      (*rsc)->id);
         pe_warn_once(pe_wo_restart_type,
                      "Support for restart-type is deprecated and will be removed in a future release");
 
     } else {
         (*rsc)->restart_type = pe_restart_ignore;
         pe_rsc_trace((*rsc), "%s dependency restart handling: ignore",
                      (*rsc)->id);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MULTIPLE);
     if (pcmk__str_eq(value, "stop_only", pcmk__str_casei)) {
         (*rsc)->recovery_type = pcmk_multiply_active_stop;
         pe_rsc_trace((*rsc), "%s multiple running resource recovery: stop only",
                      (*rsc)->id);
 
     } else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
         (*rsc)->recovery_type = pcmk_multiply_active_block;
         pe_rsc_trace((*rsc), "%s multiple running resource recovery: block",
                      (*rsc)->id);
 
     } else if (pcmk__str_eq(value, "stop_unexpected", pcmk__str_casei)) {
         (*rsc)->recovery_type = pcmk_multiply_active_unexpected;
         pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
                              "stop unexpected instances",
                      (*rsc)->id);
 
     } else { // "stop_start"
         if (!pcmk__str_eq(value, "stop_start",
                           pcmk__str_casei|pcmk__str_null_matches)) {
             pe_warn("%s is not a valid value for " XML_RSC_ATTR_MULTIPLE
                     ", using default of \"stop_start\"", value);
         }
         (*rsc)->recovery_type = pcmk_multiply_active_restart;
         pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
                              "stop/start", (*rsc)->id);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_STICKINESS);
     if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
         (*rsc)->stickiness = char2score(value);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_STICKINESS);
     if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
         (*rsc)->migration_threshold = char2score(value);
         if ((*rsc)->migration_threshold < 0) {
             /* @TODO We use 1 here to preserve previous behavior, but this
              * should probably use the default (INFINITY) or 0 (to disable)
              * instead.
              */
             pe_warn_once(pe_wo_neg_threshold,
                          XML_RSC_ATTR_FAIL_STICKINESS
                          " must be non-negative, using 1 instead");
             (*rsc)->migration_threshold = 1;
         }
     }
 
     if (pcmk__str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS),
                      PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
         pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
         pe__set_resource_flags(*rsc, pe_rsc_fence_device);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_REQUIRES);
     unpack_requires(*rsc, value, false);
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_TIMEOUT);
     if (value != NULL) {
         // Stored as seconds
         (*rsc)->failure_timeout = (int) (crm_parse_interval_spec(value) / 1000);
     }
 
     if (remote_node) {
         GHashTable *params = pe_rsc_params(*rsc, NULL, data_set);
 
         /* Grabbing the value now means that any rules based on node attributes
          * will evaluate to false, so such rules should not be used with
          * reconnect_interval.
          *
          * @TODO Evaluate per node before using
          */
         value = g_hash_table_lookup(params, XML_REMOTE_ATTR_RECONNECT_INTERVAL);
         if (value) {
             /* reconnect delay works by setting failure_timeout and preventing the
              * connection from starting until the failure is cleared. */
             (*rsc)->remote_reconnect_ms = crm_parse_interval_spec(value);
             /* we want to override any default failure_timeout in use when remote
              * reconnect_interval is in use. */ 
             (*rsc)->failure_timeout = (*rsc)->remote_reconnect_ms / 1000;
         }
     }
 
     get_target_role(*rsc, &((*rsc)->next_role));
     pe_rsc_trace((*rsc), "%s desired next state: %s", (*rsc)->id,
                  (*rsc)->next_role != pcmk_role_unknown? role2text((*rsc)->next_role) : "default");
 
     if ((*rsc)->fns->unpack(*rsc, data_set) == FALSE) {
         (*rsc)->fns->free(*rsc);
         *rsc = NULL;
         return pcmk_rc_unpack_error;
     }
 
     if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
         // This tag must stay exactly the same because it is tested elsewhere
         resource_location(*rsc, NULL, 0, "symmetric_default", data_set);
     } else if (guest_node) {
         /* Remote resources tied to a container resource must always be allowed
          * to opt in to the cluster. Whether the connection resource is actually
          * allowed to be placed on a node depends on the container resource. */
         resource_location(*rsc, NULL, 0, "remote_connection_default", data_set);
     }
 
     pe_rsc_trace((*rsc), "%s action notification: %s", (*rsc)->id,
                  pcmk_is_set((*rsc)->flags, pe_rsc_notify)? "required" : "not required");
 
     (*rsc)->utilization = pcmk__strkey_table(free, free);
 
     pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data,
                                (*rsc)->utilization, NULL, FALSE, data_set);
 
     if (expanded_xml) {
         if (add_template_rsc(xml_obj, data_set) == FALSE) {
             (*rsc)->fns->free(*rsc);
             *rsc = NULL;
             return pcmk_rc_unpack_error;
         }
     }
     return pcmk_rc_ok;
 }
 
 gboolean
 is_parent(pe_resource_t *child, pe_resource_t *rsc)
 {
     pe_resource_t *parent = child;
 
     if (parent == NULL || rsc == NULL) {
         return FALSE;
     }
     while (parent->parent != NULL) {
         if (parent->parent == rsc) {
             return TRUE;
         }
         parent = parent->parent;
     }
     return FALSE;
 }
 
 pe_resource_t *
 uber_parent(pe_resource_t * rsc)
 {
     pe_resource_t *parent = rsc;
 
     if (parent == NULL) {
         return NULL;
     }
     while (parent->parent != NULL && parent->parent->variant != pe_container) {
         parent = parent->parent;
     }
     return parent;
 }
 
 /*!
  * \internal
  * \brief Get the topmost parent of a resource as a const pointer
  *
  * \param[in] rsc             Resource to check
  * \param[in] include_bundle  If true, go all the way to bundle
  *
  * \return \p NULL if \p rsc is NULL, \p rsc if \p rsc has no parent,
  *         the bundle if \p rsc is bundled and \p include_bundle is true,
  *         otherwise the topmost parent of \p rsc up to a clone
  */
 const pe_resource_t *
 pe__const_top_resource(const pe_resource_t *rsc, bool include_bundle)
 {
     const pe_resource_t *parent = rsc;
 
     if (parent == NULL) {
         return NULL;
     }
     while (parent->parent != NULL) {
         if (!include_bundle && (parent->parent->variant == pe_container)) {
             break;
         }
         parent = parent->parent;
     }
     return parent;
 }
 
 void
 common_free(pe_resource_t * rsc)
 {
     if (rsc == NULL) {
         return;
     }
 
     pe_rsc_trace(rsc, "Freeing %s %d", rsc->id, rsc->variant);
 
     g_list_free(rsc->rsc_cons);
     g_list_free(rsc->rsc_cons_lhs);
     g_list_free(rsc->rsc_tickets);
     g_list_free(rsc->dangling_migrations);
 
     if (rsc->parameter_cache != NULL) {
         g_hash_table_destroy(rsc->parameter_cache);
     }
     if (rsc->meta != NULL) {
         g_hash_table_destroy(rsc->meta);
     }
     if (rsc->utilization != NULL) {
         g_hash_table_destroy(rsc->utilization);
     }
 
     if ((rsc->parent == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         free_xml(rsc->xml);
         rsc->xml = NULL;
         free_xml(rsc->orig_xml);
         rsc->orig_xml = NULL;
 
         /* if rsc->orig_xml, then rsc->xml is an expanded xml from a template */
     } else if (rsc->orig_xml) {
         free_xml(rsc->xml);
         rsc->xml = NULL;
     }
     if (rsc->running_on) {
         g_list_free(rsc->running_on);
         rsc->running_on = NULL;
     }
     if (rsc->known_on) {
         g_hash_table_destroy(rsc->known_on);
         rsc->known_on = NULL;
     }
     if (rsc->actions) {
         g_list_free(rsc->actions);
         rsc->actions = NULL;
     }
     if (rsc->allowed_nodes) {
         g_hash_table_destroy(rsc->allowed_nodes);
         rsc->allowed_nodes = NULL;
     }
     g_list_free(rsc->fillers);
     g_list_free(rsc->rsc_location);
     pe_rsc_trace(rsc, "Resource freed");
     free(rsc->id);
     free(rsc->clone_name);
     free(rsc->allocated_to);
     free(rsc->variant_opaque);
     free(rsc->pending_task);
     free(rsc);
 }
 
 /*!
  * \internal
  * \brief Count a node and update most preferred to it as appropriate
  *
  * \param[in]     rsc          An active resource
  * \param[in]     node         A node that \p rsc is active on
  * \param[in,out] active       This will be set to \p node if \p node is more
  *                             preferred than the current value
  * \param[in,out] count_all    If not NULL, this will be incremented
  * \param[in,out] count_clean  If not NULL, this will be incremented if \p node
  *                             is online and clean
  *
  * \return true if the count should continue, or false if sufficiently known
  */
 bool
 pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
                       pe_node_t **active, unsigned int *count_all,
                       unsigned int *count_clean)
 {
     bool keep_looking = false;
     bool is_happy = false;
 
     CRM_CHECK((rsc != NULL) && (node != NULL) && (active != NULL),
               return false);
 
     is_happy = node->details->online && !node->details->unclean;
 
     if (count_all != NULL) {
         ++*count_all;
     }
     if ((count_clean != NULL) && is_happy) {
         ++*count_clean;
     }
     if ((count_all != NULL) || (count_clean != NULL)) {
         keep_looking = true; // We're counting, so go through entire list
     }
 
     if (rsc->partial_migration_source != NULL) {
         if (node->details == rsc->partial_migration_source->details) {
             *active = node; // This is the migration source
         } else {
             keep_looking = true;
         }
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
         if (is_happy && ((*active == NULL) || !(*active)->details->online
                          || (*active)->details->unclean)) {
             *active = node; // This is the first clean node
         } else {
             keep_looking = true;
         }
     }
     if (*active == NULL) {
         *active = node; // This is the first node checked
     }
     return keep_looking;
 }
 
 // Shared implementation of resource_object_functions_t:active_node()
 static pe_node_t *
 active_node(const pe_resource_t *rsc, unsigned int *count_all,
             unsigned int *count_clean)
 {
     pe_node_t *active = NULL;
 
     if (count_all != NULL) {
         *count_all = 0;
     }
     if (count_clean != NULL) {
         *count_clean = 0;
     }
     if (rsc == NULL) {
         return NULL;
     }
     for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
         if (!pe__count_active_node(rsc, (pe_node_t *) iter->data, &active,
                                    count_all, count_clean)) {
             break; // Don't waste time iterating if we don't have to
         }
     }
     return active;
 }
 
 /*!
  * \internal
  * \brief Find and count active nodes according to "requires"
  *
  * \param[in]  rsc    Resource to check
  * \param[out] count  If not NULL, will be set to count of active nodes
  *
  * \return An active node (or NULL if resource is not active anywhere)
  *
  * \note This is a convenience wrapper for active_node() where the count of all
  *       active nodes or only clean active nodes is desired according to the
  *       "requires" meta-attribute.
  */
 pe_node_t *
 pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
 {
     if (rsc == NULL) {
         if (count != NULL) {
             *count = 0;
         }
         return NULL;
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
         return rsc->fns->active_node(rsc, count, NULL);
 
     } else {
         return rsc->fns->active_node(rsc, NULL, count);
     }
 }
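 
 /* Illustrative sketch (not part of the original patch): a caller using
  * pe__find_active_requires() to report where a resource is active. The
  * function name and log text are assumptions for the example only.
  */
 static void
 example_report_active(const pe_resource_t *rsc)
 {
     unsigned int count = 0;
     pe_node_t *node = NULL;
 
     if (rsc == NULL) {
         return;
     }
     node = pe__find_active_requires(rsc, &count);
     if (node != NULL) {
         crm_info("%s is active on %u node(s), including %s",
                  rsc->id, count, pe__node_name(node));
     } else {
         crm_info("%s is not active on any node", rsc->id);
     }
 }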
 
 void
 pe__count_common(pe_resource_t *rsc)
 {
     if (rsc->children != NULL) {
         for (GList *item = rsc->children; item != NULL; item = item->next) {
             ((pe_resource_t *) item->data)->fns->count(item->data);
         }
 
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
                || (rsc->role > pcmk_role_stopped)) {
         rsc->cluster->ninstances++;
         if (pe__resource_is_disabled(rsc)) {
             rsc->cluster->disabled_resources++;
         }
         if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
             rsc->cluster->blocked_resources++;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Update a resource's next role
  *
  * \param[in,out] rsc   Resource to be updated
  * \param[in]     role  Resource's new next role
  * \param[in]     why   Human-friendly reason why role is changing (for logs)
  */
 void
 pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why)
 {
     CRM_ASSERT((rsc != NULL) && (why != NULL));
     if (rsc->next_role != role) {
         pe_rsc_trace(rsc, "Resetting next role for %s from %s to %s (%s)",
                      rsc->id, role2text(rsc->next_role), role2text(role), why);
         rsc->next_role = role;
     }
 }
diff --git a/lib/pengine/group.c b/lib/pengine/group.c
index 6277321ce4..2a1155fe93 100644
--- a/lib/pengine/group.c
+++ b/lib/pengine/group.c
@@ -1,536 +1,536 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdint.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <crm/common/output.h>
 #include <crm/common/strings_internal.h>
 #include <crm/common/xml_internal.h>
 #include <pe_status_private.h>
 
 typedef struct group_variant_data_s {
     pe_resource_t *last_child;  // Last group member
     uint32_t flags;             // Group of enum pe__group_flags
 } group_variant_data_t;
 
 /*!
  * \internal
  * \brief Get a group's last member
  *
  * \param[in] group  Group resource to check
  *
  * \return Last member of \p group if any, otherwise NULL
  */
 pe_resource_t *
 pe__last_group_member(const pe_resource_t *group)
 {
     if (group != NULL) {
-        CRM_CHECK((group->variant == pe_group)
+        CRM_CHECK((group->variant == pcmk_rsc_variant_group)
                   && (group->variant_opaque != NULL), return NULL);
         return ((group_variant_data_t *) group->variant_opaque)->last_child;
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Check whether a group flag is set
  *
  * \param[in] group  Group resource to check
  * \param[in] flags  Flag or flags to check
  *
  * \return true if all \p flags are set for \p group, otherwise false
  */
 bool
 pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags)
 {
     group_variant_data_t *group_data = NULL;
 
-    CRM_CHECK((group != NULL) && (group->variant == pe_group)
+    CRM_CHECK((group != NULL) && (group->variant == pcmk_rsc_variant_group)
               && (group->variant_opaque != NULL), return false);
     group_data = (group_variant_data_t *) group->variant_opaque;
     return pcmk_all_flags_set(group_data->flags, flags);
 }
 
 /*!
  * \internal
  * \brief Set a (deprecated) group flag
  *
  * \param[in,out] group   Group resource to check
  * \param[in]     option  Name of boolean configuration option
  * \param[in]     flag    Flag to set if \p option is true (which is default)
  * \param[in]     wo_bit  "Warn once" flag to use for deprecation warning
  */
 static void
 set_group_flag(pe_resource_t *group, const char *option, uint32_t flag,
                uint32_t wo_bit)
 {
     const char *value_s = NULL;
     int value = 0;
 
     value_s = g_hash_table_lookup(group->meta, option);
 
     // We don't actually need the null check but it speeds up the common case
     if ((value_s == NULL) || (crm_str_to_boolean(value_s, &value) < 0)
         || (value != 0)) {
 
         ((group_variant_data_t *) group->variant_opaque)->flags |= flag;
 
     } else {
         pe_warn_once(wo_bit,
                      "Support for the '%s' group meta-attribute is deprecated "
                      "and will be removed in a future release "
                      "(use a resource set instead)", option);
     }
 }
 
 static int
 inactive_resources(pe_resource_t *rsc)
 {
     int retval = 0;
 
     for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (!child_rsc->fns->active(child_rsc, TRUE)) {
             retval++;
         }
     }
 
     return retval;
 }
 
 static void
 group_header(pcmk__output_t *out, int *rc, const pe_resource_t *rsc,
              int n_inactive, bool show_inactive, const char *desc)
 {
     GString *attrs = NULL;
 
     if (n_inactive > 0 && !show_inactive) {
         attrs = g_string_sized_new(64);
         g_string_append_printf(attrs, "%d member%s inactive", n_inactive,
                                pcmk__plural_s(n_inactive));
     }
 
     if (pe__resource_is_disabled(rsc)) {
         pcmk__add_separated_word(&attrs, 64, "disabled", ", ");
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
         pcmk__add_separated_word(&attrs, 64, "maintenance", ", ");
 
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         pcmk__add_separated_word(&attrs, 64, "unmanaged", ", ");
     }
 
     if (attrs != NULL) {
         PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Resource Group: %s (%s)%s%s%s",
                                  rsc->id,
                                  (const char *) attrs->str, desc ? " (" : "",
                                  desc ? desc : "", desc ? ")" : "");
         g_string_free(attrs, TRUE);
     } else {
         PCMK__OUTPUT_LIST_HEADER(out, FALSE, *rc, "Resource Group: %s%s%s%s",
                                  rsc->id,
                                  desc ? " (" : "", desc ? desc : "",
                                  desc ? ")" : "");
     }
 }
 
 static bool
 skip_child_rsc(pe_resource_t *rsc, pe_resource_t *child, gboolean parent_passes,
                GList *only_rsc, uint32_t show_opts)
 {
     bool star_list = pcmk__list_of_1(only_rsc) &&
                      pcmk__str_eq("*", g_list_first(only_rsc)->data, pcmk__str_none);
     bool child_filtered = child->fns->is_filtered(child, only_rsc, FALSE);
     bool child_active = child->fns->active(child, FALSE);
     bool show_inactive = pcmk_is_set(show_opts, pcmk_show_inactive_rscs);
 
     /* If the resource is in only_rsc by name (so, ignoring "*"), then allow
      * it regardless of whether it's active.
      */
     if (!star_list && !child_filtered) {
         return false;
 
     } else if (!child_filtered && (child_active || show_inactive)) {
         return false;
 
     } else if (parent_passes && (child_active || show_inactive)) {
         return false;
 
     }
 
     return true;
 }
 
 gboolean
 group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     xmlNode *xml_obj = rsc->xml;
     xmlNode *xml_native_rsc = NULL;
     group_variant_data_t *group_data = NULL;
     const char *clone_id = NULL;
 
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     group_data = calloc(1, sizeof(group_variant_data_t));
     group_data->last_child = NULL;
     rsc->variant_opaque = group_data;
 
     // @COMPAT These are deprecated since 2.1.5
     set_group_flag(rsc, XML_RSC_ATTR_ORDERED, pe__group_ordered,
                    pe_wo_group_order);
     set_group_flag(rsc, "collocated", pe__group_colocated, pe_wo_group_coloc);
 
     clone_id = crm_element_value(rsc->xml, XML_RSC_ATTR_INCARNATION);
 
     for (xml_native_rsc = pcmk__xe_first_child(xml_obj); xml_native_rsc != NULL;
          xml_native_rsc = pcmk__xe_next(xml_native_rsc)) {
 
         if (pcmk__str_eq((const char *)xml_native_rsc->name,
                          XML_CIB_TAG_RESOURCE, pcmk__str_none)) {
             pe_resource_t *new_rsc = NULL;
 
             crm_xml_add(xml_native_rsc, XML_RSC_ATTR_INCARNATION, clone_id);
             if (pe__unpack_resource(xml_native_rsc, &new_rsc, rsc,
                                     data_set) != pcmk_rc_ok) {
                 continue;
             }
 
             rsc->children = g_list_append(rsc->children, new_rsc);
             group_data->last_child = new_rsc;
             pe_rsc_trace(rsc, "Added %s member %s", rsc->id, new_rsc->id);
         }
     }
 
     if (rsc->children == NULL) {
         /* The schema does not allow empty groups, but if validation is
          * disabled, we allow them (members can be added later).
          *
          * @COMPAT At a major release bump, we should consider this a failure so
          *         that group methods can assume children is not NULL, and there
          *         are no strange effects from phantom groups due to their
          *         presence or meta-attributes.
          */
         pcmk__config_warn("Group %s will be ignored because it does not have "
                           "any members", rsc->id);
     }
     return TRUE;
 }
 
 gboolean
 group_active(pe_resource_t * rsc, gboolean all)
 {
     gboolean c_all = TRUE;
     gboolean c_any = FALSE;
     GList *gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (child_rsc->fns->active(child_rsc, all)) {
             c_any = TRUE;
         } else {
             c_all = FALSE;
         }
     }
 
     if (c_any == FALSE) {
         return FALSE;
     } else if (all && c_all == FALSE) {
         return FALSE;
     }
     return TRUE;
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 static void
 group_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
                 void *print_data)
 {
     GList *gIter = rsc->children;
     char *child_text = crm_strdup_printf("%s     ", pre_text);
 
     status_print("%s<group " XML_ATTR_ID "=\"%s\" ", pre_text, rsc->id);
     status_print("number_resources=\"%d\" ", g_list_length(rsc->children));
     status_print(">\n");
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->fns->print(child_rsc, child_text, options, print_data);
     }
 
     status_print("%s</group>\n", pre_text);
     free(child_text);
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 void
 group_print(pe_resource_t *rsc, const char *pre_text, long options,
             void *print_data)
 {
     char *child_text = NULL;
     GList *gIter = rsc->children;
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     if (options & pe_print_xml) {
         group_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     child_text = crm_strdup_printf("%s    ", pre_text);
 
     status_print("%sResource Group: %s", pre_text ? pre_text : "", rsc->id);
 
     if (options & pe_print_html) {
         status_print("\n<ul>\n");
 
     } else if ((options & pe_print_log) == 0) {
         status_print("\n");
     }
 
     if (options & pe_print_brief) {
         print_rscs_brief(rsc->children, child_text, options, print_data, TRUE);
 
     } else {
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             if (options & pe_print_html) {
                 status_print("<li>\n");
             }
             child_rsc->fns->print(child_rsc, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("</li>\n");
             }
         }
     }
 
     if (options & pe_print_html) {
         status_print("</ul>\n");
     }
     free(child_text);
 }
 
 PCMK__OUTPUT_ARGS("group", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__group_xml(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     const char *desc = NULL;
     GList *gIter = rsc->children;
 
     int rc = pcmk_rc_no_output;
 
     gboolean parent_passes = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
                              (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches));
 
     desc = pe__resource_description(rsc, show_opts);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) {
             continue;
         }
 
         if (rc == pcmk_rc_no_output) {
             char *count = pcmk__itoa(g_list_length(gIter));
             const char *maint_s = pe__rsc_bool_str(rsc, pe_rsc_maintenance);
             const char *managed_s = pe__rsc_bool_str(rsc, pe_rsc_managed);
             const char *disabled_s = pcmk__btoa(pe__resource_is_disabled(rsc));
 
             rc = pe__name_and_nvpairs_xml(out, true, "group", 5,
                                           XML_ATTR_ID, rsc->id,
                                           "number_resources", count,
                                           "maintenance", maint_s,
                                           "managed", managed_s,
                                           "disabled", disabled_s,
                                           "description", desc);
             free(count);
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
 
         out->message(out, crm_map_element_name(child_rsc->xml), show_opts, child_rsc,
                      only_node, only_rsc);
     }
 
     if (rc == pcmk_rc_ok) {
         pcmk__output_xml_pop_parent(out);
     }
 
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("group", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__group_default(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     const char *desc = NULL;
     int rc = pcmk_rc_no_output;
 
     gboolean parent_passes = pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
                              (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches));
 
     gboolean active = rsc->fns->active(rsc, TRUE);
     gboolean partially_active = rsc->fns->active(rsc, FALSE);
 
     desc = pe__resource_description(rsc, show_opts);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     if (pcmk_is_set(show_opts, pcmk_show_brief)) {
         GList *rscs = pe__filter_rsc_list(rsc->children, only_rsc);
 
         if (rscs != NULL) {
             group_header(out, &rc, rsc, !active && partially_active ? inactive_resources(rsc) : 0,
                          pcmk_is_set(show_opts, pcmk_show_inactive_rscs), desc);
             pe__rscs_brief_output(out, rscs, show_opts | pcmk_show_inactive_rscs);
 
             rc = pcmk_rc_ok;
             g_list_free(rscs);
         }
 
     } else {
         for (GList *gIter = rsc->children; gIter; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
             if (skip_child_rsc(rsc, child_rsc, parent_passes, only_rsc, show_opts)) {
                 continue;
             }
 
             group_header(out, &rc, rsc, !active && partially_active ? inactive_resources(rsc) : 0,
                          pcmk_is_set(show_opts, pcmk_show_inactive_rscs), desc);
             out->message(out, crm_map_element_name(child_rsc->xml), show_opts,
                          child_rsc, only_node, only_rsc);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
 
     return rc;
 }
 
 void
 group_free(pe_resource_t * rsc)
 {
     CRM_CHECK(rsc != NULL, return);
 
     pe_rsc_trace(rsc, "Freeing %s", rsc->id);
 
     for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         CRM_ASSERT(child_rsc);
         pe_rsc_trace(child_rsc, "Freeing child %s", child_rsc->id);
         child_rsc->fns->free(child_rsc);
     }
 
     pe_rsc_trace(rsc, "Freeing child list");
     g_list_free(rsc->children);
 
     common_free(rsc);
 }
 
 enum rsc_role_e
 group_resource_state(const pe_resource_t * rsc, gboolean current)
 {
     enum rsc_role_e group_role = pcmk_role_unknown;
     GList *gIter = rsc->children;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         enum rsc_role_e role = child_rsc->fns->state(child_rsc, current);
 
         if (role > group_role) {
             group_role = role;
         }
     }
 
     pe_rsc_trace(rsc, "%s role: %s", rsc->id, role2text(group_role));
     return group_role;
 }
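 
 /* Worked example (illustrative): the group role is the highest member role, so
  * a group whose members are Stopped and Started reports Started, and any
  * Promoted member makes the whole group report Promoted.
  */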
 
 gboolean
 pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                       gboolean check_parent)
 {
     gboolean passes = FALSE;
 
     if (check_parent
         && pcmk__str_in_list(rsc_printable_id(pe__const_top_resource(rsc,
                                                                      false)),
                              only_rsc, pcmk__str_star_matches)) {
         passes = TRUE;
     } else if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) {
         passes = TRUE;
     } else if (strstr(rsc->id, ":") != NULL && pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)) {
         passes = TRUE;
     } else {
         for (const GList *iter = rsc->children;
              iter != NULL; iter = iter->next) {
 
             const pe_resource_t *child_rsc = (const pe_resource_t *) iter->data;
 
             if (!child_rsc->fns->is_filtered(child_rsc, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             }
         }
     }
 
     return !passes;
 }
 
 /*!
  * \internal
  * \brief Get maximum group resource instances per node
  *
  * \param[in] rsc  Group resource to check
  *
  * \return Maximum number of \p rsc instances that can be active on one node
  */
 unsigned int
 pe__group_max_per_node(const pe_resource_t *rsc)
 {
-    CRM_ASSERT((rsc != NULL) && (rsc->variant == pe_group));
+    CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_group));
     return 1U;
 }
diff --git a/lib/pengine/native.c b/lib/pengine/native.c
index 32ff68ef5f..569e241315 100644
--- a/lib/pengine/native.c
+++ b/lib/pengine/native.c
@@ -1,1439 +1,1440 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdint.h>
 
 #include <crm/common/output.h>
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/complex.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <pe_status_private.h>
 
 #ifdef PCMK__COMPAT_2_0
 #define PROVIDER_SEP "::"
 #else
 #define PROVIDER_SEP ":"
 #endif
 
 /*!
  * \internal
  * \brief Check whether a resource is active on multiple nodes
  */
 static bool
 is_multiply_active(const pe_resource_t *rsc)
 {
     unsigned int count = 0;
 
     if (rsc->variant == pcmk_rsc_variant_primitive) {
         pe__find_active_requires(rsc, &count);
     }
     return count > 1;
 }
 
 static void
 native_priority_to_node(pe_resource_t * rsc, pe_node_t * node, gboolean failed)
 {
     int priority = 0;
 
     if ((rsc->priority == 0) || (failed == TRUE)) {
         return;
     }
 
     if (rsc->role == pcmk_role_promoted) {
         // Promoted instance takes base priority + 1
         priority = rsc->priority + 1;
 
     } else {
         priority = rsc->priority;
     }
 
     node->details->priority += priority;
     pe_rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s)",
                  pe__node_name(node), node->details->priority,
                  (rsc->role == pcmk_role_promoted)? "promoted " : "",
                  rsc->id, rsc->priority,
                  (rsc->role == pcmk_role_promoted)? " + 1" : "");
 
     /* Priority of a resource running on a guest node is added to the cluster
      * node as well. */
     if (node->details->remote_rsc
         && node->details->remote_rsc->container) {
         GList *gIter = node->details->remote_rsc->container->running_on;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *a_node = gIter->data;
 
             a_node->details->priority += priority;
             pe_rsc_trace(rsc, "%s now has priority %d with %s'%s' (priority: %d%s) "
                          "from guest node %s",
                          pe__node_name(a_node), a_node->details->priority,
                          (rsc->role == pcmk_role_promoted)? "promoted " : "",
                          rsc->id, rsc->priority,
                          (rsc->role == pcmk_role_promoted)? " + 1" : "",
                          pe__node_name(node));
         }
     }
 }
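 
 /* Worked example (illustrative, hypothetical values): for a resource with
  * priority=10, a promoted instance adds 11 to its node's priority (base + 1)
  * and any other active role adds 10; on a guest node the same amount is also
  * added to the node(s) hosting the guest's container.
  */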
 
 void
 native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed)
 {
     GList *gIter = rsc->running_on;
 
     CRM_CHECK(node != NULL, return);
     for (; gIter != NULL; gIter = gIter->next) {
         pe_node_t *a_node = (pe_node_t *) gIter->data;
 
         CRM_CHECK(a_node != NULL, return);
         if (pcmk__str_eq(a_node->details->id, node->details->id, pcmk__str_casei)) {
             return;
         }
     }
 
     pe_rsc_trace(rsc, "Adding %s to %s %s", rsc->id, pe__node_name(node),
                  pcmk_is_set(rsc->flags, pe_rsc_managed)? "" : "(unmanaged)");
 
     rsc->running_on = g_list_append(rsc->running_on, node);
     if (rsc->variant == pcmk_rsc_variant_primitive) {
         node->details->running_rsc = g_list_append(node->details->running_rsc, rsc);
 
         native_priority_to_node(rsc, node, failed);
     }
 
     if ((rsc->variant == pcmk_rsc_variant_primitive)
         && node->details->maintenance) {
         pe__clear_resource_flags(rsc, pe_rsc_managed);
         pe__set_resource_flags(rsc, pe_rsc_maintenance);
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         pe_resource_t *p = rsc->parent;
 
         pe_rsc_info(rsc, "resource %s isn't managed", rsc->id);
         resource_location(rsc, node, INFINITY, "not_managed_default", data_set);
 
         while(p && node->details->online) {
             /* add without the additional location constraint */
             p->running_on = g_list_append(p->running_on, node);
             p = p->parent;
         }
         return;
     }
 
     if (is_multiply_active(rsc)) {
         switch (rsc->recovery_type) {
             case pcmk_multiply_active_stop:
                 {
                     GHashTableIter gIter;
                     pe_node_t *local_node = NULL;
 
                     /* make sure it doesn't come up again */
                     if (rsc->allowed_nodes != NULL) {
                         g_hash_table_destroy(rsc->allowed_nodes);
                     }
                     rsc->allowed_nodes = pe__node_list2table(data_set->nodes);
                     g_hash_table_iter_init(&gIter, rsc->allowed_nodes);
                     while (g_hash_table_iter_next(&gIter, NULL, (void **)&local_node)) {
                         local_node->weight = -INFINITY;
                     }
                 }
                 break;
             case pcmk_multiply_active_block:
                 pe__clear_resource_flags(rsc, pe_rsc_managed);
                 pe__set_resource_flags(rsc, pe_rsc_block);
 
                 /* If the resource belongs to a group or bundle configured with
                  * multiple-active=block, block the entire entity.
                  */
                 if (rsc->parent
-                    && (rsc->parent->variant == pe_group || rsc->parent->variant == pe_container)
+                    && ((rsc->parent->variant == pcmk_rsc_variant_group)
+                        || (rsc->parent->variant == pe_container))
                     && (rsc->parent->recovery_type == pcmk_multiply_active_block)) {
                     GList *gIter = rsc->parent->children;
 
                     for (; gIter != NULL; gIter = gIter->next) {
                         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
                         pe__clear_resource_flags(child, pe_rsc_managed);
                         pe__set_resource_flags(child, pe_rsc_block);
                     }
                 }
                 break;
 
             // pcmk_multiply_active_restart, pcmk_multiply_active_unexpected
             default:
                 /* The scheduler will do the right thing because the relevant
                  * variables and flags are set when unpacking the history.
                  */
                 break;
         }
         crm_debug("%s is active on multiple nodes including %s: %s",
                   rsc->id, pe__node_name(node),
                   recovery2text(rsc->recovery_type));
 
     } else {
         pe_rsc_trace(rsc, "Resource %s is active on %s",
                      rsc->id, pe__node_name(node));
     }
 
     if (rsc->parent != NULL) {
         native_add_running(rsc->parent, node, data_set, FALSE);
     }
 }
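 
 /* Illustration: when a primitive is found active on more than one node,
  * pcmk_multiply_active_stop bans every allowed node (-INFINITY) so the
  * resource is stopped everywhere, while pcmk_multiply_active_block leaves the
  * resource unmanaged and blocked (and does the same to its siblings when its
  * group or bundle also uses that recovery type).
  */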
 
 static void
 recursive_clear_unique(pe_resource_t *rsc, gpointer user_data)
 {
     pe__clear_resource_flags(rsc, pe_rsc_unique);
     add_hash_param(rsc->meta, XML_RSC_ATTR_UNIQUE, XML_BOOLEAN_FALSE);
     g_list_foreach(rsc->children, (GFunc) recursive_clear_unique, NULL);
 }
 
 gboolean
 native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     pe_resource_t *parent = uber_parent(rsc);
     const char *standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     uint32_t ra_caps = pcmk_get_ra_caps(standard);
 
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     // Only some agent standards support unique and promotable clones
     if (!pcmk_is_set(ra_caps, pcmk_ra_cap_unique)
         && pcmk_is_set(rsc->flags, pe_rsc_unique) && pe_rsc_is_clone(parent)) {
 
         /* @COMPAT We should probably reject this situation as an error (as we
          * do for promotable below) rather than warn and convert, but that would
          * be a backward-incompatible change that we should probably do with a
          * transform at a schema major version bump.
          */
         pe__force_anon(standard, parent, rsc->id, data_set);
 
         /* Clear globally-unique on the parent and all its descendants unpacked
          * so far (clearing the parent should make any future children unpacking
          * correct). We have to clear this resource explicitly because it isn't
          * hooked into the parent's children yet.
          */
         recursive_clear_unique(parent, NULL);
         recursive_clear_unique(rsc, NULL);
     }
     if (!pcmk_is_set(ra_caps, pcmk_ra_cap_promotable)
         && pcmk_is_set(parent->flags, pe_rsc_promotable)) {
 
         pe_err("Resource %s is of type %s and therefore "
                "cannot be used as a promotable clone resource",
                rsc->id, standard);
         return FALSE;
     }
     return TRUE;
 }
 
 static bool
 rsc_is_on_node(pe_resource_t *rsc, const pe_node_t *node, int flags)
 {
     pe_rsc_trace(rsc, "Checking whether %s is on %s",
                  rsc->id, pe__node_name(node));
 
     if (pcmk_is_set(flags, pe_find_current) && rsc->running_on) {
 
         for (GList *iter = rsc->running_on; iter; iter = iter->next) {
             pe_node_t *loc = (pe_node_t *) iter->data;
 
             if (loc->details == node->details) {
                 return true;
             }
         }
 
     } else if (pcmk_is_set(flags, pe_find_inactive)
                && (rsc->running_on == NULL)) {
         return true;
 
     } else if (!pcmk_is_set(flags, pe_find_current) && rsc->allocated_to
                && (rsc->allocated_to->details == node->details)) {
         return true;
     }
     return false;
 }
 
 pe_resource_t *
 native_find_rsc(pe_resource_t * rsc, const char *id, const pe_node_t *on_node,
                 int flags)
 {
     bool match = false;
     pe_resource_t *result = NULL;
 
     CRM_CHECK(id && rsc && rsc->id, return NULL);
 
     if (flags & pe_find_clone) {
         const char *rid = ID(rsc->xml);
 
         if (!pe_rsc_is_clone(pe__const_top_resource(rsc, false))) {
             match = false;
 
         } else if (!strcmp(id, rsc->id) || pcmk__str_eq(id, rid, pcmk__str_none)) {
             match = true;
         }
 
     } else if (!strcmp(id, rsc->id)) {
         match = true;
 
     } else if (pcmk_is_set(flags, pe_find_renamed)
                && rsc->clone_name && strcmp(rsc->clone_name, id) == 0) {
         match = true;
 
     } else if (pcmk_is_set(flags, pe_find_any)
                || (pcmk_is_set(flags, pe_find_anon)
                    && !pcmk_is_set(rsc->flags, pe_rsc_unique))) {
         match = pe_base_name_eq(rsc, id);
     }
 
     if (match && on_node) {
         if (!rsc_is_on_node(rsc, on_node, flags)) {
             match = false;
         }
     }
 
     if (match) {
         return rsc;
     }
 
     for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         result = rsc->fns->find_rsc(child, id, on_node, flags);
         if (result) {
             return result;
         }
     }
     return NULL;
 }
 
 // create is ignored
 char *
 native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
                  pe_working_set_t * data_set)
 {
     char *value_copy = NULL;
     const char *value = NULL;
     GHashTable *params = NULL;
 
     CRM_CHECK(rsc != NULL, return NULL);
     CRM_CHECK(name != NULL && strlen(name) != 0, return NULL);
 
     pe_rsc_trace(rsc, "Looking up %s in %s", name, rsc->id);
     params = pe_rsc_params(rsc, node, data_set);
     value = g_hash_table_lookup(params, name);
     if (value == NULL) {
         /* try meta attributes instead */
         value = g_hash_table_lookup(rsc->meta, name);
     }
     pcmk__str_update(&value_copy, value);
     return value_copy;
 }
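 
 /* Usage sketch (illustrative, hypothetical parameter name): instance
  * attributes are checked first, then meta-attributes, and the caller owns the
  * returned copy:
  *
  *     char *value = native_parameter(rsc, node, FALSE, "ip", data_set);
  *     ...
  *     free(value);
  */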
 
 gboolean
 native_active(pe_resource_t * rsc, gboolean all)
 {
     for (GList *gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
         pe_node_t *a_node = (pe_node_t *) gIter->data;
 
         if (a_node->details->unclean) {
             pe_rsc_trace(rsc, "Resource %s: %s is unclean",
                          rsc->id, pe__node_name(a_node));
             return TRUE;
         } else if (a_node->details->online == FALSE && pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             pe_rsc_trace(rsc, "Resource %s: %s is offline",
                          rsc->id, pe__node_name(a_node));
         } else {
             pe_rsc_trace(rsc, "Resource %s active on %s",
                          rsc->id, pe__node_name(a_node));
             return TRUE;
         }
     }
     return FALSE;
 }
 
 struct print_data_s {
     long options;
     void *print_data;
 };
 
 static const char *
 native_pending_state(const pe_resource_t *rsc)
 {
     const char *pending_state = NULL;
 
     if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_START, pcmk__str_casei)) {
         pending_state = "Starting";
 
     } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_STOP,
                             pcmk__str_casei)) {
         pending_state = "Stopping";
 
     } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MIGRATE_TO,
                             pcmk__str_casei)) {
         pending_state = "Migrating";
 
     } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MIGRATE_FROM,
                             pcmk__str_casei)) {
         /* migrate_from is the second half of a live migration,
          * so it is also displayed as "Migrating" */
         pending_state = "Migrating";
 
     } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_PROMOTE,
                             pcmk__str_casei)) {
         pending_state = "Promoting";
 
     } else if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_DEMOTE,
                             pcmk__str_casei)) {
         pending_state = "Demoting";
     }
 
     return pending_state;
 }
 
 static const char *
 native_pending_task(const pe_resource_t *rsc)
 {
     const char *pending_task = NULL;
 
     if (pcmk__str_eq(rsc->pending_task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
         pending_task = "Monitoring";
 
     /* Pending probes are not printed, even if pending
      * operations are requested. If someone ever requests that
      * behavior, uncomment this and the corresponding part of
      * unpack.c:unpack_rsc_op().
      */
     /*
     } else if (pcmk__str_eq(rsc->pending_task, "probe", pcmk__str_casei)) {
         pending_task = "Checking";
     */
     }
 
     return pending_task;
 }
 
 static enum rsc_role_e
 native_displayable_role(const pe_resource_t *rsc)
 {
     enum rsc_role_e role = rsc->role;
 
     if ((role == pcmk_role_started)
         && pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
                        pe_rsc_promotable)) {
 
         role = pcmk_role_unpromoted;
     }
     return role;
 }
 
 static const char *
 native_displayable_state(const pe_resource_t *rsc, bool print_pending)
 {
     const char *rsc_state = NULL;
 
     if (print_pending) {
         rsc_state = native_pending_state(rsc);
     }
     if (rsc_state == NULL) {
         rsc_state = role2text(native_displayable_role(rsc));
     }
     return rsc_state;
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 static void
 native_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
 {
     const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     const char *rsc_state = native_displayable_state(rsc, pcmk_is_set(options, pe_print_pending));
     const char *target_role = NULL;
 
     /* resource information. */
     status_print("%s<resource ", pre_text);
     status_print(XML_ATTR_ID "=\"%s\" ", rsc_printable_id(rsc));
     status_print("resource_agent=\"%s%s%s:%s\" ", class,
                  ((prov == NULL)? "" : PROVIDER_SEP),
                  ((prov == NULL)? "" : prov),
                  crm_element_value(rsc->xml, XML_ATTR_TYPE));
 
     status_print("role=\"%s\" ", rsc_state);
     if (rsc->meta) {
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
     if (target_role) {
         status_print("target_role=\"%s\" ", target_role);
     }
     status_print("active=\"%s\" ", pcmk__btoa(rsc->fns->active(rsc, TRUE)));
     status_print("orphaned=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_orphan));
     status_print("blocked=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_block));
     status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
     status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
     status_print("failure_ignored=\"%s\" ",
                  pe__rsc_bool_str(rsc, pe_rsc_failure_ignored));
     status_print("nodes_running_on=\"%d\" ", g_list_length(rsc->running_on));
 
     if (options & pe_print_pending) {
         const char *pending_task = native_pending_task(rsc);
 
         if (pending_task) {
             status_print("pending=\"%s\" ", pending_task);
         }
     }
 
     /* print out the nodes this resource is running on */
     if (options & pe_print_rsconly) {
         status_print("/>\n");
         /* do nothing */
     } else if (rsc->running_on != NULL) {
         GList *gIter = rsc->running_on;
 
         status_print(">\n");
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node = (pe_node_t *) gIter->data;
 
             status_print("%s    <node name=\"%s\" " XML_ATTR_ID "=\"%s\" "
                          "cached=\"%s\"/>\n",
                          pre_text, pcmk__s(node->details->uname, ""),
                          node->details->id, pcmk__btoa(!node->details->online));
         }
         status_print("%s</resource>\n", pre_text);
     } else {
         status_print("/>\n");
     }
 }
 
 // Append a flag to resource description string's flags list
 static bool
 add_output_flag(GString *s, const char *flag_desc, bool have_flags)
 {
     g_string_append(s, (have_flags? ", " : " ("));
     g_string_append(s, flag_desc);
     return true;
 }
 
 // Append a node name to resource description string's node list
 static bool
 add_output_node(GString *s, const char *node, bool have_nodes)
 {
     g_string_append(s, (have_nodes? " " : " [ "));
     g_string_append(s, node);
     return true;
 }
 
 /*!
  * \internal
  * \brief Create a string description of a resource
  *
  * \param[in] rsc          Resource to describe
  * \param[in] name         Desired identifier for the resource
  * \param[in] node         If not NULL, node that resource is "on"
  * \param[in] show_opts    Bitmask of pcmk_show_opt_e.
  * \param[in] target_role  Resource's target role
  * \param[in] show_nodes   Whether to display nodes when multiply active
  *
  * \return Newly allocated string description of resource
  * \note Caller must free the result with g_free().
  */
 gchar *
 pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
                            const pe_node_t *node, uint32_t show_opts,
                            const char *target_role, bool show_nodes)
 {
     const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     const char *provider = NULL;
     const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
     GString *outstr = NULL;
     bool have_flags = false;
 
     if (rsc->variant != pcmk_rsc_variant_primitive) {
         return NULL;
     }
 
     CRM_CHECK(name != NULL, name = "unknown");
     CRM_CHECK(kind != NULL, kind = "unknown");
     CRM_CHECK(class != NULL, class = "unknown");
 
     if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
         provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     }
 
     if ((node == NULL) && (rsc->lock_node != NULL)) {
         node = rsc->lock_node;
     }
     if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only)
         || pcmk__list_of_multiple(rsc->running_on)) {
         node = NULL;
     }
 
     outstr = g_string_sized_new(128);
 
     // Resource name and agent
     pcmk__g_strcat(outstr,
                    name, "\t(", class, ((provider == NULL)? "" : PROVIDER_SEP),
                    pcmk__s(provider, ""), ":", kind, "):\t", NULL);
 
     // State on node
     if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         g_string_append(outstr, " ORPHANED");
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         enum rsc_role_e role = native_displayable_role(rsc);
 
         g_string_append(outstr, " FAILED");
         if (role > pcmk_role_unpromoted) {
             pcmk__add_word(&outstr, 0, role2text(role));
         }
     } else {
         bool show_pending = pcmk_is_set(show_opts, pcmk_show_pending);
 
         pcmk__add_word(&outstr, 0, native_displayable_state(rsc, show_pending));
     }
     if (node) {
         pcmk__add_word(&outstr, 0, pe__node_name(node));
     }
 
     // Failed probe operation
     if (native_displayable_role(rsc) == pcmk_role_stopped) {
         xmlNode *probe_op = pe__failed_probe_for_rsc(rsc, node ? node->details->uname : NULL);
         if (probe_op != NULL) {
             int rc;
 
             pcmk__scan_min_int(crm_element_value(probe_op, XML_LRM_ATTR_RC), &rc, 0);
             pcmk__g_strcat(outstr, " (", services_ocf_exitcode_str(rc), ") ",
                            NULL);
         }
     }
 
     // Flags, as: (<flag> [...])
     if (node && !(node->details->online) && node->details->unclean) {
         have_flags = add_output_flag(outstr, "UNCLEAN", have_flags);
     }
     if (node && (node == rsc->lock_node)) {
         have_flags = add_output_flag(outstr, "LOCKED", have_flags);
     }
     if (pcmk_is_set(show_opts, pcmk_show_pending)) {
         const char *pending_task = native_pending_task(rsc);
 
         if (pending_task) {
             have_flags = add_output_flag(outstr, pending_task, have_flags);
         }
     }
     if (target_role) {
         enum rsc_role_e target_role_e = text2role(target_role);
 
         /* Only show target role if it limits our abilities (i.e. ignore
          * Started, as it is the default anyways, and doesn't prevent the
          * resource from becoming promoted).
          */
         if (target_role_e == pcmk_role_stopped) {
             have_flags = add_output_flag(outstr, "disabled", have_flags);
 
         } else if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
                                pe_rsc_promotable)
                    && (target_role_e == pcmk_role_unpromoted)) {
             have_flags = add_output_flag(outstr, "target-role:", have_flags);
             g_string_append(outstr, target_role);
         }
     }
 
     // Blocked or maintenance implies unmanaged
     if (pcmk_any_flags_set(rsc->flags, pe_rsc_block|pe_rsc_maintenance)) {
         if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
             have_flags = add_output_flag(outstr, "blocked", have_flags);
 
         } else if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
             have_flags = add_output_flag(outstr, "maintenance", have_flags);
         }
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         have_flags = add_output_flag(outstr, "unmanaged", have_flags);
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
         have_flags = add_output_flag(outstr, "failure ignored", have_flags);
     }
 
     if (have_flags) {
         g_string_append_c(outstr, ')');
     }
 
     // User-supplied description
     if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)
         || pcmk__list_of_multiple(rsc->running_on)) {
         const char *desc = crm_element_value(rsc->xml, XML_ATTR_DESC);
 
         if (desc) {
             g_string_append(outstr, " (");
             g_string_append(outstr, desc);
             g_string_append(outstr, ")");
 
         }
     }
 
     if (show_nodes && !pcmk_is_set(show_opts, pcmk_show_rsc_only)
         && pcmk__list_of_multiple(rsc->running_on)) {
         bool have_nodes = false;
 
         for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
             pe_node_t *n = (pe_node_t *) iter->data;
 
             have_nodes = add_output_node(outstr, n->details->uname, have_nodes);
         }
         if (have_nodes) {
             g_string_append(outstr, " ]");
         }
     }
 
     return g_string_free(outstr, FALSE);
 }
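 
 /* Example output (illustrative, hypothetical resource and node names):
  *
  *     "vip\t(ocf:heartbeat:IPaddr2):\t Started node1 (unmanaged)"
  *
  * i.e. the name, the agent triple, the displayed state, the node when exactly
  * one is relevant, and any flags in parentheses.
  */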
 
 int
 pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
                        const char *name, const pe_node_t *node,
                        uint32_t show_opts)
 {
     const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
     const char *target_role = NULL;
 
     xmlNodePtr list_node = NULL;
     const char *cl = NULL;
 
     CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
     CRM_ASSERT(kind != NULL);
 
     if (rsc->meta) {
         const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
 
         if (crm_is_true(is_internal)
             && !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) {
 
             crm_trace("skipping print of internal resource %s", rsc->id);
             return pcmk_rc_no_output;
         }
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         cl = "rsc-managed";
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         cl = "rsc-failed";
 
     } else if ((rsc->variant == pcmk_rsc_variant_primitive)
                && (rsc->running_on == NULL)) {
         cl = "rsc-failed";
 
     } else if (pcmk__list_of_multiple(rsc->running_on)) {
         cl = "rsc-multiple";
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
         cl = "rsc-failure-ignored";
 
     } else {
         cl = "rsc-ok";
     }
 
     {
         gchar *s = pcmk__native_output_string(rsc, name, node, show_opts,
                                               target_role, true);
 
         list_node = pcmk__output_create_html_node(out, "li", NULL, NULL, NULL);
         pcmk_create_html_node(list_node, "span", NULL, cl, s);
         g_free(s);
     }
 
     return pcmk_rc_ok;
 }
 
 int
 pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc,
                        const char *name, const pe_node_t *node,
                        uint32_t show_opts)
 {
     const char *target_role = NULL;
 
     CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
 
     if (rsc->meta) {
         const char *is_internal = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERNAL_RSC);
 
         if (crm_is_true(is_internal)
             && !pcmk_is_set(show_opts, pcmk_show_implicit_rscs)) {
 
             crm_trace("skipping print of internal resource %s", rsc->id);
             return pcmk_rc_no_output;
         }
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
 
     {
         gchar *s = pcmk__native_output_string(rsc, name, node, show_opts,
                                               target_role, true);
 
         out->list_item(out, NULL, "%s", s);
         g_free(s);
     }
 
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 void
 common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
              const pe_node_t *node, long options, void *print_data)
 {
     const char *target_role = NULL;
 
     CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
 
     if (rsc->meta) {
         const char *is_internal = g_hash_table_lookup(rsc->meta,
                                                       XML_RSC_ATTR_INTERNAL_RSC);
 
         if (crm_is_true(is_internal)
             && !pcmk_is_set(options, pe_print_implicit)) {
 
             crm_trace("skipping print of internal resource %s", rsc->id);
             return;
         }
         target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
 
     if (options & pe_print_xml) {
         native_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     if ((pre_text == NULL) && (options & pe_print_printf)) {
         pre_text = " ";
     }
 
     if (options & pe_print_html) {
         if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             status_print("<font color=\"yellow\">");
 
         } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
             status_print("<font color=\"red\">");
 
         } else if (rsc->running_on == NULL) {
             status_print("<font color=\"red\">");
 
         } else if (pcmk__list_of_multiple(rsc->running_on)) {
             status_print("<font color=\"orange\">");
 
         } else if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
             status_print("<font color=\"yellow\">");
 
         } else {
             status_print("<font color=\"green\">");
         }
     }
 
     {
         gchar *resource_s = pcmk__native_output_string(rsc, name, node, options,
                                                        target_role, false);
         status_print("%s%s", (pre_text? pre_text : ""), resource_s);
         g_free(resource_s);
     }
 
     if (pcmk_is_set(options, pe_print_html)) {
         status_print(" </font> ");
     }
 
     if (!pcmk_is_set(options, pe_print_rsconly)
         && pcmk__list_of_multiple(rsc->running_on)) {
 
         GList *gIter = rsc->running_on;
         int counter = 0;
 
         if (options & pe_print_html) {
             status_print("<ul>\n");
         } else if ((options & pe_print_printf)
                    || (options & pe_print_ncurses)) {
             status_print("[");
         }
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *n = (pe_node_t *) gIter->data;
 
             counter++;
 
             if (options & pe_print_html) {
                 status_print("<li>\n%s", pe__node_name(n));
 
             } else if ((options & pe_print_printf)
                        || (options & pe_print_ncurses)) {
                 status_print(" %s", pe__node_name(n));
 
             } else if ((options & pe_print_log)) {
                 status_print("\t%d : %s", counter, pe__node_name(n));
 
             } else {
                 status_print("%s", pe__node_name(n));
             }
             if (options & pe_print_html) {
                 status_print("</li>\n");
 
             }
         }
 
         if (options & pe_print_html) {
             status_print("</ul>\n");
         } else if ((options & pe_print_printf)
                    || (options & pe_print_ncurses)) {
             status_print(" ]");
         }
     }
 
     if (options & pe_print_html) {
         status_print("<br/>\n");
     } else if (options & pe_print_suppres_nl) {
         /* nothing */
     } else if ((options & pe_print_printf) || (options & pe_print_ncurses)) {
         status_print("\n");
     }
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 void
 native_print(pe_resource_t *rsc, const char *pre_text, long options,
              void *print_data)
 {
     const pe_node_t *node = NULL;
 
     CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
     if (options & pe_print_xml) {
         native_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     node = pe__current_node(rsc);
 
     if (node == NULL) {
         // This is set only if a non-probe action is pending on this node
         node = rsc->pending_node;
     }
 
     common_print(rsc, pre_text, rsc_printable_id(rsc), node, options, print_data);
 }
 
 PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__resource_xml(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     bool print_pending = pcmk_is_set(show_opts, pcmk_show_pending);
     const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     const char *rsc_state = native_displayable_state(rsc, print_pending);
 
     const char *desc = NULL;
     char ra_name[LINE_MAX];
     char *nodes_running_on = NULL;
     const char *lock_node_name = NULL;
     int rc = pcmk_rc_no_output;
     const char *target_role = NULL;
 
     desc = pe__resource_description(rsc, show_opts);
 
     if (rsc->meta != NULL) {
        target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
     }
 
     CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return pcmk_rc_no_output;
     }
 
     /* resource information. */
     snprintf(ra_name, LINE_MAX, "%s%s%s:%s", class,
             ((prov == NULL)? "" : PROVIDER_SEP), ((prov == NULL)? "" : prov),
             crm_element_value(rsc->xml, XML_ATTR_TYPE));
 
     nodes_running_on = pcmk__itoa(g_list_length(rsc->running_on));
 
     if (rsc->lock_node != NULL) {
         lock_node_name = rsc->lock_node->details->uname;
     }
 
     rc = pe__name_and_nvpairs_xml(out, true, "resource", 15,
              "id", rsc_printable_id(rsc),
              "resource_agent", ra_name,
              "role", rsc_state,
              "target_role", target_role,
              "active", pcmk__btoa(rsc->fns->active(rsc, TRUE)),
              "orphaned", pe__rsc_bool_str(rsc, pe_rsc_orphan),
              "blocked", pe__rsc_bool_str(rsc, pe_rsc_block),
              "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
              "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
              "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
              "failure_ignored", pe__rsc_bool_str(rsc, pe_rsc_failure_ignored),
              "nodes_running_on", nodes_running_on,
              "pending", (print_pending? native_pending_task(rsc) : NULL),
              "locked_to", lock_node_name,
              "description", desc);
     free(nodes_running_on);
 
     CRM_ASSERT(rc == pcmk_rc_ok);
 
     if (rsc->running_on != NULL) {
         GList *gIter = rsc->running_on;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node = (pe_node_t *) gIter->data;
 
             rc = pe__name_and_nvpairs_xml(out, false, "node", 3,
                      "name", node->details->uname,
                      "id", node->details->id,
                      "cached", pcmk__btoa(node->details->online));
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
     }
 
     pcmk__output_xml_pop_parent(out);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__resource_html(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     const pe_node_t *node = pe__current_node(rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return pcmk_rc_no_output;
     }
 
     CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
 
     if (node == NULL) {
         // This is set only if a non-probe action is pending on this node
         node = rsc->pending_node;
     }
     return pe__common_output_html(out, rsc, rsc_printable_id(rsc), node, show_opts);
 }
 
 PCMK__OUTPUT_ARGS("primitive", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__resource_text(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node G_GNUC_UNUSED = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     const pe_node_t *node = pe__current_node(rsc);
 
     CRM_ASSERT(rsc->variant == pcmk_rsc_variant_primitive);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return pcmk_rc_no_output;
     }
 
     if (node == NULL) {
         // This is set only if a non-probe action is pending on this node
         node = rsc->pending_node;
     }
     return pe__common_output_text(out, rsc, rsc_printable_id(rsc), node, show_opts);
 }
 
 void
 native_free(pe_resource_t * rsc)
 {
     pe_rsc_trace(rsc, "Freeing resource action list (not the data)");
     common_free(rsc);
 }
 
 enum rsc_role_e
 native_resource_state(const pe_resource_t * rsc, gboolean current)
 {
     enum rsc_role_e role = rsc->next_role;
 
     if (current) {
         role = rsc->role;
     }
     pe_rsc_trace(rsc, "%s state: %s", rsc->id, role2text(role));
     return role;
 }
 
 /*!
  * \internal
  * \brief List nodes where a resource (or any of its children) is
  *
  * \param[in]  rsc      Resource to check
  * \param[out] list     List to add result to
  * \param[in]  current  0 = where allocated, 1 = where running,
  *                      2 = where running or pending
  *
  * \return If list contains only one node, that node, or NULL otherwise
  */
 pe_node_t *
 native_location(const pe_resource_t *rsc, GList **list, int current)
 {
     // @COMPAT: Accept a pe__rsc_node argument instead of int current
     pe_node_t *one = NULL;
     GList *result = NULL;
 
     if (rsc->children) {
         GList *gIter = rsc->children;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child = (pe_resource_t *) gIter->data;
 
             child->fns->location(child, &result, current);
         }
 
     } else if (current) {
 
         if (rsc->running_on) {
             result = g_list_copy(rsc->running_on);
         }
         if ((current == 2) && rsc->pending_node
             && !pe_find_node_id(result, rsc->pending_node->details->id)) {
                 result = g_list_append(result, rsc->pending_node);
         }
 
     } else if (current == FALSE && rsc->allocated_to) {
         result = g_list_append(NULL, rsc->allocated_to);
     }
 
     if (result && (result->next == NULL)) {
         one = result->data;
     }
 
     if (list) {
         GList *gIter = result;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pe_node_t *node = (pe_node_t *) gIter->data;
 
             if (*list == NULL || pe_find_node_id(*list, node->details->id) == NULL) {
                 *list = g_list_append(*list, node);
             }
         }
     }
 
     g_list_free(result);
     return one;
 }
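 
 /* Usage sketch (illustrative): current=1 collects the nodes the resource is
  * running on and returns the node directly when there is exactly one:
  *
  *     GList *nodes = NULL;
  *     pe_node_t *single = native_location(rsc, &nodes, 1);
  *
  *     g_list_free(nodes);   // frees only the list; the nodes are not owned
  */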
 
 static void
 get_rscs_brief(GList *rsc_list, GHashTable * rsc_table, GHashTable * active_table)
 {
     GList *gIter = rsc_list;
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
 
         const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
         const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
 
         int offset = 0;
         char buffer[LINE_MAX];
 
         int *rsc_counter = NULL;
         int *active_counter = NULL;
 
         if (rsc->variant != pcmk_rsc_variant_primitive) {
             continue;
         }
 
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s", class);
         if (pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider)) {
             const char *prov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
 
             if (prov != NULL) {
                 offset += snprintf(buffer + offset, LINE_MAX - offset,
                                    PROVIDER_SEP "%s", prov);
             }
         }
         offset += snprintf(buffer + offset, LINE_MAX - offset, ":%s", kind);
         CRM_LOG_ASSERT(offset > 0);
 
         if (rsc_table) {
             rsc_counter = g_hash_table_lookup(rsc_table, buffer);
             if (rsc_counter == NULL) {
                 rsc_counter = calloc(1, sizeof(int));
                 *rsc_counter = 0;
                 g_hash_table_insert(rsc_table, strdup(buffer), rsc_counter);
             }
             (*rsc_counter)++;
         }
 
         if (active_table) {
             GList *gIter2 = rsc->running_on;
 
             for (; gIter2 != NULL; gIter2 = gIter2->next) {
                 pe_node_t *node = (pe_node_t *) gIter2->data;
                 GHashTable *node_table = NULL;
 
                 if (node->details->unclean == FALSE && node->details->online == FALSE &&
                     pcmk_is_set(rsc->flags, pe_rsc_managed)) {
                     continue;
                 }
 
                 node_table = g_hash_table_lookup(active_table, node->details->uname);
                 if (node_table == NULL) {
                     node_table = pcmk__strkey_table(free, free);
                     g_hash_table_insert(active_table, strdup(node->details->uname), node_table);
                 }
 
                 active_counter = g_hash_table_lookup(node_table, buffer);
                 if (active_counter == NULL) {
                     active_counter = calloc(1, sizeof(int));
                     *active_counter = 0;
                     g_hash_table_insert(node_table, strdup(buffer), active_counter);
                 }
                 (*active_counter)++;
             }
         }
     }
 }
 
 static void
 destroy_node_table(gpointer data)
 {
     GHashTable *node_table = data;
 
     if (node_table) {
         g_hash_table_destroy(node_table);
     }
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 void
 print_rscs_brief(GList *rsc_list, const char *pre_text, long options,
                  void *print_data, gboolean print_all)
 {
     GHashTable *rsc_table = pcmk__strkey_table(free, free);
     GHashTable *active_table = pcmk__strkey_table(free, destroy_node_table);
     GHashTableIter hash_iter;
     char *type = NULL;
     int *rsc_counter = NULL;
 
     get_rscs_brief(rsc_list, rsc_table, active_table);
 
     g_hash_table_iter_init(&hash_iter, rsc_table);
     while (g_hash_table_iter_next(&hash_iter, (gpointer *)&type, (gpointer *)&rsc_counter)) {
         GHashTableIter hash_iter2;
         char *node_name = NULL;
         GHashTable *node_table = NULL;
         int active_counter_all = 0;
 
         g_hash_table_iter_init(&hash_iter2, active_table);
         while (g_hash_table_iter_next(&hash_iter2, (gpointer *)&node_name, (gpointer *)&node_table)) {
             int *active_counter = g_hash_table_lookup(node_table, type);
 
             if (active_counter == NULL || *active_counter == 0) {
                 continue;
 
             } else {
                 active_counter_all += *active_counter;
             }
 
             if (options & pe_print_rsconly) {
                 node_name = NULL;
             }
 
             if (options & pe_print_html) {
                 status_print("<li>\n");
             }
 
             if (print_all) {
                 status_print("%s%d/%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
                              active_counter ? *active_counter : 0,
                              rsc_counter ? *rsc_counter : 0, type,
                              active_counter && (*active_counter > 0) && node_name ? node_name : "");
             } else {
                 status_print("%s%d\t(%s):\tActive %s\n", pre_text ? pre_text : "",
                              active_counter ? *active_counter : 0, type,
                              active_counter && (*active_counter > 0) && node_name ? node_name : "");
             }
 
             if (options & pe_print_html) {
                 status_print("</li>\n");
             }
         }
 
         if (print_all && active_counter_all == 0) {
             if (options & pe_print_html) {
                 status_print("<li>\n");
             }
 
             status_print("%s%d/%d\t(%s):\tActive\n", pre_text ? pre_text : "",
                          active_counter_all,
                          rsc_counter ? *rsc_counter : 0, type);
 
             if (options & pe_print_html) {
                 status_print("</li>\n");
             }
         }
     }
 
     if (rsc_table) {
         g_hash_table_destroy(rsc_table);
         rsc_table = NULL;
     }
     if (active_table) {
         g_hash_table_destroy(active_table);
         active_table = NULL;
     }
 }
 
 int
 pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, uint32_t show_opts)
 {
     GHashTable *rsc_table = pcmk__strkey_table(free, free);
     GHashTable *active_table = pcmk__strkey_table(free, destroy_node_table);
     GList *sorted_rscs;
     int rc = pcmk_rc_no_output;
 
     get_rscs_brief(rsc_list, rsc_table, active_table);
 
     /* Make a list of the rsc_table keys so that it can be sorted.  This is to make sure
      * output order stays consistent between systems.
      */
     sorted_rscs = g_hash_table_get_keys(rsc_table);
     sorted_rscs = g_list_sort(sorted_rscs, (GCompareFunc) strcmp);
 
     for (GList *gIter = sorted_rscs; gIter; gIter = gIter->next) {
         char *type = (char *) gIter->data;
         int *rsc_counter = g_hash_table_lookup(rsc_table, type);
 
         GList *sorted_nodes = NULL;
         int active_counter_all = 0;
 
         /* Also make a list of the active_table keys so it can be sorted.  If there's
          * more than one instance of a type of resource running, we need the nodes to
          * be sorted to make sure output order stays consistent between systems.
          */
         sorted_nodes = g_hash_table_get_keys(active_table);
         sorted_nodes = g_list_sort(sorted_nodes, (GCompareFunc) pcmk__numeric_strcasecmp);
 
         for (GList *gIter2 = sorted_nodes; gIter2; gIter2 = gIter2->next) {
             char *node_name = (char *) gIter2->data;
             GHashTable *node_table = g_hash_table_lookup(active_table, node_name);
             int *active_counter = NULL;
 
             if (node_table == NULL) {
                 continue;
             }
 
             active_counter = g_hash_table_lookup(node_table, type);
 
             if (active_counter == NULL || *active_counter == 0) {
                 continue;
 
             } else {
                 active_counter_all += *active_counter;
             }
 
             if (pcmk_is_set(show_opts, pcmk_show_rsc_only)) {
                 node_name = NULL;
             }
 
             if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
                 out->list_item(out, NULL, "%d/%d\t(%s):\tActive %s",
                                *active_counter,
                                rsc_counter ? *rsc_counter : 0, type,
                                (*active_counter > 0) && node_name ? node_name : "");
             } else {
                 out->list_item(out, NULL, "%d\t(%s):\tActive %s",
                                *active_counter, type,
                                (*active_counter > 0) && node_name ? node_name : "");
             }
 
             rc = pcmk_rc_ok;
         }
 
         if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs) && active_counter_all == 0) {
             out->list_item(out, NULL, "%d/%d\t(%s):\tActive",
                            active_counter_all,
                            rsc_counter ? *rsc_counter : 0, type);
             rc = pcmk_rc_ok;
         }
 
         if (sorted_nodes) {
             g_list_free(sorted_nodes);
         }
     }
 
     if (rsc_table) {
         g_hash_table_destroy(rsc_table);
         rsc_table = NULL;
     }
     if (active_table) {
         g_hash_table_destroy(active_table);
         active_table = NULL;
     }
     if (sorted_rscs) {
         g_list_free(sorted_rscs);
     }
 
     return rc;
 }
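 
 /* Example brief output (illustrative, hypothetical counts and node name), one
  * list item per agent type; the active/total form is used when inactive
  * resources are shown, the single count otherwise:
  *
  *     "1/2\t(ocf:heartbeat:IPaddr2):\tActive node1"
  *     "1\t(ocf:heartbeat:IPaddr2):\tActive node1"
  */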
 
 gboolean
 pe__native_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                        gboolean check_parent)
 {
     if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches) ||
         pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches)) {
         return FALSE;
     } else if (check_parent && rsc->parent) {
         const pe_resource_t *up = pe__const_top_resource(rsc, true);
 
         return up->fns->is_filtered(up, only_rsc, FALSE);
     }
 
     return TRUE;
 }
 
 /*!
  * \internal
  * \brief Get maximum primitive resource instances per node
  *
  * \param[in] rsc  Primitive resource to check
  *
  * \return Maximum number of \p rsc instances that can be active on one node
  */
 unsigned int
 pe__primitive_max_per_node(const pe_resource_t *rsc)
 {
     CRM_ASSERT((rsc != NULL) && (rsc->variant == pcmk_rsc_variant_primitive));
     return 1U;
 }
diff --git a/lib/pengine/pe_output.c b/lib/pengine/pe_output.c
index ebd74db07b..c542f6f35d 100644
--- a/lib/pengine/pe_output.c
+++ b/lib/pengine/pe_output.c
@@ -1,3111 +1,3111 @@
 /*
  * Copyright 2019-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdint.h>
 
 #include <crm/common/xml_internal.h>
 #include <crm/common/output.h>
 #include <crm/common/scheduler_internal.h>
 #include <crm/cib/util.h>
 #include <crm/msg_xml.h>
 #include <crm/pengine/internal.h>
 
 const char *
 pe__resource_description(const pe_resource_t *rsc, uint32_t show_opts)
 {
     const char * desc = NULL;
     // User-supplied description
     if (pcmk_any_flags_set(show_opts, pcmk_show_rsc_only|pcmk_show_description)) {
         desc = crm_element_value(rsc->xml, XML_ATTR_DESC);
     }
     return desc;
 }
 
 /* Never display node attributes whose name starts with one of these prefixes */
 #define FILTER_STR { PCMK__FAIL_COUNT_PREFIX, PCMK__LAST_FAILURE_PREFIX,   \
                      "shutdown", "terminate", "standby", "#", NULL }
 
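 /*!
  * \internal
  * \brief Compare two attribute names for sorting
  *
  * \param[in] a  First attribute name
  * \param[in] b  Second attribute name
  *
  * \return Standard comparison result (negative, zero, or positive)
  */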
 static int
 compare_attribute(gconstpointer a, gconstpointer b)
 {
     int rc;
 
     rc = strcmp((const char *)a, (const char *)b);
 
     return rc;
 }
 
 /*!
  * \internal
  * \brief Determine whether extended information about an attribute should be added.
  *
  * \param[in]     node            Node that ran this resource
  * \param[in,out] rsc_list        List of resources for this node
  * \param[in,out] data_set        Cluster working set
  * \param[in]     attrname        Attribute to find
  * \param[out]    expected_score  Expected value for this attribute
  *
  * \return true if extended information should be printed, false otherwise
  * \note Currently, extended information is only supported for ping/pingd
  *       resources, for which a message will be printed if connectivity is lost
  *       or degraded.
  */
 static bool
 add_extra_info(const pe_node_t *node, GList *rsc_list, pe_working_set_t *data_set,
                const char *attrname, int *expected_score)
 {
     GList *gIter = NULL;
 
     for (gIter = rsc_list; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) gIter->data;
         const char *type = g_hash_table_lookup(rsc->meta, "type");
         const char *name = NULL;
         GHashTable *params = NULL;
 
         if (rsc->children != NULL) {
             if (add_extra_info(node, rsc->children, data_set, attrname,
                                expected_score)) {
                 return true;
             }
         }
 
         if (!pcmk__strcase_any_of(type, "ping", "pingd", NULL)) {
             continue;
         }
 
         params = pe_rsc_params(rsc, node, data_set);
         name = g_hash_table_lookup(params, "name");
 
         if (name == NULL) {
             name = "pingd";
         }
 
         /* Match the resource to the attribute via its "name" parameter */
         if (pcmk__str_eq(name, attrname, pcmk__str_casei)) {
             int host_list_num = 0;
             const char *hosts = g_hash_table_lookup(params, "host_list");
             const char *multiplier = g_hash_table_lookup(params, "multiplier");
             int multiplier_i;
 
             if (hosts) {
                 char **host_list = g_strsplit(hosts, " ", 0);
                 host_list_num = g_strv_length(host_list);
                 g_strfreev(host_list);
             }
 
             if ((multiplier == NULL)
                 || (pcmk__scan_min_int(multiplier, &multiplier_i,
                                        INT_MIN) != pcmk_rc_ok)) {
                 /* The ocf:pacemaker:ping resource agent defaults multiplier to
                  * 1. The agent currently does not handle invalid text, but it
                  * should, and this would be a reasonable choice ...
                  */
                 multiplier_i = 1;
             }
             *expected_score = host_list_num * multiplier_i;
 
             return true;
         }
     }
     return false;
 }
 
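 /*!
  * \internal
  * \brief Add an attribute name to a sorted list unless it should be filtered
  *
  * Names beginning with one of the filtered prefixes (see FILTER_STR) are
  * skipped rather than added.
  *
  * \param[in,out] attr_list  List to add the name to
  * \param[in]     name       Attribute name
  *
  * \return Updated list
  */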
 static GList *
 filter_attr_list(GList *attr_list, char *name)
 {
     int i;
     const char *filt_str[] = FILTER_STR;
 
     CRM_CHECK(name != NULL, return attr_list);
 
     /* Skip automatically managed attributes */
     for (i = 0; filt_str[i] != NULL; i++) {
         if (g_str_has_prefix(name, filt_str[i])) {
             return attr_list;
         }
     }
 
     return g_list_insert_sorted(attr_list, name, compare_attribute);
 }
 
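 /*!
  * \internal
  * \brief Build a sorted list of a resource's operation history entries
  *
  * Notify operations and probes that found the resource not running are
  * skipped; the remaining entries are sorted by call ID.
  *
  * \param[in] rsc_entry  XML with the resource's operation history
  *
  * \return List of operation history XML elements
  */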
 static GList *
 get_operation_list(xmlNode *rsc_entry) {
     GList *op_list = NULL;
     xmlNode *rsc_op = NULL;
 
     for (rsc_op = pcmk__xe_first_child(rsc_entry); rsc_op != NULL;
          rsc_op = pcmk__xe_next(rsc_op)) {
         const char *task = crm_element_value(rsc_op, XML_LRM_ATTR_TASK);
         const char *interval_ms_s = crm_element_value(rsc_op,
                                                       XML_LRM_ATTR_INTERVAL_MS);
         const char *op_rc = crm_element_value(rsc_op, XML_LRM_ATTR_RC);
         int op_rc_i;
 
         pcmk__scan_min_int(op_rc, &op_rc_i, 0);
 
         /* Display 0-interval monitors as "probe" */
         if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)
             && pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
             task = "probe";
         }
 
         /* Ignore notifies and some probes */
         if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)
             || (pcmk__str_eq(task, "probe", pcmk__str_none)
                 && (op_rc_i == CRM_EX_NOT_RUNNING))) {
             continue;
         }
 
         if (pcmk__str_eq((const char *)rsc_op->name, XML_LRM_TAG_RSC_OP, pcmk__str_none)) {
             op_list = g_list_append(op_list, rsc_op);
         }
     }
 
     op_list = g_list_sort(op_list, sort_op_by_callid);
     return op_list;
 }
 
 static void
 add_dump_node(gpointer key, gpointer value, gpointer user_data)
 {
     xmlNodePtr node = user_data;
     pcmk_create_xml_text_node(node, (const char *) key, (const char *) value);
 }
 
 static void
 append_dump_text(gpointer key, gpointer value, gpointer user_data)
 {
     char **dump_text = user_data;
     char *new_text = crm_strdup_printf("%s %s=%s",
                                        *dump_text, (char *)key, (char *)value);
 
     free(*dump_text);
     *dump_text = new_text;
 }
 
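 /*!
  * \internal
  * \brief Get the cluster stack recorded in the CIB
  *
  * \param[in] data_set  Cluster working set
  *
  * \return Value of the cluster-infrastructure property, or "unknown" if unset
  */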
 static const char *
 get_cluster_stack(pe_working_set_t *data_set)
 {
     xmlNode *stack = get_xpath_object("//nvpair[@name='cluster-infrastructure']",
                                       data_set->input, LOG_DEBUG);
     return stack? crm_element_value(stack, XML_NVPAIR_ATTR_VALUE) : "unknown";
 }
 
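 /*!
  * \internal
  * \brief Build a "last change" summary string from CIB update information
  *
  * \param[in] last_written  Time the CIB was last written
  * \param[in] user          User who made the last change
  * \param[in] client        Client that made the last change
  * \param[in] origin        Node where the last change originated
  *
  * \return Newly allocated summary string (empty if no information available)
  */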
 static char *
 last_changed_string(const char *last_written, const char *user,
                     const char *client, const char *origin) {
     if (last_written != NULL || user != NULL || client != NULL || origin != NULL) {
         return crm_strdup_printf("%s%s%s%s%s%s%s",
                                  last_written ? last_written : "",
                                  user ? " by " : "",
                                  user ? user : "",
                                  client ? " via " : "",
                                  client ? client : "",
                                  origin ? " on " : "",
                                  origin ? origin : "");
     } else {
         return strdup("");
     }
 }
 
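 /*!
  * \internal
  * \brief Build a display string for one operation history entry
  *
  * \param[in] xml_op         XML with the operation's history
  * \param[in] task           Action name
  * \param[in] interval_ms_s  Operation interval (in milliseconds) as a string
  * \param[in] rc             OCF exit code of the operation
  * \param[in] print_timing   Whether to include timing information
  *
  * \return Newly allocated display string
  */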
 static char *
 op_history_string(xmlNode *xml_op, const char *task, const char *interval_ms_s,
                   int rc, bool print_timing) {
     const char *call = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
     char *interval_str = NULL;
     char *buf = NULL;
 
     if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) {
         char *pair = pcmk__format_nvpair("interval", interval_ms_s, "ms");
         interval_str = crm_strdup_printf(" %s", pair);
         free(pair);
     }
 
     if (print_timing) {
         char *last_change_str = NULL;
         char *exec_str = NULL;
         char *queue_str = NULL;
 
         const char *value = NULL;
 
         time_t epoch = 0;
 
         if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE, &epoch) == pcmk_ok)
             && (epoch > 0)) {
             char *epoch_str = pcmk__epoch2str(&epoch, 0);
 
             last_change_str = crm_strdup_printf(" %s=\"%s\"",
                                                 XML_RSC_OP_LAST_CHANGE,
                                                 pcmk__s(epoch_str, ""));
             free(epoch_str);
         }
 
         value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
         if (value) {
             char *pair = pcmk__format_nvpair(XML_RSC_OP_T_EXEC, value, "ms");
             exec_str = crm_strdup_printf(" %s", pair);
             free(pair);
         }
 
         value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
         if (value) {
             char *pair = pcmk__format_nvpair(XML_RSC_OP_T_QUEUE, value, "ms");
             queue_str = crm_strdup_printf(" %s", pair);
             free(pair);
         }
 
         buf = crm_strdup_printf("(%s) %s:%s%s%s%s rc=%d (%s)", call, task,
                                 interval_str ? interval_str : "",
                                 last_change_str ? last_change_str : "",
                                 exec_str ? exec_str : "",
                                 queue_str ? queue_str : "",
                                 rc, services_ocf_exitcode_str(rc));
 
         if (last_change_str) {
             free(last_change_str);
         }
 
         if (exec_str) {
             free(exec_str);
         }
 
         if (queue_str) {
             free(queue_str);
         }
     } else {
         buf = crm_strdup_printf("(%s) %s%s%s", call, task,
                                 interval_str ? ":" : "",
                                 interval_str ? interval_str : "");
     }
 
     if (interval_str) {
         free(interval_str);
     }
 
     return buf;
 }
 
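 /*!
  * \internal
  * \brief Build a string describing a resource and its failure information
  *
  * \param[in] rsc           Resource (NULL for an orphaned resource)
  * \param[in] rsc_id        ID of the resource as configured
  * \param[in] all           Whether to include details even without failures
  * \param[in] failcount     Resource's current fail count
  * \param[in] last_failure  Time of the resource's most recent failure, if any
  *
  * \return Newly allocated description string
  */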
 static char *
 resource_history_string(pe_resource_t *rsc, const char *rsc_id, bool all,
                         int failcount, time_t last_failure) {
     char *buf = NULL;
 
     if (rsc == NULL) {
         buf = crm_strdup_printf("%s: orphan", rsc_id);
     } else if (all || failcount || last_failure > 0) {
         char *failcount_s = NULL;
         char *lastfail_s = NULL;
 
         if (failcount > 0) {
             failcount_s = crm_strdup_printf(" %s=%d", PCMK__FAIL_COUNT_PREFIX,
                                             failcount);
         } else {
             failcount_s = strdup("");
         }
         if (last_failure > 0) {
             buf = pcmk__epoch2str(&last_failure, 0);
             lastfail_s = crm_strdup_printf(" %s='%s'",
                                            PCMK__LAST_FAILURE_PREFIX, buf);
             free(buf);
         }
 
         buf = crm_strdup_printf("%s: migration-threshold=%d%s%s",
                                 rsc_id, rsc->migration_threshold, failcount_s,
                                 lastfail_s? lastfail_s : "");
         free(failcount_s);
         free(lastfail_s);
     } else {
         buf = crm_strdup_printf("%s:", rsc_id);
     }
 
     return buf;
 }
 
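 /*!
  * \internal
  * \brief Get a node's Pacemaker feature set, if applicable
  *
  * \param[in] node  Node to check
  *
  * \return Node's feature set attribute ("<3.15.1" if the attribute is not
  *         set), or NULL if the node is offline or not a full cluster node
  */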
 static const char *
 get_node_feature_set(pe_node_t *node) {
     const char *feature_set = NULL;
 
     if (node->details->online && !pe__is_guest_or_remote_node(node)) {
         feature_set = g_hash_table_lookup(node->details->attrs,
                                           CRM_ATTR_FEATURE_SET);
         /* The feature set attribute has been present since 3.15.1. If it is
          * missing, the node must be running an earlier version.
          */
         if (feature_set == NULL) {
             feature_set = "<3.15.1";
         }
     }
     return feature_set;
 }
 
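 /*!
  * \internal
  * \brief Check whether cluster nodes are running different feature sets
  *
  * \param[in] data_set  Cluster working set
  *
  * \return true if any two nodes report different feature sets, else false
  */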
 static bool
 is_mixed_version(pe_working_set_t *data_set) {
     const char *feature_set = NULL;
     for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = gIter->data;
         const char *node_feature_set = get_node_feature_set(node);
         if (node_feature_set != NULL) {
             if (feature_set == NULL) {
                 feature_set = node_feature_set;
             } else if (strcmp(feature_set, node_feature_set) != 0) {
                 return true;
             }
         }
     }
     return false;
 }
 
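 /*!
  * \internal
  * \brief Get a resource's configuration XML as a formatted string
  *
  * \param[in] rsc  Resource whose XML to dump
  * \param[in] raw  If true, prefer the resource's original XML when it is set
  *
  * \return Newly allocated string of formatted XML
  */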
 static char *
 formatted_xml_buf(pe_resource_t *rsc, bool raw)
 {
     if (raw) {
         return dump_xml_formatted(rsc->orig_xml ? rsc->orig_xml : rsc->xml);
     } else {
         return dump_xml_formatted(rsc->xml);
     }
 }
 
 PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *",
                   "enum pcmk_pacemakerd_state", "uint32_t", "uint32_t")
 static int
 cluster_summary(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     enum pcmk_pacemakerd_state pcmkd_state =
         (enum pcmk_pacemakerd_state) va_arg(args, int);
     uint32_t section_opts = va_arg(args, uint32_t);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     int rc = pcmk_rc_no_output;
     const char *stack_s = get_cluster_stack(data_set);
 
     if (pcmk_is_set(section_opts, pcmk_section_stack)) {
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-stack", stack_s, pcmkd_state);
     }
 
     if (pcmk_is_set(section_opts, pcmk_section_dc)) {
         xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
                                                data_set->input, LOG_DEBUG);
         const char *dc_version_s = dc_version?
                                    crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
                                    : NULL;
         const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
         char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
         bool mixed_version = is_mixed_version(data_set);
 
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-dc", data_set->dc_node, quorum,
                      dc_version_s, dc_name, mixed_version);
         free(dc_name);
     }
 
     if (pcmk_is_set(section_opts, pcmk_section_times)) {
         const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
         const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
         const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
         const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
 
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-times",
                      data_set->localhost, last_written, user, client, origin);
     }
 
     if (pcmk_is_set(section_opts, pcmk_section_counts)) {
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-counts", g_list_length(data_set->nodes),
                      data_set->ninstances, data_set->disabled_resources,
                      data_set->blocked_resources);
     }
 
     if (pcmk_is_set(section_opts, pcmk_section_options)) {
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-options", data_set);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
 
     if (pcmk_is_set(section_opts, pcmk_section_maint_mode)) {
         if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
             rc = pcmk_rc_ok;
         }
     }
 
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("cluster-summary", "pe_working_set_t *",
                   "enum pcmk_pacemakerd_state", "uint32_t", "uint32_t")
 static int
 cluster_summary_html(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     enum pcmk_pacemakerd_state pcmkd_state =
         (enum pcmk_pacemakerd_state) va_arg(args, int);
     uint32_t section_opts = va_arg(args, uint32_t);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     int rc = pcmk_rc_no_output;
     const char *stack_s = get_cluster_stack(data_set);
 
     if (pcmk_is_set(section_opts, pcmk_section_stack)) {
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-stack", stack_s, pcmkd_state);
     }
 
     /* Always show the DC section if there is no DC, even if not requested */
     if (data_set->dc_node == NULL || pcmk_is_set(section_opts, pcmk_section_dc)) {
         xmlNode *dc_version = get_xpath_object("//nvpair[@name='dc-version']",
                                                data_set->input, LOG_DEBUG);
         const char *dc_version_s = dc_version?
                                    crm_element_value(dc_version, XML_NVPAIR_ATTR_VALUE)
                                    : NULL;
         const char *quorum = crm_element_value(data_set->input, XML_ATTR_HAVE_QUORUM);
         char *dc_name = data_set->dc_node ? pe__node_display_name(data_set->dc_node, pcmk_is_set(show_opts, pcmk_show_node_id)) : NULL;
         bool mixed_version = is_mixed_version(data_set);
 
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-dc", data_set->dc_node, quorum,
                      dc_version_s, dc_name, mixed_version);
         free(dc_name);
     }
 
     if (pcmk_is_set(section_opts, pcmk_section_times)) {
         const char *last_written = crm_element_value(data_set->input, XML_CIB_ATTR_WRITTEN);
         const char *user = crm_element_value(data_set->input, XML_ATTR_UPDATE_USER);
         const char *client = crm_element_value(data_set->input, XML_ATTR_UPDATE_CLIENT);
         const char *origin = crm_element_value(data_set->input, XML_ATTR_UPDATE_ORIG);
 
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-times",
                      data_set->localhost, last_written, user, client, origin);
     }
 
     if (pcmk_is_set(section_opts, pcmk_section_counts)) {
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Cluster Summary");
         out->message(out, "cluster-counts", g_list_length(data_set->nodes),
                      data_set->ninstances, data_set->disabled_resources,
                      data_set->blocked_resources);
     }
 
     if (pcmk_is_set(section_opts, pcmk_section_options)) {
         /* Kind of a hack - close the list we may have opened earlier in this
          * function so we can put all the options into their own list.  We
          * only want to do this on HTML output, though.
          */
         PCMK__OUTPUT_LIST_FOOTER(out, rc);
 
         out->begin_list(out, NULL, NULL, "Config Options");
         out->message(out, "cluster-options", data_set);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
 
     if (pcmk_is_set(section_opts, pcmk_section_maint_mode)) {
         if (out->message(out, "maint-mode", data_set->flags) == pcmk_rc_ok) {
             rc = pcmk_rc_ok;
         }
     }
 
     return rc;
 }
 
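 /*!
  * \internal
  * \brief Build a display name for a node
  *
  * When detail is requested, the name includes a guest node's host and the
  * node ID when it differs from the node name.
  *
  * \param[in] node          Node to display
  * \param[in] print_detail  Whether to include additional detail
  *
  * \return Newly allocated display name (never NULL)
  */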
 char *
 pe__node_display_name(pe_node_t *node, bool print_detail)
 {
     char *node_name;
     const char *node_host = NULL;
     const char *node_id = NULL;
     int name_len;
 
     CRM_ASSERT((node != NULL) && (node->details != NULL) && (node->details->uname != NULL));
 
     /* Host is displayed only if this is a guest node and detail is requested */
     if (print_detail && pe__is_guest_node(node)) {
         const pe_resource_t *container = node->details->remote_rsc->container;
         const pe_node_t *host_node = pe__current_node(container);
 
         if (host_node && host_node->details) {
             node_host = host_node->details->uname;
         }
         if (node_host == NULL) {
             node_host = ""; /* so we at least get "uname@" to indicate guest */
         }
     }
 
     /* Node ID is displayed if different from uname and detail is requested */
     if (print_detail && !pcmk__str_eq(node->details->uname, node->details->id, pcmk__str_casei)) {
         node_id = node->details->id;
     }
 
     /* Determine name length */
     name_len = strlen(node->details->uname) + 1;
     if (node_host) {
         name_len += strlen(node_host) + 1; /* "@node_host" */
     }
     if (node_id) {
         name_len += strlen(node_id) + 3; /* + " (node_id)" */
     }
 
     /* Allocate and populate display name */
     node_name = malloc(name_len);
     CRM_ASSERT(node_name != NULL);
     strcpy(node_name, node->details->uname);
     if (node_host) {
         strcat(node_name, "@");
         strcat(node_name, node_host);
     }
     if (node_id) {
         strcat(node_name, " (");
         strcat(node_name, node_id);
         strcat(node_name, ")");
     }
     return node_name;
 }
 
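 /*!
  * \internal
  * \brief Create an XML element with optional name/value attributes
  *
  * The element is created under the output object's current XML parent. The
  * variadic arguments are \p pairs_count pairs of attribute name and value
  * strings; pairs with a NULL name or value are skipped. For example, a
  * caller might add a two-attribute element with:
  * pe__name_and_nvpairs_xml(out, false, "node", 2, "name", uname, "id", id);
  *
  * \param[in,out] out          Output object
  * \param[in]     is_list      If true, push the new element as the new parent
  * \param[in]     tag_name     Name of the element to create
  * \param[in]     pairs_count  Number of name/value pairs that follow
  *
  * \return Standard Pacemaker return code
  */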
 int
 pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list, const char *tag_name
                          , size_t pairs_count, ...)
 {
     xmlNodePtr xml_node = NULL;
     va_list args;
 
     CRM_ASSERT(tag_name != NULL);
 
     xml_node = pcmk__output_xml_peek_parent(out);
     CRM_ASSERT(xml_node != NULL);
     xml_node = create_xml_node(xml_node, tag_name);
 
     va_start(args, pairs_count);
     while (pairs_count--) {
         const char *param_name = va_arg(args, const char *);
         const char *param_value = va_arg(args, const char *);

         if (param_name && param_value) {
             crm_xml_add(xml_node, param_name, param_value);
         }
     }
     va_end(args);
 
     if (is_list) {
         pcmk__output_xml_push_parent(out, xml_node);
     }
     return pcmk_rc_ok;
 }
 
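 /*!
  * \internal
  * \brief Get a displayable description of a resource role for ban messages
  *
  * \param[in] role  Resource role
  *
  * \return Role description (with trailing space) if \p role is the promoted
  *         role, otherwise an empty string
  */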
 static const char *
 role_desc(enum rsc_role_e role)
 {
     if (role == pcmk_role_promoted) {
 #ifdef PCMK__COMPAT_2_0
         return "as " PCMK__ROLE_PROMOTED_LEGACY " ";
 #else
         return "in " PCMK__ROLE_PROMOTED " role ";
 #endif
     }
     return "";
 }
 
 PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
 static int
 ban_html(pcmk__output_t *out, va_list args) {
     pe_node_t *pe_node = va_arg(args, pe_node_t *);
     pe__location_t *location = va_arg(args, pe__location_t *);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     char *node_name = pe__node_display_name(pe_node,
                                             pcmk_is_set(show_opts, pcmk_show_node_id));
     char *buf = crm_strdup_printf("%s\tprevents %s from running %son %s",
                                   location->id, location->rsc_lh->id,
                                   role_desc(location->role_filter), node_name);
 
     pcmk__output_create_html_node(out, "li", NULL, NULL, buf);
 
     free(node_name);
     free(buf);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
 static int
 ban_text(pcmk__output_t *out, va_list args) {
     pe_node_t *pe_node = va_arg(args, pe_node_t *);
     pe__location_t *location = va_arg(args, pe__location_t *);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     char *node_name = pe__node_display_name(pe_node,
                                             pcmk_is_set(show_opts, pcmk_show_node_id));
     out->list_item(out, NULL, "%s\tprevents %s from running %son %s",
                    location->id, location->rsc_lh->id,
                    role_desc(location->role_filter), node_name);
 
     free(node_name);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ban", "pe_node_t *", "pe__location_t *", "uint32_t")
 static int
 ban_xml(pcmk__output_t *out, va_list args) {
     pe_node_t *pe_node = va_arg(args, pe_node_t *);
     pe__location_t *location = va_arg(args, pe__location_t *);
     uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t);
 
     const char *promoted_only = pcmk__btoa(location->role_filter == pcmk_role_promoted);
     char *weight_s = pcmk__itoa(pe_node->weight);
 
     pcmk__output_create_xml_node(out, "ban",
                                  "id", location->id,
                                  "resource", location->rsc_lh->id,
                                  "node", pe_node->details->uname,
                                  "weight", weight_s,
                                  "promoted-only", promoted_only,
                                  /* This is a deprecated alias for
                                   * promoted_only. Removing it will break
                                   * backward compatibility of the API schema,
                                   * which will require an API schema major
                                   * version bump.
                                   */
                                  "master_only", promoted_only,
                                  NULL);
 
     free(weight_s);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ban-list", "pe_working_set_t *", "const char *", "GList *",
                   "uint32_t", "bool")
 static int
 ban_list(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     const char *prefix = va_arg(args, const char *);
     GList *only_rsc = va_arg(args, GList *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool print_spacer = va_arg(args, int);
 
     GList *gIter, *gIter2;
     int rc = pcmk_rc_no_output;
 
     /* Print each ban */
     for (gIter = data_set->placement_constraints; gIter != NULL; gIter = gIter->next) {
         pe__location_t *location = gIter->data;
         const pe_resource_t *rsc = location->rsc_lh;
 
         if (prefix != NULL && !g_str_has_prefix(location->id, prefix)) {
             continue;
         }
 
         if (!pcmk__str_in_list(rsc_printable_id(rsc), only_rsc,
                                pcmk__str_star_matches)
             && !pcmk__str_in_list(rsc_printable_id(pe__const_top_resource(rsc, false)),
                                   only_rsc, pcmk__str_star_matches)) {
             continue;
         }
 
         for (gIter2 = location->node_list_rh; gIter2 != NULL; gIter2 = gIter2->next) {
             pe_node_t *node = (pe_node_t *) gIter2->data;
 
             if (node->weight < 0) {
                 PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Negative Location Constraints");
                 out->message(out, "ban", node, location, show_opts);
             }
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
 static int
 cluster_counts_html(pcmk__output_t *out, va_list args) {
     unsigned int nnodes = va_arg(args, unsigned int);
     int nresources = va_arg(args, int);
     int ndisabled = va_arg(args, int);
     int nblocked = va_arg(args, int);
 
     xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "li", NULL);
     xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "li", NULL);
 
     char *nnodes_str = crm_strdup_printf("%d node%s configured",
                                          nnodes, pcmk__plural_s(nnodes));
 
     pcmk_create_html_node(nodes_node, "span", NULL, NULL, nnodes_str);
     free(nnodes_str);
 
     if (ndisabled && nblocked) {
         char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
                                     nresources, pcmk__plural_s(nresources),
                                     ndisabled);
         pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
         free(s);
 
         pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED");
 
         s = crm_strdup_printf(", %d ", nblocked);
         pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
         free(s);
 
         pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED");
         pcmk_create_html_node(resources_node, "span", NULL, NULL,
                               " from further action due to failure)");
     } else if (ndisabled && !nblocked) {
         char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
                                     nresources, pcmk__plural_s(nresources),
                                     ndisabled);
         pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
         free(s);
 
         pcmk_create_html_node(resources_node, "span", NULL, "bold", "DISABLED");
         pcmk_create_html_node(resources_node, "span", NULL, NULL, ")");
     } else if (!ndisabled && nblocked) {
         char *s = crm_strdup_printf("%d resource instance%s configured (%d ",
                                     nresources, pcmk__plural_s(nresources),
                                     nblocked);
         pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
         free(s);
 
         pcmk_create_html_node(resources_node, "span", NULL, "bold", "BLOCKED");
         pcmk_create_html_node(resources_node, "span", NULL, NULL,
                               " from further action due to failure)");
     } else {
         char *s = crm_strdup_printf("%d resource instance%s configured",
                                     nresources, pcmk__plural_s(nresources));
         pcmk_create_html_node(resources_node, "span", NULL, NULL, s);
         free(s);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
 static int
 cluster_counts_text(pcmk__output_t *out, va_list args) {
     unsigned int nnodes = va_arg(args, unsigned int);
     int nresources = va_arg(args, int);
     int ndisabled = va_arg(args, int);
     int nblocked = va_arg(args, int);
 
     out->list_item(out, NULL, "%d node%s configured",
                    nnodes, pcmk__plural_s(nnodes));
 
     if (ndisabled && nblocked) {
         out->list_item(out, NULL, "%d resource instance%s configured "
                                   "(%d DISABLED, %d BLOCKED from "
                                   "further action due to failure)",
                        nresources, pcmk__plural_s(nresources), ndisabled,
                        nblocked);
     } else if (ndisabled && !nblocked) {
         out->list_item(out, NULL, "%d resource instance%s configured "
                                   "(%d DISABLED)",
                        nresources, pcmk__plural_s(nresources), ndisabled);
     } else if (!ndisabled && nblocked) {
         out->list_item(out, NULL, "%d resource instance%s configured "
                                   "(%d BLOCKED from further action "
                                   "due to failure)",
                        nresources, pcmk__plural_s(nresources), nblocked);
     } else {
         out->list_item(out, NULL, "%d resource instance%s configured",
                        nresources, pcmk__plural_s(nresources));
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-counts", "unsigned int", "int", "int", "int")
 static int
 cluster_counts_xml(pcmk__output_t *out, va_list args) {
     unsigned int nnodes = va_arg(args, unsigned int);
     int nresources = va_arg(args, int);
     int ndisabled = va_arg(args, int);
     int nblocked = va_arg(args, int);
 
     xmlNodePtr nodes_node = pcmk__output_create_xml_node(out, "nodes_configured", NULL);
     xmlNodePtr resources_node = pcmk__output_create_xml_node(out, "resources_configured", NULL);
 
     char *s = pcmk__itoa(nnodes);
     crm_xml_add(nodes_node, "number", s);
     free(s);
 
     s = pcmk__itoa(nresources);
     crm_xml_add(resources_node, "number", s);
     free(s);
 
     s = pcmk__itoa(ndisabled);
     crm_xml_add(resources_node, "disabled", s);
     free(s);
 
     s = pcmk__itoa(nblocked);
     crm_xml_add(resources_node, "blocked", s);
     free(s);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
                   "char *", "int")
 static int
 cluster_dc_html(pcmk__output_t *out, va_list args) {
     pe_node_t *dc = va_arg(args, pe_node_t *);
     const char *quorum = va_arg(args, const char *);
     const char *dc_version_s = va_arg(args, const char *);
     char *dc_name = va_arg(args, char *);
     bool mixed_version = va_arg(args, int);
 
     xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
 
     pcmk_create_html_node(node, "span", NULL, "bold", "Current DC: ");
 
     if (dc) {
         char *buf = crm_strdup_printf("%s (version %s) -", dc_name,
                                       dc_version_s ? dc_version_s : "unknown");
         pcmk_create_html_node(node, "span", NULL, NULL, buf);
         free(buf);
 
         if (mixed_version) {
             pcmk_create_html_node(node, "span", NULL, "warning",
                                   " MIXED-VERSION");
         }
         pcmk_create_html_node(node, "span", NULL, NULL, " partition");
         if (crm_is_true(quorum)) {
             pcmk_create_html_node(node, "span", NULL, NULL, " with");
         } else {
             pcmk_create_html_node(node, "span", NULL, "warning", " WITHOUT");
         }
         pcmk_create_html_node(node, "span", NULL, NULL, " quorum");
     } else {
         pcmk_create_html_node(node, "span", NULL, "warning", "NONE");
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
                   "char *", "int")
 static int
 cluster_dc_text(pcmk__output_t *out, va_list args) {
     pe_node_t *dc = va_arg(args, pe_node_t *);
     const char *quorum = va_arg(args, const char *);
     const char *dc_version_s = va_arg(args, const char *);
     char *dc_name = va_arg(args, char *);
     bool mixed_version = va_arg(args, int);
 
     if (dc) {
         out->list_item(out, "Current DC",
                        "%s (version %s) - %spartition %s quorum",
                        dc_name, dc_version_s ? dc_version_s : "unknown",
                        mixed_version ? "MIXED-VERSION " : "",
                        crm_is_true(quorum) ? "with" : "WITHOUT");
     } else {
         out->list_item(out, "Current DC", "NONE");
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-dc", "pe_node_t *", "const char *", "const char *",
                   "char *", "int")
 static int
 cluster_dc_xml(pcmk__output_t *out, va_list args) {
     pe_node_t *dc = va_arg(args, pe_node_t *);
     const char *quorum = va_arg(args, const char *);
     const char *dc_version_s = va_arg(args, const char *);
     char *dc_name G_GNUC_UNUSED = va_arg(args, char *);
     bool mixed_version = va_arg(args, int);
 
     if (dc) {
         pcmk__output_create_xml_node(out, "current_dc",
                                      "present", "true",
                                      "version", dc_version_s ? dc_version_s : "",
                                      "name", dc->details->uname,
                                      "id", dc->details->id,
                                      "with_quorum", pcmk__btoa(crm_is_true(quorum)),
                                      "mixed_version", pcmk__btoa(mixed_version),
                                      NULL);
     } else {
         pcmk__output_create_xml_node(out, "current_dc",
                                      "present", "false",
                                      NULL);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("maint-mode", "unsigned long long int")
 static int
 cluster_maint_mode_text(pcmk__output_t *out, va_list args) {
     unsigned long long flags = va_arg(args, unsigned long long);
 
     if (pcmk_is_set(flags, pe_flag_maintenance_mode)) {
         pcmk__formatted_printf(out, "\n              *** Resource management is DISABLED ***\n");
         pcmk__formatted_printf(out, "  The cluster will not attempt to start, stop or recover services\n");
         return pcmk_rc_ok;
     } else if (pcmk_is_set(flags, pe_flag_stop_everything)) {
         pcmk__formatted_printf(out, "\n    *** Resource management is DISABLED ***\n");
         pcmk__formatted_printf(out, "  The cluster will keep all resources stopped\n");
         return pcmk_rc_ok;
     } else {
         return pcmk_rc_no_output;
     }
 }
 
 PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
 static int
 cluster_options_html(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
 
     out->list_item(out, NULL, "STONITH of failed nodes %s",
                    pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
 
     out->list_item(out, NULL, "Cluster is %s",
                    pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
 
     switch (data_set->no_quorum_policy) {
         case no_quorum_freeze:
             out->list_item(out, NULL, "No quorum policy: Freeze resources");
             break;
 
         case no_quorum_stop:
             out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
             break;
 
         case no_quorum_demote:
             out->list_item(out, NULL, "No quorum policy: Demote promotable "
                            "resources and stop all other resources");
             break;
 
         case no_quorum_ignore:
             out->list_item(out, NULL, "No quorum policy: Ignore");
             break;
 
         case no_quorum_suicide:
             out->list_item(out, NULL, "No quorum policy: Suicide");
             break;
     }
 
     if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
         xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
 
         pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
         pcmk_create_html_node(node, "span", NULL, "bold", "DISABLED");
         pcmk_create_html_node(node, "span", NULL, NULL,
                               " (the cluster will not attempt to start, stop, or recover services)");
     } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
         xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
 
         pcmk_create_html_node(node, "span", NULL, NULL, "Resource management: ");
         pcmk_create_html_node(node, "span", NULL, "bold", "STOPPED");
         pcmk_create_html_node(node, "span", NULL, NULL,
                               " (the cluster will keep all resources stopped)");
     } else {
         out->list_item(out, NULL, "Resource management: enabled");
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
 static int
 cluster_options_log(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
 
     if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
         return out->info(out, "Resource management is DISABLED.  The cluster will not attempt to start, stop or recover services.");
     } else if (pcmk_is_set(data_set->flags, pe_flag_stop_everything)) {
         return out->info(out, "Resource management is DISABLED.  The cluster has stopped all resources.");
     } else {
         return pcmk_rc_no_output;
     }
 }
 
 PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
 static int
 cluster_options_text(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
 
     out->list_item(out, NULL, "STONITH of failed nodes %s",
                    pcmk_is_set(data_set->flags, pe_flag_stonith_enabled) ? "enabled" : "disabled");
 
     out->list_item(out, NULL, "Cluster is %s",
                    pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster) ? "symmetric" : "asymmetric");
 
     switch (data_set->no_quorum_policy) {
         case no_quorum_freeze:
             out->list_item(out, NULL, "No quorum policy: Freeze resources");
             break;
 
         case no_quorum_stop:
             out->list_item(out, NULL, "No quorum policy: Stop ALL resources");
             break;
 
         case no_quorum_demote:
             out->list_item(out, NULL, "No quorum policy: Demote promotable "
                            "resources and stop all other resources");
             break;
 
         case no_quorum_ignore:
             out->list_item(out, NULL, "No quorum policy: Ignore");
             break;
 
         case no_quorum_suicide:
             out->list_item(out, NULL, "No quorum policy: Suicide");
             break;
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-options", "pe_working_set_t *")
 static int
 cluster_options_xml(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
 
     const char *no_quorum_policy = NULL;
     char *stonith_timeout_str = pcmk__itoa(data_set->stonith_timeout);
     char *priority_fencing_delay_str = pcmk__itoa(data_set->priority_fencing_delay * 1000);
 
     switch (data_set->no_quorum_policy) {
         case no_quorum_freeze:
             no_quorum_policy = "freeze";
             break;
 
         case no_quorum_stop:
             no_quorum_policy = "stop";
             break;
 
         case no_quorum_demote:
             no_quorum_policy = "demote";
             break;
 
         case no_quorum_ignore:
             no_quorum_policy = "ignore";
             break;
 
         case no_quorum_suicide:
             no_quorum_policy = "suicide";
             break;
     }
 
     pcmk__output_create_xml_node(out, "cluster_options",
                                  "stonith-enabled", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)),
                                  "symmetric-cluster", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)),
                                  "no-quorum-policy", no_quorum_policy,
                                  "maintenance-mode", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)),
                                  "stop-all-resources", pcmk__btoa(pcmk_is_set(data_set->flags, pe_flag_stop_everything)),
                                  "stonith-timeout-ms", stonith_timeout_str,
                                  "priority-fencing-delay-ms", priority_fencing_delay_str,
                                  NULL);
     free(stonith_timeout_str);
     free(priority_fencing_delay_str);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-stack", "const char *", "enum pcmk_pacemakerd_state")
 static int
 cluster_stack_html(pcmk__output_t *out, va_list args) {
     const char *stack_s = va_arg(args, const char *);
     enum pcmk_pacemakerd_state pcmkd_state =
         (enum pcmk_pacemakerd_state) va_arg(args, int);
 
     xmlNodePtr node = pcmk__output_create_xml_node(out, "li", NULL);
 
     pcmk_create_html_node(node, "span", NULL, "bold", "Stack: ");
     pcmk_create_html_node(node, "span", NULL, NULL, stack_s);
 
     if (pcmkd_state != pcmk_pacemakerd_state_invalid) {
         pcmk_create_html_node(node, "span", NULL, NULL, " (");
         pcmk_create_html_node(node, "span", NULL, NULL,
                               pcmk__pcmkd_state_enum2friendly(pcmkd_state));
         pcmk_create_html_node(node, "span", NULL, NULL, ")");
     }
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-stack", "const char *", "enum pcmk_pacemakerd_state")
 static int
 cluster_stack_text(pcmk__output_t *out, va_list args) {
     const char *stack_s = va_arg(args, const char *);
     enum pcmk_pacemakerd_state pcmkd_state =
         (enum pcmk_pacemakerd_state) va_arg(args, int);
 
     if (pcmkd_state != pcmk_pacemakerd_state_invalid) {
         out->list_item(out, "Stack", "%s (%s)",
                        stack_s, pcmk__pcmkd_state_enum2friendly(pcmkd_state));
     } else {
         out->list_item(out, "Stack", "%s", stack_s);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-stack", "const char *", "enum pcmk_pacemakerd_state")
 static int
 cluster_stack_xml(pcmk__output_t *out, va_list args) {
     const char *stack_s = va_arg(args, const char *);
     enum pcmk_pacemakerd_state pcmkd_state =
         (enum pcmk_pacemakerd_state) va_arg(args, int);
 
     const char *state_s = NULL;
 
     if (pcmkd_state != pcmk_pacemakerd_state_invalid) {
         state_s = pcmk_pacemakerd_api_daemon_state_enum2text(pcmkd_state);
     }
 
     pcmk__output_create_xml_node(out, "stack",
                                  "type", stack_s,
                                  "pacemakerd-state", state_s,
                                  NULL);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *",
                   "const char *", "const char *", "const char *")
 static int
 cluster_times_html(pcmk__output_t *out, va_list args) {
     const char *our_nodename = va_arg(args, const char *);
     const char *last_written = va_arg(args, const char *);
     const char *user = va_arg(args, const char *);
     const char *client = va_arg(args, const char *);
     const char *origin = va_arg(args, const char *);
 
     xmlNodePtr updated_node = pcmk__output_create_xml_node(out, "li", NULL);
     xmlNodePtr changed_node = pcmk__output_create_xml_node(out, "li", NULL);
 
     char *time_s = pcmk__epoch2str(NULL, 0);
 
     pcmk_create_html_node(updated_node, "span", NULL, "bold", "Last updated: ");
     pcmk_create_html_node(updated_node, "span", NULL, NULL, time_s);
 
     if (our_nodename != NULL) {
         pcmk_create_html_node(updated_node, "span", NULL, NULL, " on ");
         pcmk_create_html_node(updated_node, "span", NULL, NULL, our_nodename);
     }
 
     free(time_s);
     time_s = last_changed_string(last_written, user, client, origin);
 
     pcmk_create_html_node(changed_node, "span", NULL, "bold", "Last change: ");
     pcmk_create_html_node(changed_node, "span", NULL, NULL, time_s);
 
     free(time_s);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *",
                   "const char *", "const char *", "const char *")
 static int
 cluster_times_xml(pcmk__output_t *out, va_list args) {
     const char *our_nodename = va_arg(args, const char *);
     const char *last_written = va_arg(args, const char *);
     const char *user = va_arg(args, const char *);
     const char *client = va_arg(args, const char *);
     const char *origin = va_arg(args, const char *);
 
     char *time_s = pcmk__epoch2str(NULL, 0);
 
     pcmk__output_create_xml_node(out, "last_update",
                                  "time", time_s,
                                  "origin", our_nodename,
                                  NULL);
 
     pcmk__output_create_xml_node(out, "last_change",
                                  "time", last_written ? last_written : "",
                                  "user", user ? user : "",
                                  "client", client ? client : "",
                                  "origin", origin ? origin : "",
                                  NULL);
 
     free(time_s);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("cluster-times", "const char *", "const char *",
                   "const char *", "const char *", "const char *")
 static int
 cluster_times_text(pcmk__output_t *out, va_list args) {
     const char *our_nodename = va_arg(args, const char *);
     const char *last_written = va_arg(args, const char *);
     const char *user = va_arg(args, const char *);
     const char *client = va_arg(args, const char *);
     const char *origin = va_arg(args, const char *);
 
     char *time_s = pcmk__epoch2str(NULL, 0);
 
     out->list_item(out, "Last updated", "%s%s%s",
                    time_s, (our_nodename != NULL)? " on " : "",
                    pcmk__s(our_nodename, ""));
 
     free(time_s);
     time_s = last_changed_string(last_written, user, client, origin);
 
     out->list_item(out, "Last change", " %s", time_s);
 
     free(time_s);
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Display a failed action in less-technical natural language
  *
  * \param[in,out] out          Output object to use for display
  * \param[in]     xml_op       XML containing failed action
  * \param[in]     op_key       Operation key of failed action
  * \param[in]     node_name    Where failed action occurred
  * \param[in]     rc           OCF exit code of failed action
  * \param[in]     status       Execution status of failed action
  * \param[in]     exit_reason  Exit reason given for failed action
  * \param[in]     exec_time    String containing execution time in milliseconds
  */
 static void
 failed_action_friendly(pcmk__output_t *out, const xmlNode *xml_op,
                        const char *op_key, const char *node_name, int rc,
                        int status, const char *exit_reason,
                        const char *exec_time)
 {
     char *rsc_id = NULL;
     char *task = NULL;
     guint interval_ms = 0;
     time_t last_change_epoch = 0;
     GString *str = NULL;
 
     if (pcmk__str_empty(op_key)
         || !parse_op_key(op_key, &rsc_id, &task, &interval_ms)) {
         rsc_id = strdup("unknown resource");
         task = strdup("unknown action");
         interval_ms = 0;
     }
     CRM_ASSERT((rsc_id != NULL) && (task != NULL));
 
     str = g_string_sized_new(256); // Should be sufficient for most messages
 
     pcmk__g_strcat(str, rsc_id, " ", NULL);
 
     if (interval_ms != 0) {
         pcmk__g_strcat(str, pcmk__readable_interval(interval_ms), "-interval ",
                        NULL);
     }
     pcmk__g_strcat(str, pcmk__readable_action(task, interval_ms), " on ",
                    node_name, NULL);
 
     if (status == PCMK_EXEC_DONE) {
         pcmk__g_strcat(str, " returned '", services_ocf_exitcode_str(rc), "'",
                        NULL);
         if (!pcmk__str_empty(exit_reason)) {
             pcmk__g_strcat(str, " (", exit_reason, ")", NULL);
         }
 
     } else {
         pcmk__g_strcat(str, " could not be executed (",
                        pcmk_exec_status_str(status), NULL);
         if (!pcmk__str_empty(exit_reason)) {
             pcmk__g_strcat(str, ": ", exit_reason, NULL);
         }
         g_string_append_c(str, ')');
     }
 
     if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                 &last_change_epoch) == pcmk_ok) {
         char *s = pcmk__epoch2str(&last_change_epoch, 0);
 
         pcmk__g_strcat(str, " at ", s, NULL);
         free(s);
     }
     if (!pcmk__str_empty(exec_time)) {
         int exec_time_ms = 0;
 
         if ((pcmk__scan_min_int(exec_time, &exec_time_ms, 0) == pcmk_rc_ok)
             && (exec_time_ms > 0)) {
 
             pcmk__g_strcat(str, " after ",
                            pcmk__readable_interval(exec_time_ms), NULL);
         }
     }
 
     out->list_item(out, NULL, "%s", str->str);
     g_string_free(str, TRUE);
     free(rsc_id);
     free(task);
 }
 
 /*!
  * \internal
  * \brief Display a failed action with technical details
  *
  * \param[in,out] out          Output object to use for display
  * \param[in]     xml_op       XML containing failed action
  * \param[in]     op_key       Operation key of failed action
  * \param[in]     node_name    Where failed action occurred
  * \param[in]     rc           OCF exit code of failed action
  * \param[in]     status       Execution status of failed action
  * \param[in]     exit_reason  Exit reason given for failed action
  * \param[in]     exec_time    String containing execution time in milliseconds
  */
 static void
 failed_action_technical(pcmk__output_t *out, const xmlNode *xml_op,
                         const char *op_key, const char *node_name, int rc,
                         int status, const char *exit_reason,
                         const char *exec_time)
 {
     const char *call_id = crm_element_value(xml_op, XML_LRM_ATTR_CALLID);
     const char *queue_time = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
     const char *exit_status = services_ocf_exitcode_str(rc);
     const char *lrm_status = pcmk_exec_status_str(status);
     time_t last_change_epoch = 0;
     GString *str = NULL;
 
     if (pcmk__str_empty(op_key)) {
         op_key = "unknown operation";
     }
     if (pcmk__str_empty(exit_status)) {
         exit_status = "unknown exit status";
     }
     if (pcmk__str_empty(call_id)) {
         call_id = "unknown";
     }
 
     str = g_string_sized_new(256);
 
     g_string_append_printf(str, "%s on %s '%s' (%d): call=%s, status='%s'",
                            op_key, node_name, exit_status, rc, call_id,
                            lrm_status);
 
     if (!pcmk__str_empty(exit_reason)) {
         pcmk__g_strcat(str, ", exitreason='", exit_reason, "'", NULL);
     }
 
     if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                 &last_change_epoch) == pcmk_ok) {
         char *last_change_str = pcmk__epoch2str(&last_change_epoch, 0);
 
         pcmk__g_strcat(str,
                        ", " XML_RSC_OP_LAST_CHANGE "="
                        "'", last_change_str, "'", NULL);
         free(last_change_str);
     }
     if (!pcmk__str_empty(queue_time)) {
         pcmk__g_strcat(str, ", queued=", queue_time, "ms", NULL);
     }
     if (!pcmk__str_empty(exec_time)) {
         pcmk__g_strcat(str, ", exec=", exec_time, "ms", NULL);
     }
 
     out->list_item(out, NULL, "%s", str->str);
     g_string_free(str, TRUE);
 }
 
 PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr", "uint32_t")
 static int
 failed_action_default(pcmk__output_t *out, va_list args)
 {
     xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     const char *op_key = pe__xe_history_key(xml_op);
     const char *node_name = crm_element_value(xml_op, XML_ATTR_UNAME);
     const char *exit_reason = crm_element_value(xml_op,
                                                 XML_LRM_ATTR_EXIT_REASON);
     const char *exec_time = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
 
     int rc;
     int status;
 
     pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), &rc, 0);
 
     pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
                        &status, 0);
 
     if (pcmk__str_empty(node_name)) {
         node_name = "unknown node";
     }
 
     if (pcmk_is_set(show_opts, pcmk_show_failed_detail)) {
         failed_action_technical(out, xml_op, op_key, node_name, rc, status,
                                 exit_reason, exec_time);
     } else {
         failed_action_friendly(out, xml_op, op_key, node_name, rc, status,
                                exit_reason, exec_time);
     }
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("failed-action", "xmlNodePtr", "uint32_t")
 static int
 failed_action_xml(pcmk__output_t *out, va_list args) {
     xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
     uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t);
 
     const char *op_key = pe__xe_history_key(xml_op);
     const char *op_key_name = "op_key";
     int rc;
     int status;
     const char *exit_reason = crm_element_value(xml_op, XML_LRM_ATTR_EXIT_REASON);
 
     time_t epoch = 0;
     char *rc_s = NULL;
     char *reason_s = crm_xml_escape(exit_reason ? exit_reason : "none");
     xmlNodePtr node = NULL;
 
     pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_RC), &rc, 0);
     pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
                        &status, 0);
 
     rc_s = pcmk__itoa(rc);
     if (crm_element_value(xml_op, XML_LRM_ATTR_TASK_KEY) == NULL) {
         op_key_name = "id";
     }
     node = pcmk__output_create_xml_node(out, "failure",
                                         op_key_name, op_key,
                                         "node", crm_element_value(xml_op, XML_ATTR_UNAME),
                                         "exitstatus", services_ocf_exitcode_str(rc),
                                         "exitreason", pcmk__s(reason_s, ""),
                                         "exitcode", rc_s,
                                         "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                                         "status", pcmk_exec_status_str(status),
                                         NULL);
     free(rc_s);
 
     if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                  &epoch) == pcmk_ok) && (epoch > 0)) {
         guint interval_ms = 0;
         char *interval_ms_s = NULL;
         char *rc_change = pcmk__epoch2str(&epoch,
                                           crm_time_log_date
                                           |crm_time_log_timeofday
                                           |crm_time_log_with_timezone);
 
         crm_element_value_ms(xml_op, XML_LRM_ATTR_INTERVAL_MS, &interval_ms);
         interval_ms_s = crm_strdup_printf("%u", interval_ms);
 
         pcmk__xe_set_props(node, XML_RSC_OP_LAST_CHANGE, rc_change,
                            "queued", crm_element_value(xml_op, XML_RSC_OP_T_QUEUE),
                            "exec", crm_element_value(xml_op, XML_RSC_OP_T_EXEC),
                            "interval", interval_ms_s,
                            "task", crm_element_value(xml_op, XML_LRM_ATTR_TASK),
                            NULL);
 
         free(interval_ms_s);
         free(rc_change);
     }
 
     free(reason_s);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("failed-action-list", "pe_working_set_t *", "GList *",
                   "GList *", "uint32_t", "bool")
 static int
 failed_action_list(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool print_spacer = va_arg(args, int);
 
     xmlNode *xml_op = NULL;
     int rc = pcmk_rc_no_output;
 
     if (xmlChildElementCount(data_set->failed) == 0) {
         return rc;
     }
 
     for (xml_op = pcmk__xml_first_child(data_set->failed); xml_op != NULL;
          xml_op = pcmk__xml_next(xml_op)) {
         char *rsc = NULL;
 
         if (!pcmk__str_in_list(crm_element_value(xml_op, XML_ATTR_UNAME), only_node,
                                pcmk__str_star_matches|pcmk__str_casei)) {
             continue;
         }
 
         if (pcmk_xe_mask_probe_failure(xml_op)) {
             continue;
         }
 
         if (!parse_op_key(pe__xe_history_key(xml_op), &rsc, NULL, NULL)) {
             continue;
         }
 
         if (!pcmk__str_in_list(rsc, only_rsc, pcmk__str_star_matches)) {
             free(rsc);
             continue;
         }
 
         free(rsc);
 
         PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Failed Resource Actions");
         out->message(out, "failed-action", xml_op, show_opts);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 static void
 status_node(pe_node_t *node, xmlNodePtr parent, uint32_t show_opts)
 {
     int health = pe__node_health(node);
 
     // Cluster membership
     if (node->details->online) {
         pcmk_create_html_node(parent, "span", NULL, "online", " online");
     } else {
         pcmk_create_html_node(parent, "span", NULL, "offline", " OFFLINE");
     }
 
     // Standby mode
     if (node->details->standby_onfail && (node->details->running_rsc != NULL)) {
         pcmk_create_html_node(parent, "span", NULL, "standby",
                               " (in standby due to on-fail,"
                               " with active resources)");
     } else if (node->details->standby_onfail) {
         pcmk_create_html_node(parent, "span", NULL, "standby",
                               " (in standby due to on-fail)");
     } else if (node->details->standby && (node->details->running_rsc != NULL)) {
         pcmk_create_html_node(parent, "span", NULL, "standby",
                               " (in standby, with active resources)");
     } else if (node->details->standby) {
         pcmk_create_html_node(parent, "span", NULL, "standby", " (in standby)");
     }
 
     // Maintenance mode
     if (node->details->maintenance) {
         pcmk_create_html_node(parent, "span", NULL, "maint",
                               " (in maintenance mode)");
     }
 
     // Node health
     if (health < 0) {
         pcmk_create_html_node(parent, "span", NULL, "health_red",
                               " (health is RED)");
     } else if (health == 0) {
         pcmk_create_html_node(parent, "span", NULL, "health_yellow",
                               " (health is YELLOW)");
     }
 
     // Feature set
     if (pcmk_is_set(show_opts, pcmk_show_feature_set)) {
         const char *feature_set = get_node_feature_set(node);
         if (feature_set != NULL) {
             char *buf = crm_strdup_printf(", feature set %s", feature_set);
             pcmk_create_html_node(parent, "span", NULL, NULL, buf);
             free(buf);
         }
     }
 }
 
 PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool",
                   "GList *", "GList *")
 static int
 node_html(pcmk__output_t *out, va_list args) {
     pe_node_t *node = va_arg(args, pe_node_t *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool full = va_arg(args, int);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
 
     if (full) {
         xmlNodePtr item_node;
 
         if (pcmk_all_flags_set(show_opts, pcmk_show_brief | pcmk_show_rscs_by_node)) {
             GList *rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
 
             out->begin_list(out, NULL, NULL, "%s:", node_name);
             item_node = pcmk__output_xml_create_parent(out, "li", NULL);
             pcmk_create_html_node(item_node, "span", NULL, NULL, "Status:");
             status_node(node, item_node, show_opts);
 
             if (rscs != NULL) {
                 uint32_t new_show_opts = (show_opts | pcmk_show_rsc_only) & ~pcmk_show_inactive_rscs;
                 out->begin_list(out, NULL, NULL, "Resources");
                 pe__rscs_brief_output(out, rscs, new_show_opts);
                 out->end_list(out);
             }
 
             pcmk__output_xml_pop_parent(out);
             out->end_list(out);
 
         } else if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
             GList *lpc2 = NULL;
             int rc = pcmk_rc_no_output;
 
             out->begin_list(out, NULL, NULL, "%s:", node_name);
             item_node = pcmk__output_xml_create_parent(out, "li", NULL);
             pcmk_create_html_node(item_node, "span", NULL, NULL, "Status:");
             status_node(node, item_node, show_opts);
 
             for (lpc2 = node->details->running_rsc; lpc2 != NULL; lpc2 = lpc2->next) {
                 pe_resource_t *rsc = (pe_resource_t *) lpc2->data;
                 PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Resources");
 
                 show_opts |= pcmk_show_rsc_only;
                 out->message(out, crm_map_element_name(rsc->xml), show_opts,
                              rsc, only_node, only_rsc);
             }
 
             PCMK__OUTPUT_LIST_FOOTER(out, rc);
             pcmk__output_xml_pop_parent(out);
             out->end_list(out);
 
         } else {
             char *buf = crm_strdup_printf("%s:", node_name);
 
             item_node = pcmk__output_create_xml_node(out, "li", NULL);
             pcmk_create_html_node(item_node, "span", NULL, "bold", buf);
             status_node(node, item_node, show_opts);
 
             free(buf);
         }
     } else {
         out->begin_list(out, NULL, NULL, "%s:", node_name);
     }
 
     free(node_name);
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Get a human-friendly textual description of a node's status
  *
  * \param[in] node  Node to check
  *
  * \return String representation of node's status
  */
 static const char *
 node_text_status(const pe_node_t *node)
 {
     if (node->details->unclean) {
         if (node->details->online) {
             return "UNCLEAN (online)";
 
         } else if (node->details->pending) {
             return "UNCLEAN (pending)";
 
         } else {
             return "UNCLEAN (offline)";
         }
 
     } else if (node->details->pending) {
         return "pending";
 
     } else if (node->details->standby_onfail && node->details->online) {
         return "standby (on-fail)";
 
     } else if (node->details->standby) {
         if (node->details->online) {
             if (node->details->running_rsc) {
                 return "standby (with active resources)";
             } else {
                 return "standby";
             }
         } else {
             return "OFFLINE (standby)";
         }
 
     } else if (node->details->maintenance) {
         if (node->details->online) {
             return "maintenance";
         } else {
             return "OFFLINE (maintenance)";
         }
 
     } else if (node->details->online) {
         return "online";
     }
 
     return "OFFLINE";
 }
 
 PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool", "GList *", "GList *")
 static int
 node_text(pcmk__output_t *out, va_list args) {
     pe_node_t *node = va_arg(args, pe_node_t *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool full = va_arg(args, int);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     if (full) {
         char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
         GString *str = g_string_sized_new(64);
         int health = pe__node_health(node);
 
         // Create a summary line with node type, name, and status
         if (pe__is_guest_node(node)) {
             g_string_append(str, "GuestNode");
         } else if (pe__is_remote_node(node)) {
             g_string_append(str, "RemoteNode");
         } else {
             g_string_append(str, "Node");
         }
         pcmk__g_strcat(str, " ", node_name, ": ", node_text_status(node), NULL);
 
         if (health < 0) {
             g_string_append(str, " (health is RED)");
         } else if (health == 0) {
             g_string_append(str, " (health is YELLOW)");
         }
         if (pcmk_is_set(show_opts, pcmk_show_feature_set)) {
             const char *feature_set = get_node_feature_set(node);
             if (feature_set != NULL) {
                 pcmk__g_strcat(str, ", feature set ", feature_set, NULL);
             }
         }
 
         /* If we're grouping by node, print its resources */
         if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
             if (pcmk_is_set(show_opts, pcmk_show_brief)) {
                 GList *rscs = pe__filter_rsc_list(node->details->running_rsc, only_rsc);
 
                 if (rscs != NULL) {
                     uint32_t new_show_opts = (show_opts | pcmk_show_rsc_only) & ~pcmk_show_inactive_rscs;
                     out->begin_list(out, NULL, NULL, "%s", str->str);
                     out->begin_list(out, NULL, NULL, "Resources");
 
                     pe__rscs_brief_output(out, rscs, new_show_opts);
 
                     out->end_list(out);
                     out->end_list(out);
 
                     g_list_free(rscs);
                 }
 
             } else {
                 GList *gIter2 = NULL;
 
                 out->begin_list(out, NULL, NULL, "%s", str->str);
                 out->begin_list(out, NULL, NULL, "Resources");
 
                 for (gIter2 = node->details->running_rsc; gIter2 != NULL; gIter2 = gIter2->next) {
                     pe_resource_t *rsc = (pe_resource_t *) gIter2->data;
 
                     show_opts |= pcmk_show_rsc_only;
                     out->message(out, crm_map_element_name(rsc->xml), show_opts,
                                  rsc, only_node, only_rsc);
                 }
 
                 out->end_list(out);
                 out->end_list(out);
             }
         } else {
             out->list_item(out, NULL, "%s", str->str);
         }
 
         g_string_free(str, TRUE);
         free(node_name);
     } else {
         char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
         out->begin_list(out, NULL, NULL, "Node: %s", node_name);
         free(node_name);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node", "pe_node_t *", "uint32_t", "bool", "GList *", "GList *")
 static int
 node_xml(pcmk__output_t *out, va_list args) {
     pe_node_t *node = va_arg(args, pe_node_t *);
     uint32_t show_opts G_GNUC_UNUSED = va_arg(args, uint32_t);
     bool full = va_arg(args, int);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     if (full) {
         const char *node_type = "unknown";
         char *length_s = pcmk__itoa(g_list_length(node->details->running_rsc));
         int health = pe__node_health(node);
         const char *health_s = NULL;
         const char *feature_set;
 
         switch (node->details->type) {
             case node_member:
                 node_type = "member";
                 break;
             case node_remote:
                 node_type = "remote";
                 break;
             case node_ping:
                 node_type = "ping";
                 break;
         }
 
         if (health < 0) {
             health_s = "red";
         } else if (health == 0) {
             health_s = "yellow";
         } else {
             health_s = "green";
         }
 
         feature_set = get_node_feature_set(node);
 
         pe__name_and_nvpairs_xml(out, true, "node", 15,
                                  "name", node->details->uname,
                                  "id", node->details->id,
                                  "online", pcmk__btoa(node->details->online),
                                  "standby", pcmk__btoa(node->details->standby),
                                  "standby_onfail", pcmk__btoa(node->details->standby_onfail),
                                  "maintenance", pcmk__btoa(node->details->maintenance),
                                  "pending", pcmk__btoa(node->details->pending),
                                  "unclean", pcmk__btoa(node->details->unclean),
                                  "health", health_s,
                                  "feature_set", feature_set,
                                  "shutdown", pcmk__btoa(node->details->shutdown),
                                  "expected_up", pcmk__btoa(node->details->expected_up),
                                  "is_dc", pcmk__btoa(node->details->is_dc),
                                  "resources_running", length_s,
                                  "type", node_type);
 
         if (pe__is_guest_node(node)) {
             xmlNodePtr xml_node = pcmk__output_xml_peek_parent(out);
             crm_xml_add(xml_node, "id_as_resource", node->details->remote_rsc->container->id);
         }
 
         if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
             GList *lpc = NULL;
 
             for (lpc = node->details->running_rsc; lpc != NULL; lpc = lpc->next) {
                 pe_resource_t *rsc = (pe_resource_t *) lpc->data;
 
                 show_opts |= pcmk_show_rsc_only;
                 out->message(out, crm_map_element_name(rsc->xml), show_opts,
                              rsc, only_node, only_rsc);
             }
         }
 
         free(length_s);
 
         out->end_list(out);
     } else {
         pcmk__output_xml_create_parent(out, "node",
                                        "name", node->details->uname,
                                        NULL);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int")
 static int
 node_attribute_text(pcmk__output_t *out, va_list args) {
     const char *name = va_arg(args, const char *);
     const char *value = va_arg(args, const char *);
     bool add_extra = va_arg(args, int);
     int expected_score = va_arg(args, int);
 
     if (add_extra) {
         int v;
 
         if (value == NULL) {
             v = 0;
         } else {
             pcmk__scan_min_int(value, &v, INT_MIN);
         }
         if (v <= 0) {
             out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is lost", name, value);
         } else if (v < expected_score) {
             out->list_item(out, NULL, "%-32s\t: %-10s\t: Connectivity is degraded (Expected=%d)", name, value, expected_score);
         } else {
             out->list_item(out, NULL, "%-32s\t: %-10s", name, value);
         }
     } else {
         out->list_item(out, NULL, "%-32s\t: %-10s", name, value);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int")
 static int
 node_attribute_html(pcmk__output_t *out, va_list args) {
     const char *name = va_arg(args, const char *);
     const char *value = va_arg(args, const char *);
     bool add_extra = va_arg(args, int);
     int expected_score = va_arg(args, int);
 
     if (add_extra) {
         int v;
         char *s = crm_strdup_printf("%s: %s", name, value);
         xmlNodePtr item_node = pcmk__output_create_xml_node(out, "li", NULL);
 
         if (value == NULL) {
             v = 0;
         } else {
             pcmk__scan_min_int(value, &v, INT_MIN);
         }
 
         pcmk_create_html_node(item_node, "span", NULL, NULL, s);
         free(s);
 
         if (v <= 0) {
             pcmk_create_html_node(item_node, "span", NULL, "bold", "(connectivity is lost)");
         } else if (v < expected_score) {
             char *buf = crm_strdup_printf("(connectivity is degraded -- expected %d", expected_score);
             pcmk_create_html_node(item_node, "span", NULL, "bold", buf);
             free(buf);
         }
     } else {
         out->list_item(out, NULL, "%s: %s", name, value);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr")
 static int
 node_and_op(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
 
     pe_resource_t *rsc = NULL;
     gchar *node_str = NULL;
     char *last_change_str = NULL;
 
     const char *op_rsc = crm_element_value(xml_op, "resource");
     int status;
     time_t last_change = 0;
 
     pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
                        &status, PCMK_EXEC_UNKNOWN);
 
     rsc = pe_find_resource(data_set->resources, op_rsc);
 
     if (rsc) {
         const pe_node_t *node = pe__current_node(rsc);
         const char *target_role = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET_ROLE);
         uint32_t show_opts = pcmk_show_rsc_only | pcmk_show_pending;
 
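         // The resource isn't running anywhere, so fall back to its pending node (if any)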
         if (node == NULL) {
             node = rsc->pending_node;
         }
 
         node_str = pcmk__native_output_string(rsc, rsc_printable_id(rsc), node,
                                               show_opts, target_role, false);
     } else {
         node_str = crm_strdup_printf("Unknown resource %s", op_rsc);
     }
 
     if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                 &last_change) == pcmk_ok) {
         last_change_str = crm_strdup_printf(", %s='%s', exec=%sms",
                                             XML_RSC_OP_LAST_CHANGE,
                                             pcmk__trim(ctime(&last_change)),
                                             crm_element_value(xml_op, XML_RSC_OP_T_EXEC));
     }
 
     out->list_item(out, NULL, "%s: %s (node=%s, call=%s, rc=%s%s): %s",
                    node_str, pe__xe_history_key(xml_op),
                    crm_element_value(xml_op, XML_ATTR_UNAME),
                    crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                    crm_element_value(xml_op, XML_LRM_ATTR_RC),
                    last_change_str ? last_change_str : "",
                    pcmk_exec_status_str(status));
 
     g_free(node_str);
     free(last_change_str);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-and-op", "pe_working_set_t *", "xmlNodePtr")
 static int
 node_and_op_xml(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
 
     pe_resource_t *rsc = NULL;
     const char *op_rsc = crm_element_value(xml_op, "resource");
     int status;
     time_t last_change = 0;
     xmlNode *node = NULL;
 
     pcmk__scan_min_int(crm_element_value(xml_op, XML_LRM_ATTR_OPSTATUS),
                        &status, PCMK_EXEC_UNKNOWN);
     node = pcmk__output_create_xml_node(out, "operation",
                                         "op", pe__xe_history_key(xml_op),
                                         "node", crm_element_value(xml_op, XML_ATTR_UNAME),
                                         "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                                         "rc", crm_element_value(xml_op, XML_LRM_ATTR_RC),
                                         "status", pcmk_exec_status_str(status),
                                         NULL);
 
     rsc = pe_find_resource(data_set->resources, op_rsc);
 
     if (rsc) {
         const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
         const char *kind = crm_element_value(rsc->xml, XML_ATTR_TYPE);
         char *agent_tuple = NULL;
 
         agent_tuple = crm_strdup_printf("%s:%s:%s", class,
                                         pcmk_is_set(pcmk_get_ra_caps(class), pcmk_ra_cap_provider) ? crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER) : "",
                                         kind);
 
         pcmk__xe_set_props(node, "rsc", rsc_printable_id(rsc),
                            "agent", agent_tuple,
                            NULL);
         free(agent_tuple);
     }
 
     if (crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                 &last_change) == pcmk_ok) {
         pcmk__xe_set_props(node, XML_RSC_OP_LAST_CHANGE,
                            pcmk__trim(ctime(&last_change)),
                            XML_RSC_OP_T_EXEC, crm_element_value(xml_op, XML_RSC_OP_T_EXEC),
                            NULL);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-attribute", "const char *", "const char *", "bool", "int")
 static int
 node_attribute_xml(pcmk__output_t *out, va_list args) {
     const char *name = va_arg(args, const char *);
     const char *value = va_arg(args, const char *);
     bool add_extra = va_arg(args, int);
     int expected_score = va_arg(args, int);
 
     xmlNodePtr node = pcmk__output_create_xml_node(out, "attribute",
                                                    "name", name,
                                                    "value", value,
                                                    NULL);
 
     if (add_extra) {
         char *buf = pcmk__itoa(expected_score);
         crm_xml_add(node, "expected", buf);
         free(buf);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-attribute-list", "pe_working_set_t *", "uint32_t",
                   "bool", "GList *", "GList *")
 static int
 node_attribute_list(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool print_spacer = va_arg(args, int);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     int rc = pcmk_rc_no_output;
 
     /* Display each node's attributes */
     for (GList *gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = gIter->data;
 
         GList *attr_list = NULL;
         GHashTableIter iter;
         gpointer key;
 
         if (!node || !node->details || !node->details->online) {
             continue;
         }
 
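         // Build a sorted list of this node's displayable attributes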
         g_hash_table_iter_init(&iter, node->details->attrs);
         while (g_hash_table_iter_next (&iter, &key, NULL)) {
             attr_list = filter_attr_list(attr_list, key);
         }
 
         if (attr_list == NULL) {
             continue;
         }
 
         if (!pcmk__str_in_list(node->details->uname, only_node, pcmk__str_star_matches|pcmk__str_casei)) {
             g_list_free(attr_list);
             continue;
         }
 
         PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node Attributes");
 
         out->message(out, "node", node, show_opts, false, only_node, only_rsc);
 
         for (GList *aIter = attr_list; aIter != NULL; aIter = aIter->next) {
             const char *name = aIter->data;
             const char *value = NULL;
             int expected_score = 0;
             bool add_extra = false;
 
             value = pe_node_attribute_raw(node, name);
 
             add_extra = add_extra_info(node, node->details->running_rsc,
                                        data_set, name, &expected_score);
 
             /* Print attribute name and value */
             out->message(out, "node-attribute", name, value, add_extra,
                          expected_score);
         }
 
         g_list_free(attr_list);
         out->end_list(out);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("node-capacity", "const pe_node_t *", "const char *")
 static int
 node_capacity(pcmk__output_t *out, va_list args)
 {
     const pe_node_t *node = va_arg(args, pe_node_t *);
     const char *comment = va_arg(args, const char *);
 
     char *dump_text = crm_strdup_printf("%s: %s capacity:",
                                         comment, pe__node_name(node));
 
     g_hash_table_foreach(node->details->utilization, append_dump_text, &dump_text);
     out->list_item(out, NULL, "%s", dump_text);
     free(dump_text);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-capacity", "const pe_node_t *", "const char *")
 static int
 node_capacity_xml(pcmk__output_t *out, va_list args)
 {
     const pe_node_t *node = va_arg(args, pe_node_t *);
     const char *comment = va_arg(args, const char *);
 
     xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "capacity",
                                                        "node", node->details->uname,
                                                        "comment", comment,
                                                        NULL);
     g_hash_table_foreach(node->details->utilization, add_dump_node, xml_node);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-history-list", "pe_working_set_t *", "pe_node_t *", "xmlNodePtr",
                   "GList *", "GList *", "uint32_t", "uint32_t")
 static int
 node_history_list(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     pe_node_t *node = va_arg(args, pe_node_t *);
     xmlNode *node_state = va_arg(args, xmlNode *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
     uint32_t section_opts = va_arg(args, uint32_t);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     xmlNode *lrm_rsc = NULL;
     xmlNode *rsc_entry = NULL;
     int rc = pcmk_rc_no_output;
 
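     // Descend to the node's <lrm_resources> section, which holds its resource history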
     lrm_rsc = find_xml_node(node_state, XML_CIB_TAG_LRM, FALSE);
     lrm_rsc = find_xml_node(lrm_rsc, XML_LRM_TAG_RESOURCES, FALSE);
 
     /* Print history of each of the node's resources */
     for (rsc_entry = first_named_child(lrm_rsc, XML_LRM_TAG_RESOURCE);
          rsc_entry != NULL; rsc_entry = crm_next_same_xml(rsc_entry)) {
         const char *rsc_id = crm_element_value(rsc_entry, XML_ATTR_ID);
         pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
         const pe_resource_t *parent = pe__const_top_resource(rsc, false);
 
         /* We can't use is_filtered here to filter group resources.  For is_filtered,
          * we have to decide whether to check the parent or not.  If we check the
          * parent, all elements of a group will always be printed because that's how
          * is_filtered works for groups.  If we do not check the parent, sometimes
          * this will filter everything out.
          *
          * For other resource types, is_filtered is okay.
          */
-        if (parent->variant == pe_group) {
+        if (parent->variant == pcmk_rsc_variant_group) {
             if (!pcmk__str_in_list(rsc_printable_id(rsc), only_rsc,
                                    pcmk__str_star_matches)
                 && !pcmk__str_in_list(rsc_printable_id(parent), only_rsc,
                                       pcmk__str_star_matches)) {
                 continue;
             }
         } else {
             if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
                 continue;
             }
         }
 
         if (!pcmk_is_set(section_opts, pcmk_section_operations)) {
             time_t last_failure = 0;
             int failcount = pe_get_failcount(node, rsc, &last_failure, pe_fc_default,
                                              NULL);
 
             if (failcount <= 0) {
                 continue;
             }
 
             if (rc == pcmk_rc_no_output) {
                 rc = pcmk_rc_ok;
                 out->message(out, "node", node, show_opts, false, only_node,
                              only_rsc);
             }
 
             out->message(out, "resource-history", rsc, rsc_id, false,
                          failcount, last_failure, false);
         } else {
             GList *op_list = get_operation_list(rsc_entry);
             pe_resource_t *rsc = pe_find_resource(data_set->resources,
                                                   crm_element_value(rsc_entry, XML_ATTR_ID));
 
             if (op_list == NULL) {
                 continue;
             }
 
             if (rc == pcmk_rc_no_output) {
                 rc = pcmk_rc_ok;
                 out->message(out, "node", node, show_opts, false, only_node,
                              only_rsc);
             }
 
             out->message(out, "resource-operation-list", data_set, rsc, node,
                          op_list, show_opts);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool")
 static int
 node_list_html(pcmk__output_t *out, va_list args) {
     GList *nodes = va_arg(args, GList *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool print_spacer G_GNUC_UNUSED = va_arg(args, int);
 
     int rc = pcmk_rc_no_output;
 
     for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         if (!pcmk__str_in_list(node->details->uname, only_node,
                                pcmk__str_star_matches|pcmk__str_casei)) {
             continue;
         }
 
         PCMK__OUTPUT_LIST_HEADER(out, false, rc, "Node List");
 
         out->message(out, "node", node, show_opts, true, only_node, only_rsc);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool")
 static int
 node_list_text(pcmk__output_t *out, va_list args) {
     GList *nodes = va_arg(args, GList *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool print_spacer = va_arg(args, int);
 
     /* space-separated lists of node names */
     GString *online_nodes = NULL;
     GString *online_remote_nodes = NULL;
     GString *online_guest_nodes = NULL;
     GString *offline_nodes = NULL;
     GString *offline_remote_nodes = NULL;
 
     int rc = pcmk_rc_no_output;
 
     for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
         char *node_name = pe__node_display_name(node, pcmk_is_set(show_opts, pcmk_show_node_id));
 
         if (!pcmk__str_in_list(node->details->uname, only_node,
                                pcmk__str_star_matches|pcmk__str_casei)) {
             free(node_name);
             continue;
         }
 
         PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc, "Node List");
 
         // Determine whether to display node individually or in a list
         if (node->details->unclean || node->details->pending
             || (node->details->standby_onfail && node->details->online)
             || node->details->standby || node->details->maintenance
             || pcmk_is_set(show_opts, pcmk_show_rscs_by_node)
             || pcmk_is_set(show_opts, pcmk_show_feature_set)
             || (pe__node_health(node) <= 0)) {
             // Display node individually
 
         } else if (node->details->online) {
             // Display online node in a list
             if (pe__is_guest_node(node)) {
                 pcmk__add_word(&online_guest_nodes, 1024, node_name);
 
             } else if (pe__is_remote_node(node)) {
                 pcmk__add_word(&online_remote_nodes, 1024, node_name);
 
             } else {
                 pcmk__add_word(&online_nodes, 1024, node_name);
             }
             free(node_name);
             continue;
 
         } else {
             // Display offline node in a list
             if (pe__is_remote_node(node)) {
                 pcmk__add_word(&offline_remote_nodes, 1024, node_name);
 
             } else if (pe__is_guest_node(node)) {
                 /* ignore offline guest nodes */
 
             } else {
                 pcmk__add_word(&offline_nodes, 1024, node_name);
             }
             free(node_name);
             continue;
         }
 
         /* If we get here, the node is in a bad state or we're grouping by node */
         out->message(out, "node", node, show_opts, true, only_node, only_rsc);
         free(node_name);
     }
 
     /* If we're not grouping by node, summarize nodes by status */
     if (online_nodes != NULL) {
         out->list_item(out, "Online", "[ %s ]",
                        (const char *) online_nodes->str);
         g_string_free(online_nodes, TRUE);
     }
     if (offline_nodes != NULL) {
         out->list_item(out, "OFFLINE", "[ %s ]",
                        (const char *) offline_nodes->str);
         g_string_free(offline_nodes, TRUE);
     }
     if (online_remote_nodes) {
         out->list_item(out, "RemoteOnline", "[ %s ]",
                        (const char *) online_remote_nodes->str);
         g_string_free(online_remote_nodes, TRUE);
     }
     if (offline_remote_nodes) {
         out->list_item(out, "RemoteOFFLINE", "[ %s ]",
                        (const char *) offline_remote_nodes->str);
         g_string_free(offline_remote_nodes, TRUE);
     }
     if (online_guest_nodes != NULL) {
         out->list_item(out, "GuestOnline", "[ %s ]",
                        (const char *) online_guest_nodes->str);
         g_string_free(online_guest_nodes, TRUE);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("node-list", "GList *", "GList *", "GList *", "uint32_t", "bool")
 static int
 node_list_xml(pcmk__output_t *out, va_list args) {
     GList *nodes = va_arg(args, GList *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool print_spacer G_GNUC_UNUSED = va_arg(args, int);
 
     out->begin_list(out, NULL, NULL, "nodes");
     for (GList *gIter = nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         if (!pcmk__str_in_list(node->details->uname, only_node,
                                pcmk__str_star_matches|pcmk__str_casei)) {
             continue;
         }
 
         out->message(out, "node", node, show_opts, true, only_node, only_rsc);
     }
     out->end_list(out);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-summary", "pe_working_set_t *", "GList *", "GList *",
                   "uint32_t", "uint32_t", "bool")
 static int
 node_summary(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
     uint32_t section_opts = va_arg(args, uint32_t);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool print_spacer = va_arg(args, int);
 
     xmlNode *node_state = NULL;
     xmlNode *cib_status = pcmk_find_cib_element(data_set->input,
                                                 XML_CIB_TAG_STATUS);
     int rc = pcmk_rc_no_output;
 
     if (xmlChildElementCount(cib_status) == 0) {
         return rc;
     }
 
     for (node_state = first_named_child(cib_status, XML_CIB_TAG_STATE);
          node_state != NULL; node_state = crm_next_same_xml(node_state)) {
         pe_node_t *node = pe_find_node_id(data_set->nodes, ID(node_state));
 
         if (!node || !node->details || !node->details->online) {
             continue;
         }
 
         if (!pcmk__str_in_list(node->details->uname, only_node,
                                pcmk__str_star_matches|pcmk__str_casei)) {
             continue;
         }
 
         PCMK__OUTPUT_LIST_HEADER(out, print_spacer, rc,
                                  pcmk_is_set(section_opts, pcmk_section_operations) ? "Operations" : "Migration Summary");
 
         out->message(out, "node-history-list", data_set, node, node_state,
                      only_node, only_rsc, section_opts, show_opts);
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("node-weight", "const pe_resource_t *", "const char *",
                   "const char *", "const char *")
 static int
 node_weight(pcmk__output_t *out, va_list args)
 {
     const pe_resource_t *rsc = va_arg(args, const pe_resource_t *);
     const char *prefix = va_arg(args, const char *);
     const char *uname = va_arg(args, const char *);
     const char *score = va_arg(args, const char *);
 
     if (rsc) {
         out->list_item(out, NULL, "%s: %s allocation score on %s: %s",
                        prefix, rsc->id, uname, score);
     } else {
         out->list_item(out, NULL, "%s: %s = %s", prefix, uname, score);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("node-weight", "const pe_resource_t *", "const char *",
                   "const char *", "const char *")
 static int
 node_weight_xml(pcmk__output_t *out, va_list args)
 {
     const pe_resource_t *rsc = va_arg(args, const pe_resource_t *);
     const char *prefix = va_arg(args, const char *);
     const char *uname = va_arg(args, const char *);
     const char *score = va_arg(args, const char *);
 
     xmlNodePtr node = pcmk__output_create_xml_node(out, "node_weight",
                                                    "function", prefix,
                                                    "node", uname,
                                                    "score", score,
                                                    NULL);
 
     if (rsc) {
         crm_xml_add(node, "id", rsc->id);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("op-history", "xmlNodePtr", "const char *", "const char *", "int", "uint32_t")
 static int
 op_history_text(pcmk__output_t *out, va_list args) {
     xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
     const char *task = va_arg(args, const char *);
     const char *interval_ms_s = va_arg(args, const char *);
     int rc = va_arg(args, int);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     char *buf = op_history_string(xml_op, task, interval_ms_s, rc,
                                   pcmk_is_set(show_opts, pcmk_show_timing));
 
     out->list_item(out, NULL, "%s", buf);
 
     free(buf);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("op-history", "xmlNodePtr", "const char *", "const char *", "int", "uint32_t")
 static int
 op_history_xml(pcmk__output_t *out, va_list args) {
     xmlNodePtr xml_op = va_arg(args, xmlNodePtr);
     const char *task = va_arg(args, const char *);
     const char *interval_ms_s = va_arg(args, const char *);
     int rc = va_arg(args, int);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     char *rc_s = pcmk__itoa(rc);
     xmlNodePtr node = pcmk__output_create_xml_node(out, "operation_history",
                                                    "call", crm_element_value(xml_op, XML_LRM_ATTR_CALLID),
                                                    "task", task,
                                                    "rc", rc_s,
                                                    "rc_text", services_ocf_exitcode_str(rc),
                                                    NULL);
     free(rc_s);
 
     if (interval_ms_s && !pcmk__str_eq(interval_ms_s, "0", pcmk__str_casei)) {
         char *s = crm_strdup_printf("%sms", interval_ms_s);
         crm_xml_add(node, "interval", s);
         free(s);
     }
 
     if (pcmk_is_set(show_opts, pcmk_show_timing)) {
         const char *value = NULL;
         time_t epoch = 0;
 
         if ((crm_element_value_epoch(xml_op, XML_RSC_OP_LAST_CHANGE,
                                      &epoch) == pcmk_ok) && (epoch > 0)) {
             char *s = pcmk__epoch2str(&epoch, 0);
             crm_xml_add(node, XML_RSC_OP_LAST_CHANGE, s);
             free(s);
         }
 
         value = crm_element_value(xml_op, XML_RSC_OP_T_EXEC);
         if (value) {
             char *s = crm_strdup_printf("%sms", value);
             crm_xml_add(node, XML_RSC_OP_T_EXEC, s);
             free(s);
         }
         value = crm_element_value(xml_op, XML_RSC_OP_T_QUEUE);
         if (value) {
             char *s = crm_strdup_printf("%sms", value);
             crm_xml_add(node, XML_RSC_OP_T_QUEUE, s);
             free(s);
         }
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("promotion-score", "pe_resource_t *", "pe_node_t *", "const char *")
 static int
 promotion_score(pcmk__output_t *out, va_list args)
 {
     pe_resource_t *child_rsc = va_arg(args, pe_resource_t *);
     pe_node_t *chosen = va_arg(args, pe_node_t *);
     const char *score = va_arg(args, const char *);
 
     out->list_item(out, NULL, "%s promotion score on %s: %s",
                    child_rsc->id,
                    chosen? chosen->details->uname : "none",
                    score);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("promotion-score", "pe_resource_t *", "pe_node_t *", "const char *")
 static int
 promotion_score_xml(pcmk__output_t *out, va_list args)
 {
     pe_resource_t *child_rsc = va_arg(args, pe_resource_t *);
     pe_node_t *chosen = va_arg(args, pe_node_t *);
     const char *score = va_arg(args, const char *);
 
     xmlNodePtr node = pcmk__output_create_xml_node(out, "promotion_score",
                                                    "id", child_rsc->id,
                                                    "score", score,
                                                    NULL);
 
     if (chosen) {
         crm_xml_add(node, "node", chosen->details->uname);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "bool")
 static int
 resource_config(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     bool raw = va_arg(args, int);
 
     char *rsc_xml = formatted_xml_buf(rsc, raw);
 
     out->output_xml(out, "xml", rsc_xml);
 
     free(rsc_xml);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-config", "pe_resource_t *", "bool")
 static int
 resource_config_text(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     bool raw = va_arg(args, int);
 
     char *rsc_xml = formatted_xml_buf(rsc, raw);
 
     pcmk__formatted_printf(out, "Resource XML:\n");
     out->output_xml(out, "xml", rsc_xml);
 
     free(rsc_xml);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "bool", "int", "time_t", "bool")
 static int
 resource_history_text(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     const char *rsc_id = va_arg(args, const char *);
     bool all = va_arg(args, int);
     int failcount = va_arg(args, int);
     time_t last_failure = va_arg(args, time_t);
     bool as_header = va_arg(args, int);
 
     char *buf = resource_history_string(rsc, rsc_id, all, failcount, last_failure);
 
     if (as_header) {
         out->begin_list(out, NULL, NULL, "%s", buf);
     } else {
         out->list_item(out, NULL, "%s", buf);
     }
 
     free(buf);
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-history", "pe_resource_t *", "const char *", "bool", "int", "time_t", "bool")
 static int
 resource_history_xml(pcmk__output_t *out, va_list args) {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     const char *rsc_id = va_arg(args, const char *);
     bool all = va_arg(args, int);
     int failcount = va_arg(args, int);
     time_t last_failure = va_arg(args, time_t);
     bool as_header = va_arg(args, int);
 
     xmlNodePtr node = pcmk__output_xml_create_parent(out, "resource_history",
                                                      "id", rsc_id,
                                                      NULL);
 
     if (rsc == NULL) {
         pcmk__xe_set_bool_attr(node, "orphan", true);
     } else if (all || failcount || last_failure > 0) {
         char *migration_s = pcmk__itoa(rsc->migration_threshold);
 
         pcmk__xe_set_props(node, "orphan", "false",
                            "migration-threshold", migration_s,
                            NULL);
         free(migration_s);
 
         if (failcount > 0) {
             char *s = pcmk__itoa(failcount);
 
             crm_xml_add(node, PCMK__FAIL_COUNT_PREFIX, s);
             free(s);
         }
 
         if (last_failure > 0) {
             char *s = pcmk__epoch2str(&last_failure, 0);
 
             crm_xml_add(node, PCMK__LAST_FAILURE_PREFIX, s);
             free(s);
         }
     }
 
     if (!as_header) {
         pcmk__output_xml_pop_parent(out);
     }
 
     return pcmk_rc_ok;
 }
 
 static void
 print_resource_header(pcmk__output_t *out, uint32_t show_opts)
 {
     if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
         /* Active resources have already been printed by node */
         out->begin_list(out, NULL, NULL, "Inactive Resources");
     } else if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
         out->begin_list(out, NULL, NULL, "Full List of Resources");
     } else {
         out->begin_list(out, NULL, NULL, "Active Resources");
     }
 }
 
 
 PCMK__OUTPUT_ARGS("resource-list", "pe_working_set_t *", "uint32_t", "bool",
                   "GList *", "GList *", "bool")
 static int
 resource_list(pcmk__output_t *out, va_list args)
 {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     uint32_t show_opts = va_arg(args, uint32_t);
     bool print_summary = va_arg(args, int);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
     bool print_spacer = va_arg(args, int);
 
     GList *rsc_iter;
     int rc = pcmk_rc_no_output;
     bool printed_header = false;
 
     /* If we already showed active resources by node, and
      * we're not showing inactive resources, we have nothing to do
      */
     if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node) &&
         !pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
         return rc;
     }
 
     /* If we haven't already printed resources grouped by node,
      * and brief output was requested, print resource summary */
     if (pcmk_is_set(show_opts, pcmk_show_brief) && !pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
         GList *rscs = pe__filter_rsc_list(data_set->resources, only_rsc);
 
         PCMK__OUTPUT_SPACER_IF(out, print_spacer);
         print_resource_header(out, show_opts);
         printed_header = true;
 
         rc = pe__rscs_brief_output(out, rscs, show_opts);
         g_list_free(rscs);
     }
 
     /* For each resource, display it if appropriate */
     for (rsc_iter = data_set->resources; rsc_iter != NULL; rsc_iter = rsc_iter->next) {
         pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
         int x;
 
         /* Complex resources may have some sub-resources active and some inactive */
         gboolean is_active = rsc->fns->active(rsc, TRUE);
         gboolean partially_active = rsc->fns->active(rsc, FALSE);
 
         /* Skip inactive orphans (deleted but still in CIB) */
         if (pcmk_is_set(rsc->flags, pe_rsc_orphan) && !is_active) {
             continue;
 
         /* Skip active resources if we already displayed them by node */
         } else if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
             if (is_active) {
                 continue;
             }
 
         /* Skip primitives already counted in a brief summary */
         } else if (pcmk_is_set(show_opts, pcmk_show_brief)
                    && (rsc->variant == pcmk_rsc_variant_primitive)) {
             continue;
 
         /* Skip resources that aren't at least partially active,
          * unless we're displaying inactive resources
          */
         } else if (!partially_active && !pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
             continue;
 
         } else if (partially_active && !pe__rsc_running_on_any(rsc, only_node)) {
             continue;
         }
 
         if (!printed_header) {
             PCMK__OUTPUT_SPACER_IF(out, print_spacer);
             print_resource_header(out, show_opts);
             printed_header = true;
         }
 
         /* Print this resource */
         x = out->message(out, crm_map_element_name(rsc->xml), show_opts, rsc,
                          only_node, only_rsc);
         if (x == pcmk_rc_ok) {
             rc = pcmk_rc_ok;
         }
     }
 
     if (print_summary && rc != pcmk_rc_ok) {
         if (!printed_header) {
             PCMK__OUTPUT_SPACER_IF(out, print_spacer);
             print_resource_header(out, show_opts);
             printed_header = true;
         }
 
         if (pcmk_is_set(show_opts, pcmk_show_rscs_by_node)) {
             out->list_item(out, NULL, "No inactive resources");
         } else if (pcmk_is_set(show_opts, pcmk_show_inactive_rscs)) {
             out->list_item(out, NULL, "No resources");
         } else {
             out->list_item(out, NULL, "No active resources");
         }
     }
 
     if (printed_header) {
         out->end_list(out);
     }
 
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("resource-operation-list", "pe_working_set_t *", "pe_resource_t *",
                   "pe_node_t *", "GList *", "uint32_t")
 static int
 resource_operation_list(pcmk__output_t *out, va_list args)
 {
     pe_working_set_t *data_set G_GNUC_UNUSED = va_arg(args, pe_working_set_t *);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     pe_node_t *node = va_arg(args, pe_node_t *);
     GList *op_list = va_arg(args, GList *);
     uint32_t show_opts = va_arg(args, uint32_t);
 
     GList *gIter = NULL;
     int rc = pcmk_rc_no_output;
 
     /* Print each operation */
     for (gIter = op_list; gIter != NULL; gIter = gIter->next) {
         xmlNode *xml_op = (xmlNode *) gIter->data;
         const char *task = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
         const char *interval_ms_s = crm_element_value(xml_op,
                                                       XML_LRM_ATTR_INTERVAL_MS);
         const char *op_rc = crm_element_value(xml_op, XML_LRM_ATTR_RC);
         int op_rc_i;
 
         pcmk__scan_min_int(op_rc, &op_rc_i, 0);
 
         /* Display 0-interval monitors as "probe" */
         if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)
             && pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches | pcmk__str_casei)) {
             task = "probe";
         }
 
         /* If this is the first printed operation, print heading for resource */
         if (rc == pcmk_rc_no_output) {
             time_t last_failure = 0;
             int failcount = pe_get_failcount(node, rsc, &last_failure, pe_fc_default,
                                              NULL);
 
             out->message(out, "resource-history", rsc, rsc_printable_id(rsc), true,
                          failcount, last_failure, true);
             rc = pcmk_rc_ok;
         }
 
         /* Print the operation */
         out->message(out, "op-history", xml_op, task, interval_ms_s,
                      op_rc_i, show_opts);
     }
 
     /* Free the list we created (no need to free the individual items) */
     g_list_free(op_list);
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 PCMK__OUTPUT_ARGS("resource-util", "pe_resource_t *", "pe_node_t *", "const char *")
 static int
 resource_util(pcmk__output_t *out, va_list args)
 {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     pe_node_t *node = va_arg(args, pe_node_t *);
     const char *fn = va_arg(args, const char *);
 
     char *dump_text = crm_strdup_printf("%s: %s utilization on %s:",
                                         fn, rsc->id, pe__node_name(node));
 
     g_hash_table_foreach(rsc->utilization, append_dump_text, &dump_text);
     out->list_item(out, NULL, "%s", dump_text);
     free(dump_text);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("resource-util", "pe_resource_t *", "pe_node_t *", "const char *")
 static int
 resource_util_xml(pcmk__output_t *out, va_list args)
 {
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     pe_node_t *node = va_arg(args, pe_node_t *);
     const char *fn = va_arg(args, const char *);
 
     xmlNodePtr xml_node = pcmk__output_create_xml_node(out, "utilization",
                                                        "resource", rsc->id,
                                                        "node", node->details->uname,
                                                        "function", fn,
                                                        NULL);
     g_hash_table_foreach(rsc->utilization, add_dump_node, xml_node);
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
 static int
 ticket_html(pcmk__output_t *out, va_list args) {
     pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
 
     if (ticket->last_granted > -1) {
         char *epoch_str = pcmk__epoch2str(&(ticket->last_granted), 0);
 
         out->list_item(out, NULL, "%s:\t%s%s %s=\"%s\"", ticket->id,
                        ticket->granted ? "granted" : "revoked",
                        ticket->standby ? " [standby]" : "",
                        "last-granted", pcmk__s(epoch_str, ""));
         free(epoch_str);
     } else {
         out->list_item(out, NULL, "%s:\t%s%s", ticket->id,
                        ticket->granted ? "granted" : "revoked",
                        ticket->standby ? " [standby]" : "");
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
 static int
 ticket_text(pcmk__output_t *out, va_list args) {
     pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
 
     if (ticket->last_granted > -1) {
         char *epoch_str = pcmk__epoch2str(&(ticket->last_granted), 0);
 
         out->list_item(out, ticket->id, "%s%s %s=\"%s\"",
                        ticket->granted ? "granted" : "revoked",
                        ticket->standby ? " [standby]" : "",
                        "last-granted", pcmk__s(epoch_str, ""));
         free(epoch_str);
     } else {
         out->list_item(out, ticket->id, "%s%s",
                        ticket->granted ? "granted" : "revoked",
                        ticket->standby ? " [standby]" : "");
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ticket", "pe_ticket_t *")
 static int
 ticket_xml(pcmk__output_t *out, va_list args) {
     pe_ticket_t *ticket = va_arg(args, pe_ticket_t *);
 
     xmlNodePtr node = NULL;
 
     node = pcmk__output_create_xml_node(out, "ticket",
                                         "id", ticket->id,
                                         "status", ticket->granted ? "granted" : "revoked",
                                         "standby", pcmk__btoa(ticket->standby),
                                         NULL);
 
     if (ticket->last_granted > -1) {
         char *buf = pcmk__epoch2str(&ticket->last_granted, 0);
 
         crm_xml_add(node, "last-granted", buf);
         free(buf);
     }
 
     return pcmk_rc_ok;
 }
 
 PCMK__OUTPUT_ARGS("ticket-list", "pe_working_set_t *", "bool")
 static int
 ticket_list(pcmk__output_t *out, va_list args) {
     pe_working_set_t *data_set = va_arg(args, pe_working_set_t *);
     bool print_spacer = va_arg(args, int);
 
     GHashTableIter iter;
     gpointer key, value;
 
     if (g_hash_table_size(data_set->tickets) == 0) {
         return pcmk_rc_no_output;
     }
 
     PCMK__OUTPUT_SPACER_IF(out, print_spacer);
 
     /* Print section heading */
     out->begin_list(out, NULL, NULL, "Tickets");
 
     /* Print each ticket */
     g_hash_table_iter_init(&iter, data_set->tickets);
     while (g_hash_table_iter_next(&iter, &key, &value)) {
         pe_ticket_t *ticket = (pe_ticket_t *) value;
         out->message(out, "ticket", ticket);
     }
 
     /* Close section */
     out->end_list(out);
     return pcmk_rc_ok;
 }
 
 static pcmk__message_entry_t fmt_functions[] = {
     { "ban", "default", ban_text },
     { "ban", "html", ban_html },
     { "ban", "xml", ban_xml },
     { "ban-list", "default", ban_list },
     { "bundle", "default", pe__bundle_text },
     { "bundle", "xml",  pe__bundle_xml },
     { "bundle", "html",  pe__bundle_html },
     { "clone", "default", pe__clone_default },
     { "clone", "xml",  pe__clone_xml },
     { "cluster-counts", "default", cluster_counts_text },
     { "cluster-counts", "html", cluster_counts_html },
     { "cluster-counts", "xml", cluster_counts_xml },
     { "cluster-dc", "default", cluster_dc_text },
     { "cluster-dc", "html", cluster_dc_html },
     { "cluster-dc", "xml", cluster_dc_xml },
     { "cluster-options", "default", cluster_options_text },
     { "cluster-options", "html", cluster_options_html },
     { "cluster-options", "log", cluster_options_log },
     { "cluster-options", "xml", cluster_options_xml },
     { "cluster-summary", "default", cluster_summary },
     { "cluster-summary", "html", cluster_summary_html },
     { "cluster-stack", "default", cluster_stack_text },
     { "cluster-stack", "html", cluster_stack_html },
     { "cluster-stack", "xml", cluster_stack_xml },
     { "cluster-times", "default", cluster_times_text },
     { "cluster-times", "html", cluster_times_html },
     { "cluster-times", "xml", cluster_times_xml },
     { "failed-action", "default", failed_action_default },
     { "failed-action", "xml", failed_action_xml },
     { "failed-action-list", "default", failed_action_list },
     { "group", "default",  pe__group_default},
     { "group", "xml",  pe__group_xml },
     { "maint-mode", "text", cluster_maint_mode_text },
     { "node", "default", node_text },
     { "node", "html", node_html },
     { "node", "xml", node_xml },
     { "node-and-op", "default", node_and_op },
     { "node-and-op", "xml", node_and_op_xml },
     { "node-capacity", "default", node_capacity },
     { "node-capacity", "xml", node_capacity_xml },
     { "node-history-list", "default", node_history_list },
     { "node-list", "default", node_list_text },
     { "node-list", "html", node_list_html },
     { "node-list", "xml", node_list_xml },
     { "node-weight", "default", node_weight },
     { "node-weight", "xml", node_weight_xml },
     { "node-attribute", "default", node_attribute_text },
     { "node-attribute", "html", node_attribute_html },
     { "node-attribute", "xml", node_attribute_xml },
     { "node-attribute-list", "default", node_attribute_list },
     { "node-summary", "default", node_summary },
     { "op-history", "default", op_history_text },
     { "op-history", "xml", op_history_xml },
     { "primitive", "default",  pe__resource_text },
     { "primitive", "xml",  pe__resource_xml },
     { "primitive", "html",  pe__resource_html },
     { "promotion-score", "default", promotion_score },
     { "promotion-score", "xml", promotion_score_xml },
     { "resource-config", "default", resource_config },
     { "resource-config", "text", resource_config_text },
     { "resource-history", "default", resource_history_text },
     { "resource-history", "xml", resource_history_xml },
     { "resource-list", "default", resource_list },
     { "resource-operation-list", "default", resource_operation_list },
     { "resource-util", "default", resource_util },
     { "resource-util", "xml", resource_util_xml },
     { "ticket", "default", ticket_text },
     { "ticket", "html", ticket_html },
     { "ticket", "xml", ticket_xml },
     { "ticket-list", "default", ticket_list },
 
     { NULL, NULL, NULL }
 };
 
 void
 pe__register_messages(pcmk__output_t *out) {
     pcmk__register_messages(out, fmt_functions);
 }
diff --git a/tools/crm_resource_runtime.c b/tools/crm_resource_runtime.c
index f47394d17f..8f619812d7 100644
--- a/tools/crm_resource_runtime.c
+++ b/tools/crm_resource_runtime.c
@@ -1,2179 +1,2179 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm_resource.h>
 #include <crm/common/ipc_attrd_internal.h>
 #include <crm/common/ipc_controld.h>
 #include <crm/common/lists_internal.h>
 #include <crm/services_internal.h>
 
 static GList *
 build_node_info_list(const pe_resource_t *rsc)
 {
     GList *retval = NULL;
 
     for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         const pe_resource_t *child = (const pe_resource_t *) iter->data;
 
         for (const GList *iter2 = child->running_on;
              iter2 != NULL; iter2 = iter2->next) {
 
             const pe_node_t *node = (const pe_node_t *) iter2->data;
             node_info_t *ni = calloc(1, sizeof(node_info_t));
 
             ni->node_name = node->details->uname;
             ni->promoted = pcmk_is_set(rsc->flags, pe_rsc_promotable) &&
                            child->fns->state(child, TRUE) == pcmk_role_promoted;
 
             retval = g_list_prepend(retval, ni);
         }
     }
 
     return retval;
 }
 
 GList *
 cli_resource_search(pe_resource_t *rsc, const char *requested_name,
                     pe_working_set_t *data_set)
 {
     GList *retval = NULL;
     const pe_resource_t *parent = pe__const_top_resource(rsc, false);
 
     if (pe_rsc_is_clone(rsc)) {
         retval = build_node_info_list(rsc);
 
     /* The requested name is the common ID of an anonymous clone's children */
     } else if (pe_rsc_is_clone(parent)
                && !pcmk_is_set(rsc->flags, pe_rsc_unique)
                && rsc->clone_name
                && pcmk__str_eq(requested_name, rsc->clone_name, pcmk__str_casei)
                && !pcmk__str_eq(requested_name, rsc->id, pcmk__str_casei)) {
 
         retval = build_node_info_list(parent);
 
     } else if (rsc->running_on != NULL) {
         for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
             pe_node_t *node = (pe_node_t *) iter->data;
             node_info_t *ni = calloc(1, sizeof(node_info_t));
             ni->node_name = node->details->uname;
             ni->promoted = (rsc->fns->state(rsc, TRUE) == pcmk_role_promoted);
 
             retval = g_list_prepend(retval, ni);
         }
     }
 
     return retval;
 }
 
 // \return Standard Pacemaker return code
 static int
 find_resource_attr(pcmk__output_t *out, cib_t * the_cib, const char *attr,
                    const char *rsc, const char *attr_set_type, const char *set_name,
                    const char *attr_id, const char *attr_name, char **value)
 {
     int rc = pcmk_rc_ok;
     xmlNode *xml_search = NULL;
     GString *xpath = NULL;
     const char *xpath_base = NULL;
 
     if(value) {
         *value = NULL;
     }
 
     if(the_cib == NULL) {
         return ENOTCONN;
     }
 
     xpath_base = pcmk_cib_xpath_for(XML_CIB_TAG_RESOURCES);
     if (xpath_base == NULL) {
         crm_err(XML_CIB_TAG_RESOURCES " CIB element not known (bug?)");
         return ENOMSG;
     }
 
     xpath = g_string_sized_new(1024);
     pcmk__g_strcat(xpath,
                    xpath_base, "//*[@" XML_ATTR_ID "=\"", rsc, "\"]", NULL);
 
     if (attr_set_type != NULL) {
         pcmk__g_strcat(xpath, "/", attr_set_type, NULL);
         if (set_name != NULL) {
             pcmk__g_strcat(xpath, "[@" XML_ATTR_ID "=\"", set_name, "\"]",
                            NULL);
         }
     }
 
     g_string_append(xpath, "//" XML_CIB_TAG_NVPAIR "[");
     if (attr_id != NULL) {
         pcmk__g_strcat(xpath, "@" XML_ATTR_ID "=\"", attr_id, "\"", NULL);
     }
 
     if (attr_name != NULL) {
         if (attr_id != NULL) {
             g_string_append(xpath, " and ");
         }
         pcmk__g_strcat(xpath, "@" XML_NVPAIR_ATTR_NAME "=\"", attr_name, "\"",
                        NULL);
     }
     g_string_append_c(xpath, ']');
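     /* e.g. with rsc="myrsc", attr_set_type="meta_attributes" and
      * attr_name="target-role" (values illustrative), the query built above is
      * roughly:
      *   .../resources//*[@id="myrsc"]/meta_attributes//nvpair[@name="target-role"]
      */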
 
     rc = the_cib->cmds->query(the_cib, (const char *) xpath->str, &xml_search,
                               cib_sync_call | cib_scope_local | cib_xpath);
     rc = pcmk_legacy2rc(rc);
 
     if (rc != pcmk_rc_ok) {
         goto done;
     }
 
     crm_log_xml_debug(xml_search, "Match");
     if (xml_search->children != NULL) {
         xmlNode *child = NULL;
 
         rc = ENOTUNIQ;
         out->info(out, "Multiple attributes match name=%s", attr_name);
 
         for (child = pcmk__xml_first_child(xml_search); child != NULL;
              child = pcmk__xml_next(child)) {
             out->info(out, "  Value: %s \t(id=%s)",
                       crm_element_value(child, XML_NVPAIR_ATTR_VALUE), ID(child));
         }
 
         out->spacer(out);
 
     } else if(value) {
         pcmk__str_update(value, crm_element_value(xml_search, attr));
     }
 
   done:
     g_string_free(xpath, TRUE);
     free_xml(xml_search);
     return rc;
 }
 
 /* PRIVATE. Use find_matching_attr_resources() instead. */
 static void
 find_matching_attr_resources_recursive(pcmk__output_t *out, GList/* <pe_resource_t*> */ ** result,
                                        pe_resource_t * rsc, const char * rsc_id,
                                        const char * attr_set, const char * attr_set_type,
                                        const char * attr_id, const char * attr_name,
                                        cib_t * cib, const char * cmd, int depth)
 {
     int rc = pcmk_rc_ok;
     char *lookup_id = clone_strip(rsc->id);
     char *local_attr_id = NULL;
 
     /* visit the children */
     for(GList *gIter = rsc->children; gIter; gIter = gIter->next) {
         find_matching_attr_resources_recursive(out, result, (pe_resource_t*)gIter->data,
                                                rsc_id, attr_set, attr_set_type,
                                                attr_id, attr_name, cib, cmd, depth+1);
         /* do it only once for clones */
         if(pe_clone == rsc->variant) {
             break;
         }
     }
 
     rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
                             attr_set, attr_id, attr_name, &local_attr_id);
     /* Post-order traversal. 
      * The root is always on the list and it is the last item. */
     if((0 == depth) || (pcmk_rc_ok == rc)) {
         /* push the head */
         *result = g_list_append(*result, rsc);
     }
 
     free(local_attr_id);
     free(lookup_id);
 }
 
 
 /* The result is a list of resources in post-order (children before their parent). */
 static GList/*<pe_resource_t*>*/ *
 find_matching_attr_resources(pcmk__output_t *out, pe_resource_t * rsc,
                              const char * rsc_id, const char * attr_set,
                              const char * attr_set_type, const char * attr_id,
                              const char * attr_name, cib_t * cib, const char * cmd,
                              gboolean force)
 {
     int rc = pcmk_rc_ok;
     char *lookup_id = NULL;
     char *local_attr_id = NULL;
     GList * result = NULL;
     /* If --force is used, update only the requested resource (clone or primitive).
      * Otherwise, if the primitive has the attribute, use that.
      * Otherwise use the clone. */
     if(force == TRUE) {
         return g_list_append(result, rsc);
     }
     if(rsc->parent && pe_clone == rsc->parent->variant) {
         int rc = pcmk_rc_ok;
         char *local_attr_id = NULL;
         rc = find_resource_attr(out, cib, XML_ATTR_ID, rsc_id, attr_set_type,
                                 attr_set, attr_id, attr_name, &local_attr_id);
         free(local_attr_id);
 
         if(rc != pcmk_rc_ok) {
             rsc = rsc->parent;
             out->info(out, "Performing %s of '%s' on '%s', the parent of '%s'",
                       cmd, attr_name, rsc->id, rsc_id);
         }
         return g_list_append(result, rsc);
     } else if(rsc->parent == NULL && rsc->children && pe_clone == rsc->variant) {
         pe_resource_t *child = rsc->children->data;
 
         if (child->variant == pcmk_rsc_variant_primitive) {
             lookup_id = clone_strip(child->id); /* Could be a cloned group! */
             rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
                                     attr_set, attr_id, attr_name, &local_attr_id);
 
             if(rc == pcmk_rc_ok) {
                 rsc = child;
                 out->info(out, "A value for '%s' already exists in child '%s', performing %s on that instead of '%s'",
                           attr_name, lookup_id, cmd, rsc_id);
             }
 
             free(local_attr_id);
             free(lookup_id);
         }
         return g_list_append(result, rsc);
     }
     /* If the resource is a group ==> children inherit the attribute if defined. */
     find_matching_attr_resources_recursive(out, &result, rsc, rsc_id, attr_set,
                                            attr_set_type, attr_id, attr_name,
                                            cib, cmd, 0);
     return result;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_update_attribute(pe_resource_t *rsc, const char *requested_name,
                               const char *attr_set, const char *attr_set_type,
                               const char *attr_id, const char *attr_name,
                               const char *attr_value, gboolean recursive,
                               cib_t *cib, int cib_options, gboolean force)
 {
     pcmk__output_t *out = rsc->cluster->priv;
     int rc = pcmk_rc_ok;
 
     char *found_attr_id = NULL;
 
     GList/*<pe_resource_t*>*/ *resources = NULL;
     const char *top_id = pe__const_top_resource(rsc, false)->id;
 
     if ((attr_id == NULL) && !force) {
         find_resource_attr(out, cib, XML_ATTR_ID, top_id, NULL, NULL, NULL,
                            attr_name, NULL);
     }
 
     if (pcmk__str_eq(attr_set_type, XML_TAG_ATTR_SETS, pcmk__str_casei)) {
         if (!force) {
             rc = find_resource_attr(out, cib, XML_ATTR_ID, top_id,
                                     XML_TAG_META_SETS, attr_set, attr_id,
                                     attr_name, &found_attr_id);
             if ((rc == pcmk_rc_ok) && !out->is_quiet(out)) {
                 out->err(out,
                          "WARNING: There is already a meta attribute "
                          "for '%s' called '%s' (id=%s)",
                          top_id, attr_name, found_attr_id);
                 out->err(out,
                          "         Delete '%s' first or use the force option "
                          "to override", found_attr_id);
             }
             free(found_attr_id);
             if (rc == pcmk_rc_ok) {
                 return ENOTUNIQ;
             }
         }
         resources = g_list_append(resources, rsc);
 
     } else if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
         crm_xml_add(rsc->xml, attr_name, attr_value);
         CRM_ASSERT(cib != NULL);
         rc = cib->cmds->replace(cib, XML_CIB_TAG_RESOURCES, rsc->xml,
                                 cib_options);
         rc = pcmk_legacy2rc(rc);
         if (rc == pcmk_rc_ok) {
             out->info(out, "Set attribute: name=%s value=%s",
                       attr_name, attr_value);
         }
         return rc;
 
     } else {
         resources = find_matching_attr_resources(out, rsc, requested_name,
                                                  attr_set, attr_set_type,
                                                  attr_id, attr_name, cib,
                                                  "update", force);
     }
 
     /* If the user specified attr_set or attr_id, the intent is to modify a
      * single resource, which will be the last item in the list.
      */
     if ((attr_set != NULL) || (attr_id != NULL)) {
         GList *last = g_list_last(resources);
 
         resources = g_list_remove_link(resources, last);
         g_list_free(resources);
         resources = last;
     }
 
     for (GList *iter = resources; iter != NULL; iter = iter->next) {
         char *lookup_id = NULL;
         char *local_attr_set = NULL;
         const char *rsc_attr_id = attr_id;
         const char *rsc_attr_set = attr_set;
 
         xmlNode *xml_top = NULL;
         xmlNode *xml_obj = NULL;
         found_attr_id = NULL;
 
         rsc = (pe_resource_t *) iter->data;
 
         lookup_id = clone_strip(rsc->id); /* Could be a cloned group! */
         rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
                                 attr_set, attr_id, attr_name, &found_attr_id);
 
         switch (rc) {
             case pcmk_rc_ok:
                 crm_debug("Found a match for name=%s: id=%s",
                           attr_name, found_attr_id);
                 rsc_attr_id = found_attr_id;
                 break;
 
             case ENXIO:
                 if (rsc_attr_set == NULL) {
                     local_attr_set = crm_strdup_printf("%s-%s", lookup_id,
                                                        attr_set_type);
                     rsc_attr_set = local_attr_set;
                 }
                 if (rsc_attr_id == NULL) {
                     found_attr_id = crm_strdup_printf("%s-%s",
                                                       rsc_attr_set, attr_name);
                     rsc_attr_id = found_attr_id;
                 }
 
                 xml_top = create_xml_node(NULL, (const char *) rsc->xml->name);
                 crm_xml_add(xml_top, XML_ATTR_ID, lookup_id);
 
                 xml_obj = create_xml_node(xml_top, attr_set_type);
                 crm_xml_add(xml_obj, XML_ATTR_ID, rsc_attr_set);
                 break;
 
             default:
                 free(lookup_id);
                 free(found_attr_id);
                 g_list_free(resources);
                 return rc;
         }
 
         xml_obj = crm_create_nvpair_xml(xml_obj, rsc_attr_id, attr_name,
                                         attr_value);
         if (xml_top == NULL) {
             xml_top = xml_obj;
         }
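
         /* For the ENXIO case above, xml_top now holds something like (element
          * and id names illustrative):
          *   <primitive id="myrsc">
          *     <meta_attributes id="myrsc-meta_attributes">
          *       <nvpair id="myrsc-meta_attributes-target-role"
          *               name="target-role" value="..."/>
          *     </meta_attributes>
          *   </primitive>
          * which the modify call below merges into the resources section.
          */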
 
         crm_log_xml_debug(xml_top, "Update");
 
         rc = cib->cmds->modify(cib, XML_CIB_TAG_RESOURCES, xml_top,
                                cib_options);
         rc = pcmk_legacy2rc(rc);
         if (rc == pcmk_rc_ok) {
             out->info(out, "Set '%s' option: id=%s%s%s%s%s value=%s",
                       lookup_id, found_attr_id,
                       ((rsc_attr_set == NULL)? "" : " set="),
                       pcmk__s(rsc_attr_set, ""),
                       ((attr_name == NULL)? "" : " name="),
                       pcmk__s(attr_name, ""), attr_value);
         }
 
         free_xml(xml_top);
 
         free(lookup_id);
         free(found_attr_id);
         free(local_attr_set);
 
         if (recursive
             && pcmk__str_eq(attr_set_type, XML_TAG_META_SETS,
                             pcmk__str_casei)) {
             GList *lpc = NULL;
             static bool need_init = true;
 
             if (need_init) {
                 need_init = false;
                 pcmk__unpack_constraints(rsc->cluster);
                 pe__clear_resource_flags_on_all(rsc->cluster,
                                                 pe_rsc_detect_loop);
             }
 
             /* We want to set the attribute only on resources explicitly
              * colocated with this one, so we use rsc->rsc_cons_lhs directly
              * rather than the with_this_colocations() method.
              */
             pe__set_resource_flags(rsc, pe_rsc_detect_loop);
             for (lpc = rsc->rsc_cons_lhs; lpc != NULL; lpc = lpc->next) {
                 pcmk__colocation_t *cons = (pcmk__colocation_t *) lpc->data;
 
                 crm_debug("Checking %s %d", cons->id, cons->score);
                 if (!pcmk_is_set(cons->dependent->flags, pe_rsc_detect_loop)
                     && (cons->score > 0)) {
                     crm_debug("Setting %s=%s for dependent resource %s",
                               attr_name, attr_value, cons->dependent->id);
                     cli_resource_update_attribute(cons->dependent,
                                                   cons->dependent->id, NULL,
                                                   attr_set_type, NULL,
                                                   attr_name, attr_value,
                                                   recursive, cib, cib_options,
                                                   force);
                 }
             }
         }
     }
     g_list_free(resources);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_delete_attribute(pe_resource_t *rsc, const char *requested_name,
                               const char *attr_set, const char *attr_set_type,
                               const char *attr_id, const char *attr_name,
                               cib_t *cib, int cib_options, gboolean force)
 {
     pcmk__output_t *out = rsc->cluster->priv;
     int rc = pcmk_rc_ok;
     GList/*<pe_resource_t*>*/ *resources = NULL;
 
     if ((attr_id == NULL) && !force) {
         find_resource_attr(out, cib, XML_ATTR_ID,
                            pe__const_top_resource(rsc, false)->id, NULL,
                            NULL, NULL, attr_name, NULL);
     }
 
     if (pcmk__str_eq(attr_set_type, XML_TAG_META_SETS, pcmk__str_casei)) {
         resources = find_matching_attr_resources(out, rsc, requested_name,
                                                  attr_set, attr_set_type,
                                                  attr_id, attr_name, cib,
                                                  "delete", force);
 
     } else if (pcmk__str_eq(attr_set_type, ATTR_SET_ELEMENT, pcmk__str_none)) {
         xml_remove_prop(rsc->xml, attr_name);
         CRM_ASSERT(cib != NULL);
         rc = cib->cmds->replace(cib, XML_CIB_TAG_RESOURCES, rsc->xml,
                                 cib_options);
         rc = pcmk_legacy2rc(rc);
         if (rc == pcmk_rc_ok) {
             out->info(out, "Deleted attribute: %s", attr_name);
         }
         return rc;
 
     } else {
         resources = g_list_append(resources, rsc);
     }
 
     for (GList *iter = resources; iter != NULL; iter = iter->next) {
         char *lookup_id = NULL;
         xmlNode *xml_obj = NULL;
         char *found_attr_id = NULL;
         const char *rsc_attr_id = attr_id;
 
         rsc = (pe_resource_t *) iter->data;
 
         lookup_id = clone_strip(rsc->id);
         rc = find_resource_attr(out, cib, XML_ATTR_ID, lookup_id, attr_set_type,
                                 attr_set, attr_id, attr_name, &found_attr_id);
         switch (rc) {
             case pcmk_rc_ok:
                 break;
 
             case ENXIO:
                 free(lookup_id);
                 rc = pcmk_rc_ok;
                 continue;
 
             default:
                 free(lookup_id);
                 g_list_free(resources);
                 return rc;
         }
 
         if (rsc_attr_id == NULL) {
             rsc_attr_id = found_attr_id;
         }
 
         xml_obj = crm_create_nvpair_xml(NULL, rsc_attr_id, attr_name, NULL);
         crm_log_xml_debug(xml_obj, "Delete");
 
         CRM_ASSERT(cib);
         rc = cib->cmds->remove(cib, XML_CIB_TAG_RESOURCES, xml_obj,
                                cib_options);
         rc = pcmk_legacy2rc(rc);
 
         if (rc == pcmk_rc_ok) {
             out->info(out, "Deleted '%s' option: id=%s%s%s%s%s",
                       lookup_id, found_attr_id,
                       ((attr_set == NULL)? "" : " set="),
                       pcmk__s(attr_set, ""),
                       ((attr_name == NULL)? "" : " name="),
                       pcmk__s(attr_name, ""));
         }
 
         free(lookup_id);
         free_xml(xml_obj);
         free(found_attr_id);
     }
     g_list_free(resources);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 static int
 send_lrm_rsc_op(pcmk_ipc_api_t *controld_api, bool do_fail_resource,
                 const char *host_uname, const char *rsc_id, pe_working_set_t *data_set)
 {
     pcmk__output_t *out = data_set->priv;
     const char *router_node = host_uname;
     const char *rsc_api_id = NULL;
     const char *rsc_long_id = NULL;
     const char *rsc_class = NULL;
     const char *rsc_provider = NULL;
     const char *rsc_type = NULL;
     bool cib_only = false;
     pe_resource_t *rsc = pe_find_resource(data_set->resources, rsc_id);
 
     if (rsc == NULL) {
         out->err(out, "Resource %s not found", rsc_id);
         return ENXIO;
 
     } else if (rsc->variant != pcmk_rsc_variant_primitive) {
         out->err(out, "We can only process primitive resources, not %s", rsc_id);
         return EINVAL;
     }
 
     rsc_class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     rsc_provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     rsc_type = crm_element_value(rsc->xml, XML_ATTR_TYPE);
     if ((rsc_class == NULL) || (rsc_type == NULL)) {
         out->err(out, "Resource %s does not have a class and type", rsc_id);
         return EINVAL;
     }
 
     {
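         /* Look up the target node; operations on Pacemaker Remote or guest
          * nodes must be routed through the cluster node currently hosting the
          * remote connection. */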
         pe_node_t *node = pe_find_node(data_set->nodes, host_uname);
 
         if (node == NULL) {
             out->err(out, "Node %s not found", host_uname);
             return pcmk_rc_node_unknown;
         }
 
         if (!(node->details->online)) {
             if (do_fail_resource) {
                 out->err(out, "Node %s is not online", host_uname);
                 return ENOTCONN;
             } else {
                 cib_only = true;
             }
         }
         if (!cib_only && pe__is_guest_or_remote_node(node)) {
             node = pe__current_node(node->details->remote_rsc);
             if (node == NULL) {
                 out->err(out, "No cluster connection to Pacemaker Remote node %s detected",
                          host_uname);
                 return ENOTCONN;
             }
             router_node = node->details->uname;
         }
     }
 
     if (rsc->clone_name) {
         rsc_api_id = rsc->clone_name;
         rsc_long_id = rsc->id;
     } else {
         rsc_api_id = rsc->id;
     }
     if (do_fail_resource) {
         return pcmk_controld_api_fail(controld_api, host_uname, router_node,
                                       rsc_api_id, rsc_long_id,
                                       rsc_class, rsc_provider, rsc_type);
     } else {
         return pcmk_controld_api_refresh(controld_api, host_uname, router_node,
                                          rsc_api_id, rsc_long_id, rsc_class,
                                          rsc_provider, rsc_type, cib_only);
     }
 }
 
 /*!
  * \internal
  * \brief Get resource name as used in failure-related node attributes
  *
  * \param[in] rsc  Resource to check
  *
  * \return Newly allocated string containing resource's fail name
  * \note The caller is responsible for freeing the result.
  */
 static inline char *
 rsc_fail_name(const pe_resource_t *rsc)
 {
     const char *name = (rsc->clone_name? rsc->clone_name : rsc->id);
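     /* e.g. an anonymous clone instance "myrsc:2" maps to "myrsc" below, while
      * a globally unique instance keeps its ":2" suffix */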
 
     return pcmk_is_set(rsc->flags, pe_rsc_unique)? strdup(name) : clone_strip(name);
 }
 
 // \return Standard Pacemaker return code
 static int
 clear_rsc_history(pcmk_ipc_api_t *controld_api, const char *host_uname,
                   const char *rsc_id, pe_working_set_t *data_set)
 {
     int rc = pcmk_rc_ok;
 
     /* Erase the resource's entire LRM history in the CIB, even if we're only
      * clearing a single operation's fail count. If we erased only entries for a
      * single operation, we might wind up with a wrong idea of the current
      * resource state, and we might not re-probe the resource.
      */
     rc = send_lrm_rsc_op(controld_api, false, host_uname, rsc_id, data_set);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     crm_trace("Processing %d mainloop inputs",
               pcmk_controld_api_replies_expected(controld_api));
     while (g_main_context_iteration(NULL, FALSE)) {
         crm_trace("Processed mainloop input, %d still remaining",
                   pcmk_controld_api_replies_expected(controld_api));
     }
     return rc;
 }
 
 // \return Standard Pacemaker return code
 static int
 clear_rsc_failures(pcmk__output_t *out, pcmk_ipc_api_t *controld_api,
                    const char *node_name, const char *rsc_id, const char *operation,
                    const char *interval_spec, pe_working_set_t *data_set)
 {
     int rc = pcmk_rc_ok;
     const char *failed_value = NULL;
     const char *failed_id = NULL;
     const char *interval_ms_s = NULL;
     GHashTable *rscs = NULL;
     GHashTableIter iter;
 
     /* Create a hash table to use as a set of resources to clean. This lets us
      * clean each resource only once (per node) regardless of how many failed
      * operations it has.
      */
     rscs = pcmk__strkey_table(NULL, NULL);
 
     // Normalize interval to milliseconds for comparison to history entry
     if (operation) {
         interval_ms_s = crm_strdup_printf("%u",
                                           crm_parse_interval_spec(interval_spec));
     }
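     // e.g. an interval_spec of "1m" is normalized to "60000" for the
     // XML_LRM_ATTR_INTERVAL_MS comparison below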
 
     for (xmlNode *xml_op = pcmk__xml_first_child(data_set->failed);
          xml_op != NULL;
          xml_op = pcmk__xml_next(xml_op)) {
 
         failed_id = crm_element_value(xml_op, XML_LRM_ATTR_RSCID);
         if (failed_id == NULL) {
             // Malformed history entry, should never happen
             continue;
         }
 
         // No resource specified means all resources match
         if (rsc_id) {
             pe_resource_t *fail_rsc = pe_find_resource_with_flags(data_set->resources,
                                                                   failed_id,
                                                                   pe_find_renamed|pe_find_anon);
 
             if (!fail_rsc || !pcmk__str_eq(rsc_id, fail_rsc->id, pcmk__str_casei)) {
                 continue;
             }
         }
 
         // Host name should always have been provided by this point
         failed_value = crm_element_value(xml_op, XML_ATTR_UNAME);
         if (!pcmk__str_eq(node_name, failed_value, pcmk__str_casei)) {
             continue;
         }
 
         // No operation specified means all operations match
         if (operation) {
             failed_value = crm_element_value(xml_op, XML_LRM_ATTR_TASK);
             if (!pcmk__str_eq(operation, failed_value, pcmk__str_casei)) {
                 continue;
             }
 
             // Interval (if operation was specified) defaults to 0 (not all)
             failed_value = crm_element_value(xml_op, XML_LRM_ATTR_INTERVAL_MS);
             if (!pcmk__str_eq(interval_ms_s, failed_value, pcmk__str_casei)) {
                 continue;
             }
         }
 
         g_hash_table_add(rscs, (gpointer) failed_id);
     }
 
     g_hash_table_iter_init(&iter, rscs);
     while (g_hash_table_iter_next(&iter, (gpointer *) &failed_id, NULL)) {
         crm_debug("Erasing failures of %s on %s", failed_id, node_name);
         rc = clear_rsc_history(controld_api, node_name, failed_id, data_set);
         if (rc != pcmk_rc_ok) {
             return rc;
         }
     }
     g_hash_table_destroy(rscs);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 static int
 clear_rsc_fail_attrs(const pe_resource_t *rsc, const char *operation,
                      const char *interval_spec, const pe_node_t *node)
 {
     int rc = pcmk_rc_ok;
     int attr_options = pcmk__node_attr_none;
     char *rsc_name = rsc_fail_name(rsc);
 
     if (pe__is_guest_or_remote_node(node)) {
         attr_options |= pcmk__node_attr_remote;
     }
 
     rc = pcmk__attrd_api_clear_failures(NULL, node->details->uname, rsc_name,
                                         operation, interval_spec, NULL,
                                         attr_options);
     free(rsc_name);
     return rc;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_delete(pcmk_ipc_api_t *controld_api, const char *host_uname,
                     const pe_resource_t *rsc, const char *operation,
                     const char *interval_spec, bool just_failures,
                     pe_working_set_t *data_set, gboolean force)
 {
     pcmk__output_t *out = data_set->priv;
     int rc = pcmk_rc_ok;
     pe_node_t *node = NULL;
 
     if (rsc == NULL) {
         return ENXIO;
 
     } else if (rsc->children) {
 
         for (const GList *lpc = rsc->children; lpc != NULL; lpc = lpc->next) {
             const pe_resource_t *child = (const pe_resource_t *) lpc->data;
 
             rc = cli_resource_delete(controld_api, host_uname, child, operation,
                                      interval_spec, just_failures, data_set, force);
             if (rc != pcmk_rc_ok) {
                 return rc;
             }
         }
         return pcmk_rc_ok;
 
     } else if (host_uname == NULL) {
         GList *lpc = NULL;
         GList *nodes = g_hash_table_get_values(rsc->known_on);
 
         if(nodes == NULL && force) {
             nodes = pcmk__copy_node_list(data_set->nodes, false);
 
         } else if(nodes == NULL && rsc->exclusive_discover) {
             GHashTableIter iter;
             pe_node_t *node = NULL;
 
             g_hash_table_iter_init(&iter, rsc->allowed_nodes);
             while (g_hash_table_iter_next(&iter, NULL, (void**)&node)) {
                 if(node->weight >= 0) {
                     nodes = g_list_prepend(nodes, node);
                 }
             }
 
         } else if(nodes == NULL) {
             nodes = g_hash_table_get_values(rsc->allowed_nodes);
         }
 
         for (lpc = nodes; lpc != NULL; lpc = lpc->next) {
             node = (pe_node_t *) lpc->data;
 
             if (node->details->online) {
                 rc = cli_resource_delete(controld_api, node->details->uname, rsc,
                                          operation, interval_spec, just_failures,
                                          data_set, force);
             }
             if (rc != pcmk_rc_ok) {
                 g_list_free(nodes);
                 return rc;
             }
         }
 
         g_list_free(nodes);
         return pcmk_rc_ok;
     }
 
     node = pe_find_node(data_set->nodes, host_uname);
 
     if (node == NULL) {
         out->err(out, "Unable to clean up %s because node %s not found",
                  rsc->id, host_uname);
         return ENODEV;
     }
 
     if (!node->details->rsc_discovery_enabled) {
         out->err(out, "Unable to clean up %s because resource discovery disabled on %s",
                  rsc->id, host_uname);
         return EOPNOTSUPP;
     }
 
     if (controld_api == NULL) {
         out->err(out, "Dry run: skipping clean-up of %s on %s due to CIB_file",
                  rsc->id, host_uname);
         return pcmk_rc_ok;
     }
 
     rc = clear_rsc_fail_attrs(rsc, operation, interval_spec, node);
     if (rc != pcmk_rc_ok) {
         out->err(out, "Unable to clean up %s failures on %s: %s",
                  rsc->id, host_uname, pcmk_rc_str(rc));
         return rc;
     }
 
     if (just_failures) {
         rc = clear_rsc_failures(out, controld_api, host_uname, rsc->id, operation,
                                 interval_spec, data_set);
     } else {
         rc = clear_rsc_history(controld_api, host_uname, rsc->id, data_set);
     }
     if (rc != pcmk_rc_ok) {
         out->err(out, "Cleaned %s failures on %s, but unable to clean history: %s",
                  rsc->id, host_uname, pcmk_strerror(rc));
     } else {
         out->info(out, "Cleaned up %s on %s", rsc->id, host_uname);
     }
     return rc;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_cleanup_all(pcmk_ipc_api_t *controld_api, const char *node_name,
                 const char *operation, const char *interval_spec,
                 pe_working_set_t *data_set)
 {
     pcmk__output_t *out = data_set->priv;
     int rc = pcmk_rc_ok;
     int attr_options = pcmk__node_attr_none;
     const char *display_name = node_name? node_name : "all nodes";
 
     if (controld_api == NULL) {
         out->info(out, "Dry run: skipping clean-up of %s due to CIB_file",
                   display_name);
         return rc;
     }
 
     if (node_name) {
         pe_node_t *node = pe_find_node(data_set->nodes, node_name);
 
         if (node == NULL) {
             out->err(out, "Unknown node: %s", node_name);
             return ENXIO;
         }
         if (pe__is_guest_or_remote_node(node)) {
             attr_options |= pcmk__node_attr_remote;
         }
     }
 
     rc = pcmk__attrd_api_clear_failures(NULL, node_name, NULL, operation,
                                         interval_spec, NULL, attr_options);
     if (rc != pcmk_rc_ok) {
         out->err(out, "Unable to clean up all failures on %s: %s",
                  display_name, pcmk_rc_str(rc));
         return rc;
     }
 
     if (node_name) {
         rc = clear_rsc_failures(out, controld_api, node_name, NULL,
                                 operation, interval_spec, data_set);
         if (rc != pcmk_rc_ok) {
             out->err(out, "Cleaned all resource failures on %s, but unable to clean history: %s",
                      node_name, pcmk_strerror(rc));
             return rc;
         }
     } else {
         for (GList *iter = data_set->nodes; iter; iter = iter->next) {
             pe_node_t *node = (pe_node_t *) iter->data;
 
             rc = clear_rsc_failures(out, controld_api, node->details->uname, NULL,
                                     operation, interval_spec, data_set);
             if (rc != pcmk_rc_ok) {
                 out->err(out, "Cleaned all resource failures on all nodes, but unable to clean history: %s",
                          pcmk_strerror(rc));
                 return rc;
             }
         }
     }
 
     out->info(out, "Cleaned up all resources on %s", display_name);
     return rc;
 }
 
 static void
 check_role(resource_checks_t *checks)
 {
     const char *role_s = g_hash_table_lookup(checks->rsc->meta,
                                              XML_RSC_ATTR_TARGET_ROLE);
 
     if (role_s == NULL) {
         return;
     }
     switch (text2role(role_s)) {
         case pcmk_role_stopped:
             checks->flags |= rsc_remain_stopped;
             break;
 
         case pcmk_role_unpromoted:
             if (pcmk_is_set(pe__const_top_resource(checks->rsc, false)->flags,
                             pe_rsc_promotable)) {
                 checks->flags |= rsc_unpromotable;
             }
             break;
 
         default:
             break;
     }
 }
 
 static void
 check_managed(resource_checks_t *checks)
 {
     const char *managed_s = g_hash_table_lookup(checks->rsc->meta,
                                                 XML_RSC_ATTR_MANAGED);
 
     if ((managed_s != NULL) && !crm_is_true(managed_s)) {
         checks->flags |= rsc_unmanaged;
     }
 }
 
 static void
 check_locked(resource_checks_t *checks)
 {
     if (checks->rsc->lock_node != NULL) {
         checks->flags |= rsc_locked;
         checks->lock_node = checks->rsc->lock_node->details->uname;
     }
 }
 
 static bool
 node_is_unhealthy(pe_node_t *node)
 {
     switch (pe__health_strategy(node->details->data_set)) {
         case pcmk__health_strategy_none:
             break;
 
         case pcmk__health_strategy_no_red:
             if (pe__node_health(node) < 0) {
                 return true;
             }
             break;
 
         case pcmk__health_strategy_only_green:
             if (pe__node_health(node) <= 0) {
                 return true;
             }
             break;
 
         case pcmk__health_strategy_progressive:
         case pcmk__health_strategy_custom:
             /* @TODO These are finite scores, possibly with rules, and possibly
              * combining with other scores, so attributing these as a cause is
              * nontrivial.
              */
             break;
     }
     return false;
 }
 
 static void
 check_node_health(resource_checks_t *checks, pe_node_t *node)
 {
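     /* With no specific node given, flag a health problem only if the resource
      * is allowed on at least one node and every allowed node is unhealthy. */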
     if (node == NULL) {
         GHashTableIter iter;
         bool allowed = false;
         bool all_nodes_unhealthy = true;
 
         g_hash_table_iter_init(&iter, checks->rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
             allowed = true;
             if (!node_is_unhealthy(node)) {
                 all_nodes_unhealthy = false;
                 break;
             }
         }
         if (allowed && all_nodes_unhealthy) {
             checks->flags |= rsc_node_health;
         }
 
     } else if (node_is_unhealthy(node)) {
         checks->flags |= rsc_node_health;
     }
 }
 
 int
 cli_resource_check(pcmk__output_t *out, pe_resource_t *rsc, pe_node_t *node)
 {
     resource_checks_t checks = { .rsc = rsc };
 
     check_role(&checks);
     check_managed(&checks);
     check_locked(&checks);
     check_node_health(&checks, node);
 
     return out->message(out, "resource-check-list", &checks);
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_fail(pcmk_ipc_api_t *controld_api, const char *host_uname,
                   const char *rsc_id, pe_working_set_t *data_set)
 {
     crm_notice("Failing %s on %s", rsc_id, host_uname);
     return send_lrm_rsc_op(controld_api, true, host_uname, rsc_id, data_set);
 }
 
 static GHashTable *
 generate_resource_params(pe_resource_t *rsc, pe_node_t *node,
                          pe_working_set_t *data_set)
 {
     GHashTable *params = NULL;
     GHashTable *meta = NULL;
     GHashTable *combined = NULL;
     GHashTableIter iter;
     char *key = NULL;
     char *value = NULL;
 
     combined = pcmk__strkey_table(free, free);
 
     params = pe_rsc_params(rsc, node, data_set);
     if (params != NULL) {
         g_hash_table_iter_init(&iter, params);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             g_hash_table_insert(combined, strdup(key), strdup(value));
         }
     }
 
     meta = pcmk__strkey_table(free, free);
     get_meta_attributes(meta, rsc, node, data_set);
     if (meta != NULL) {
         g_hash_table_iter_init(&iter, meta);
         while (g_hash_table_iter_next(&iter, (gpointer *) & key, (gpointer *) & value)) {
             char *crm_name = crm_meta_name(key);
 
             g_hash_table_insert(combined, crm_name, strdup(value));
         }
         g_hash_table_destroy(meta);
     }
 
     return combined;
 }
 
 bool resource_is_running_on(pe_resource_t *rsc, const char *host)
 {
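     /* With host == NULL, this reports whether the resource is active anywhere;
      * a NULL rsc is treated as not running. */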
     bool found = true;
     GList *hIter = NULL;
     GList *hosts = NULL;
 
     if (rsc == NULL) {
         return false;
     }
 
     rsc->fns->location(rsc, &hosts, TRUE);
     for (hIter = hosts; host != NULL && hIter != NULL; hIter = hIter->next) {
         pe_node_t *node = (pe_node_t *) hIter->data;
 
         if (pcmk__strcase_any_of(host, node->details->uname, node->details->id, NULL)) {
             crm_trace("Resource %s is running on %s\n", rsc->id, host);
             goto done;
         }
     }
 
     if (host != NULL) {
         crm_trace("Resource %s is not running on: %s\n", rsc->id, host);
         found = false;
 
     } else if(host == NULL && hosts == NULL) {
         crm_trace("Resource %s is not running\n", rsc->id);
         found = false;
     }
 
   done:
     g_list_free(hosts);
     return found;
 }
 
 /*!
  * \internal
  * \brief Create a list of all resources active on host from a given list
  *
  * \param[in] host      Name of host to check (NULL to match any host)
  * \param[in] rsc_list  List of resources to check
  *
  * \return New list of resources from list that are active on host
  */
 static GList *
 get_active_resources(const char *host, GList *rsc_list)
 {
     GList *rIter = NULL;
     GList *active = NULL;
 
     for (rIter = rsc_list; rIter != NULL; rIter = rIter->next) {
         pe_resource_t *rsc = (pe_resource_t *) rIter->data;
 
         /* Expand groups to their members, because if we're restarting a member
          * other than the first, we can't otherwise tell which resources are
          * stopping and starting.
          */
-        if (rsc->variant == pe_group) {
+        if (rsc->variant == pcmk_rsc_variant_group) {
             active = g_list_concat(active,
                                    get_active_resources(host, rsc->children));
         } else if (resource_is_running_on(rsc, host)) {
             active = g_list_append(active, strdup(rsc->id));
         }
     }
     return active;
 }
 
 static void dump_list(GList *items, const char *tag) 
 {
     int lpc = 0;
     GList *item = NULL;
 
     for (item = items; item != NULL; item = item->next) {
         crm_trace("%s[%d]: %s", tag, lpc, (char*)item->data);
         lpc++;
     }
 }
 
 static void display_list(pcmk__output_t *out, GList *items, const char *tag)
 {
     GList *item = NULL;
 
     for (item = items; item != NULL; item = item->next) {
         out->info(out, "%s%s", tag, (const char *)item->data);
     }
 }
 
 /*!
  * \internal
  * \brief Upgrade XML to latest schema version and use it as working set input
  *
  * This also updates the working set timestamp to the current time.
  *
  * \param[in,out] data_set  Working set instance to update
  * \param[in,out] xml       XML to use as input
  *
  * \return Standard Pacemaker return code
  * \note On success, caller is responsible for freeing memory allocated for
  *       data_set->now.
  * \todo This follows the example of other callers of cli_config_update()
  *       and returns ENOKEY ("Required key not available") if that fails,
  *       but perhaps pcmk_rc_schema_validation would be better in that case.
  */
 int
 update_working_set_xml(pe_working_set_t *data_set, xmlNode **xml)
 {
     if (cli_config_update(xml, NULL, FALSE) == FALSE) {
         return ENOKEY;
     }
     data_set->input = *xml;
     data_set->now = crm_time_new(NULL);
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Update a working set's XML input based on a CIB query
  *
  * \param[in,out] out       Output object
  * \param[in,out] data_set  Working set instance to update
  * \param[in]     cib       Connection to the CIB manager
  *
  * \return Standard Pacemaker return code
  * \note On success, caller is responsible for freeing memory allocated for
  *       data_set->input and data_set->now.
  */
 static int
 update_working_set_from_cib(pcmk__output_t *out, pe_working_set_t * data_set,
                             cib_t *cib)
 {
     xmlNode *cib_xml_copy = NULL;
     int rc = pcmk_rc_ok;
 
     rc = cib->cmds->query(cib, NULL, &cib_xml_copy, cib_scope_local | cib_sync_call);
     rc = pcmk_legacy2rc(rc);
 
     if (rc != pcmk_rc_ok) {
         out->err(out, "Could not obtain the current CIB: %s (%d)", pcmk_strerror(rc), rc);
         return rc;
     }
     rc = update_working_set_xml(data_set, &cib_xml_copy);
     if (rc != pcmk_rc_ok) {
         out->err(out, "Could not upgrade the current CIB XML");
         free_xml(cib_xml_copy);
         return rc;
     }
 
     return rc;
 }
 
 // \return Standard Pacemaker return code
 static int
 update_dataset(cib_t *cib, pe_working_set_t * data_set, bool simulate)
 {
     char *pid = NULL;
     char *shadow_file = NULL;
     cib_t *shadow_cib = NULL;
     int rc = pcmk_rc_ok;
 
     pcmk__output_t *out = data_set->priv;
 
     pe_reset_working_set(data_set);
     pe__set_working_set_flags(data_set, pe_flag_no_counts|pe_flag_no_compat);
     rc = update_working_set_from_cib(out, data_set, cib);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     if(simulate) {
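         /* Run the scheduler against a throw-away shadow CIB so the result
          * reflects the cluster state after the pending transition, then
          * re-read status from that shadow instead of the live CIB. */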
         bool prev_quiet = false;
 
         pid = pcmk__getpid_s();
         shadow_cib = cib_shadow_new(pid);
         shadow_file = get_shadow_file(pid);
 
         if (shadow_cib == NULL) {
             out->err(out, "Could not create shadow cib: '%s'", pid);
             rc = ENXIO;
             goto done;
         }
 
         rc = write_xml_file(data_set->input, shadow_file, FALSE);
 
         if (rc < 0) {
             out->err(out, "Could not populate shadow cib: %s (%d)", pcmk_strerror(rc), rc);
             goto done;
         }
 
         rc = shadow_cib->cmds->signon(shadow_cib, crm_system_name, cib_command);
         rc = pcmk_legacy2rc(rc);
 
         if (rc != pcmk_rc_ok) {
             out->err(out, "Could not connect to shadow cib: %s (%d)", pcmk_strerror(rc), rc);
             goto done;
         }
 
         pcmk__schedule_actions(data_set->input,
                                pe_flag_no_counts|pe_flag_no_compat, data_set);
 
         prev_quiet = out->is_quiet(out);
         out->quiet = true;
         pcmk__simulate_transition(data_set, shadow_cib, NULL);
         out->quiet = prev_quiet;
 
         rc = update_dataset(shadow_cib, data_set, false);
 
     } else {
         cluster_status(data_set);
     }
 
   done:
     /* Do not free data_set->input here, because rsc->xml must remain valid later on */
     cib_delete(shadow_cib);
     free(pid);
 
     if(shadow_file) {
         unlink(shadow_file);
         free(shadow_file);
     }
 
     return rc;
 }
 
 static int
 max_delay_for_resource(pe_working_set_t * data_set, pe_resource_t *rsc) 
 {
     int delay = 0;
     int max_delay = 0;
 
     if(rsc && rsc->children) {
         GList *iter = NULL;
 
         for(iter = rsc->children; iter; iter = iter->next) {
             pe_resource_t *child = (pe_resource_t *)iter->data;
 
             delay = max_delay_for_resource(data_set, child);
             if(delay > max_delay) {
                 double seconds = delay / 1000.0;
                 crm_trace("Calculated new delay of %.1fs due to %s", seconds, child->id);
                 max_delay = delay;
             }
         }
 
     } else if(rsc) {
         char *key = crm_strdup_printf("%s_%s_0", rsc->id, PCMK_ACTION_STOP);
         pe_action_t *stop = custom_action(rsc, key, PCMK_ACTION_STOP, NULL,
                                           TRUE, FALSE, data_set);
         const char *value = g_hash_table_lookup(stop->meta, XML_ATTR_TIMEOUT);
         long long result_ll;
 
         if ((pcmk__scan_ll(value, &result_ll, -1LL) == pcmk_rc_ok)
             && (result_ll >= 0) && (result_ll <= INT_MAX)) {
             max_delay = (int) result_ll;
         } else {
             max_delay = -1;
         }
         pe_free_action(stop);
     }
 
     return max_delay;
 }
 
 static int
 max_delay_in(pe_working_set_t * data_set, GList *resources) 
 {
     int max_delay = 0;
     GList *item = NULL;
 
     for (item = resources; item != NULL; item = item->next) {
         int delay = 0;
         pe_resource_t *rsc = pe_find_resource(data_set->resources, (const char *)item->data);
 
         delay = max_delay_for_resource(data_set, rsc);
 
         if(delay > max_delay) {
             double seconds = delay / 1000.0;
             crm_trace("Calculated new delay of %.1fs due to %s", seconds, rsc->id);
             max_delay = delay;
         }
     }
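
     /* Convert the largest stop timeout from milliseconds to whole seconds and
      * pad it with 5s of slack; e.g. a 20000ms stop timeout yields a 25s step. */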
 
     return 5 + (max_delay / 1000);
 }
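
 /* Keep waiting for starts while resources remain in the delta list, or (once
  * the list is empty) the target resource itself is not yet active on the
  * given host */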
 
 #define waiting_for_starts(d, r, h) ((d != NULL) || \
                                     (!resource_is_running_on((r), (h))))
 
 /*!
  * \internal
  * \brief Restart a resource (on a particular host if requested).
  *
  * \param[in,out] out                 Output object
  * \param[in,out] rsc                 The resource to restart
  * \param[in]     node                Node to restart resource on (NULL for all)
  * \param[in]     move_lifetime       If not NULL, how long constraint should
  *                                    remain in effect (as ISO 8601 string)
  * \param[in]     timeout_ms          Consider failed if actions do not complete
  *                                    in this time (specified in milliseconds,
  *                                    but a two-second granularity is actually
  *                                    used; if 0, it will be calculated based on
  *                                    the resource timeout)
  * \param[in,out] cib                 Connection to the CIB manager
  * \param[in]     cib_options         Group of enum cib_call_options flags to
  *                                    use with CIB calls
  * \param[in]     promoted_role_only  If true, limit to promoted instances
  * \param[in]     force               If true, apply only to requested instance
  *                                    if part of a collective resource
  *
  * \return Standard Pacemaker return code (exits on certain failures)
  */
 int
 cli_resource_restart(pcmk__output_t *out, pe_resource_t *rsc,
                      const pe_node_t *node, const char *move_lifetime,
                      int timeout_ms, cib_t *cib, int cib_options,
                      gboolean promoted_role_only, gboolean force)
 {
     int rc = pcmk_rc_ok;
     int lpc = 0;
     int before = 0;
     int step_timeout_s = 0;
     int sleep_interval = 2;
     int timeout = timeout_ms / 1000;
 
     bool stop_via_ban = false;
     char *rsc_id = NULL;
     char *lookup_id = NULL;
     char *orig_target_role = NULL;
 
     GList *list_delta = NULL;
     GList *target_active = NULL;
     GList *current_active = NULL;
     GList *restart_target_active = NULL;
 
     pe_working_set_t *data_set = NULL;
     pe_resource_t *parent = uber_parent(rsc);
 
     bool running = false;
     const char *id = rsc->clone_name ? rsc->clone_name : rsc->id;
     const char *host = node ? node->details->uname : NULL;
 
     /* If the implicit resource or primitive resource of a bundle is given, operate on the
      * bundle itself instead.
      */
     if (pe_rsc_is_bundled(rsc)) {
         rsc = parent->parent;
     }
 
     running = resource_is_running_on(rsc, host);
 
     if (pe_rsc_is_clone(parent) && !running) {
         if (pe_rsc_is_unique_clone(parent)) {
             lookup_id = strdup(rsc->id);
         } else {
             lookup_id = clone_strip(rsc->id);
         }
 
         rsc = parent->fns->find_rsc(parent, lookup_id, node, pe_find_any|pe_find_current);
         free(lookup_id);
         running = resource_is_running_on(rsc, host);
     }
 
     if (!running) {
         if (host) {
             out->err(out, "%s is not running on %s and so cannot be restarted", id, host);
         } else {
             out->err(out, "%s is not running anywhere and so cannot be restarted", id);
         }
         return ENXIO;
     }
 
     rsc_id = strdup(rsc->id);
 
     if (pe_rsc_is_unique_clone(parent)) {
         lookup_id = strdup(rsc->id);
     } else {
         lookup_id = clone_strip(rsc->id);
     }
 
     if (host) {
         if (pe_rsc_is_clone(rsc) || pe_bundle_replicas(rsc)) {
             stop_via_ban = true;
         } else if (pe_rsc_is_clone(parent)) {
             stop_via_ban = true;
             free(lookup_id);
             lookup_id = strdup(parent->id);
         }
     }
 
     /*
       grab full cib
       determine originally active resources
       disable or ban
       poll cib and watch for affected resources to get stopped
       without --timeout, calculate the stop timeout for each step and wait for that
       if we hit --timeout or the service timeout, re-enable or un-ban, report failure and indicate which resources we couldn't take down
       if everything stopped, re-enable or un-ban
       poll cib and watch for affected resources to get started
       without --timeout, calculate the start timeout for each step and wait for that
       if we hit --timeout or the service timeout, report (different) failure and indicate which resources we couldn't bring back up
       report success
 
       Optimizations:
       - use constraints to determine ordered list of affected resources
       - Allow a --no-deps option (aka. --force-restart)
     */
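
     /* Rough shape of the waiting loops below: every sleep_interval (2s) the
      * CIB is re-queried and the set of still-active resources is recomputed;
      * without an explicit --timeout, each step's budget is derived from the
      * largest stop timeout among the affected resources (see max_delay_in()). */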
 
     data_set = pe_new_working_set();
     if (data_set == NULL) {
         crm_perror(LOG_ERR, "Could not allocate working set");
         rc = ENOMEM;
         goto done;
     }
 
     data_set->priv = out;
     rc = update_dataset(cib, data_set, false);
 
     if(rc != pcmk_rc_ok) {
         out->err(out, "Could not get new resource list: %s (%d)", pcmk_strerror(rc), rc);
         goto done;
     }
 
     restart_target_active = get_active_resources(host, data_set->resources);
     current_active = get_active_resources(host, data_set->resources);
 
     dump_list(current_active, "Origin");
 
     if (stop_via_ban) {
         /* Stop the clone or bundle instance by banning it from the host */
         out->quiet = true;
         rc = cli_resource_ban(out, lookup_id, host, move_lifetime, NULL, cib,
                               cib_options, promoted_role_only);
 
     } else {
         /* Stop the resource by setting target-role to Stopped.
          * Remember any existing target-role so we can restore it later
          * (though it only makes any difference if it's Unpromoted).
          */
 
         find_resource_attr(out, cib, XML_NVPAIR_ATTR_VALUE, lookup_id, NULL, NULL,
                            NULL, XML_RSC_ATTR_TARGET_ROLE, &orig_target_role);
         rc = cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS,
                                            NULL, XML_RSC_ATTR_TARGET_ROLE,
                                            PCMK_ACTION_STOPPED, FALSE, cib,
                                            cib_options, force);
     }
     if(rc != pcmk_rc_ok) {
         out->err(out, "Could not set target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc);
         if (current_active != NULL) {
             g_list_free_full(current_active, free);
             current_active = NULL;
         }
         if (restart_target_active != NULL) {
             g_list_free_full(restart_target_active, free);
             restart_target_active = NULL;
         }
         goto done;
     }
 
     rc = update_dataset(cib, data_set, true);
     if(rc != pcmk_rc_ok) {
         out->err(out, "Could not determine which resources would be stopped");
         goto failure;
     }
 
     target_active = get_active_resources(host, data_set->resources);
     dump_list(target_active, "Target");
 
     list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
     out->info(out, "Waiting for %d resources to stop:", g_list_length(list_delta));
     display_list(out, list_delta, " * ");
 
     step_timeout_s = timeout / sleep_interval;
     while (list_delta != NULL) {
         before = g_list_length(list_delta);
         if(timeout_ms == 0) {
             step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
         }
 
         /* We probably don't need the entire step timeout */
         for(lpc = 0; (lpc < step_timeout_s) && (list_delta != NULL); lpc++) {
             sleep(sleep_interval);
             if(timeout) {
                 timeout -= sleep_interval;
                 crm_trace("%ds remaining", timeout);
             }
             rc = update_dataset(cib, data_set, false);
             if(rc != pcmk_rc_ok) {
                 out->err(out, "Could not determine which resources were stopped");
                 goto failure;
             }
 
             if (current_active != NULL) {
                 g_list_free_full(current_active, free);
                 current_active = NULL;
             }
             current_active = get_active_resources(host, data_set->resources);
             g_list_free(list_delta);
             list_delta = pcmk__subtract_lists(current_active, target_active, (GCompareFunc) strcmp);
             dump_list(current_active, "Current");
             dump_list(list_delta, "Delta");
         }
 
         crm_trace("%d (was %d) resources remaining", g_list_length(list_delta), before);
         if(before == g_list_length(list_delta)) {
             /* aborted during stop phase, print the contents of list_delta */
             out->err(out, "Could not complete shutdown of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
             display_list(out, list_delta, " * ");
             rc = ETIME;
             goto failure;
         }
 
     }
 
     if (stop_via_ban) {
         rc = cli_resource_clear(lookup_id, host, NULL, cib, cib_options, true, force);
 
     } else if (orig_target_role) {
         rc = cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS,
                                            NULL, XML_RSC_ATTR_TARGET_ROLE,
                                            orig_target_role, FALSE, cib,
                                            cib_options, force);
         free(orig_target_role);
         orig_target_role = NULL;
     } else {
         rc = cli_resource_delete_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS,
                                            NULL, XML_RSC_ATTR_TARGET_ROLE, cib,
                                            cib_options, force);
     }
 
     if(rc != pcmk_rc_ok) {
         out->err(out, "Could not unset target-role for %s: %s (%d)", rsc_id, pcmk_strerror(rc), rc);
         goto done;
     }
 
     if (target_active != NULL) {
         g_list_free_full(target_active, free);
         target_active = NULL;
     }
     target_active = restart_target_active;
     list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
     out->info(out, "Waiting for %d resources to start again:", g_list_length(list_delta));
     display_list(out, list_delta, " * ");
 
     step_timeout_s = timeout / sleep_interval;
     while (waiting_for_starts(list_delta, rsc, host)) {
         before = g_list_length(list_delta);
         if(timeout_ms == 0) {
             step_timeout_s = max_delay_in(data_set, list_delta) / sleep_interval;
         }
 
         /* We probably don't need the entire step timeout */
         for (lpc = 0; (lpc < step_timeout_s) && waiting_for_starts(list_delta, rsc, host); lpc++) {
 
             sleep(sleep_interval);
             if(timeout) {
                 timeout -= sleep_interval;
                 crm_trace("%ds remaining", timeout);
             }
 
             rc = update_dataset(cib, data_set, false);
             if(rc != pcmk_rc_ok) {
                 out->err(out, "Could not determine which resources were started");
                 goto failure;
             }
 
             if (current_active != NULL) {
                 g_list_free_full(current_active, free);
                 current_active = NULL;
             }
 
             /* It's OK if dependent resources moved to a different node,
              * so we check active resources on all nodes.
              */
             current_active = get_active_resources(NULL, data_set->resources);
             g_list_free(list_delta);
             list_delta = pcmk__subtract_lists(target_active, current_active, (GCompareFunc) strcmp);
             dump_list(current_active, "Current");
             dump_list(list_delta, "Delta");
         }
 
         if(before == g_list_length(list_delta)) {
             /* aborted during start phase, print the contents of list_delta */
             out->err(out, "Could not complete restart of %s, %d resources remaining", rsc_id, g_list_length(list_delta));
             display_list(out, list_delta, " * ");
             rc = ETIME;
             goto failure;
         }
 
     }
 
     rc = pcmk_rc_ok;
     goto done;
 
   failure:
     if (stop_via_ban) {
         cli_resource_clear(lookup_id, host, NULL, cib, cib_options, true, force);
     } else if (orig_target_role) {
         cli_resource_update_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS, NULL,
                                       XML_RSC_ATTR_TARGET_ROLE, orig_target_role,
                                       FALSE, cib, cib_options, force);
         free(orig_target_role);
     } else {
         cli_resource_delete_attribute(rsc, rsc_id, NULL, XML_TAG_META_SETS,
                                       NULL, XML_RSC_ATTR_TARGET_ROLE, cib,
                                       cib_options, force);
     }
 
 done:
     if (list_delta != NULL) {
         g_list_free(list_delta);
     }
     if (current_active != NULL) {
         g_list_free_full(current_active, free);
     }
     if (target_active != NULL && (target_active != restart_target_active)) {
         g_list_free_full(target_active, free);
     }
     if (restart_target_active != NULL) {
         g_list_free_full(restart_target_active, free);
     }
     free(rsc_id);
     free(lookup_id);
     pe_free_working_set(data_set);
     return rc;
 }
 
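 /*!
  * \internal
  * \brief Check whether a scheduled action still needs to be waited for
  *
  * An action is pending if it is neither optional nor a pseudo-action, is
  * runnable, and is not a notification.
  *
  * \param[in] action  Action to check
  *
  * \return true if \p action is pending, otherwise false
  */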
 static inline bool
 action_is_pending(const pe_action_t *action)
 {
     if (pcmk_any_flags_set(action->flags, pe_action_optional|pe_action_pseudo)
         || !pcmk_is_set(action->flags, pe_action_runnable)
         || pcmk__str_eq(PCMK_ACTION_NOTIFY, action->task, pcmk__str_casei)) {
         return false;
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Check whether any actions in a list are pending
  *
  * \param[in] actions   List of actions to check
  *
  * \return true if any actions in the list are pending, otherwise false
  */
 static bool
 actions_are_pending(const GList *actions)
 {
     for (const GList *action = actions; action != NULL; action = action->next) {
         const pe_action_t *a = (const pe_action_t *) action->data;
 
         if (action_is_pending(a)) {
             crm_notice("Waiting for %s (flags=%#.8x)", a->uuid, a->flags);
             return true;
         }
     }
     return false;
 }
 
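 /*!
  * \internal
  * \brief Print any pending actions in a list
  *
  * \param[in,out] out      Output object
  * \param[in]     actions  List of actions to check
  */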
 static void
 print_pending_actions(pcmk__output_t *out, GList *actions)
 {
     GList *action;
 
     out->info(out, "Pending actions:");
     for (action = actions; action != NULL; action = action->next) {
         pe_action_t *a = (pe_action_t *) action->data;
 
         if (!action_is_pending(a)) {
             continue;
         }
 
         if (a->node) {
             out->info(out, "\tAction %d: %s\ton %s",
                       a->id, a->uuid, pe__node_name(a->node));
         } else {
             out->info(out, "\tAction %d: %s", a->id, a->uuid);
         }
     }
 }
 
 /* For --wait, timeout (in seconds) to use if caller doesn't specify one */
 #define WAIT_DEFAULT_TIMEOUT_S (60 * 60)
 
 /* For --wait, how long to sleep between cluster state checks */
 #define WAIT_SLEEP_S (2)
 
 /*!
  * \internal
  * \brief Wait until all pending cluster actions are complete
  *
  * This waits until either the CIB's transition graph is idle or a timeout is
  * reached.
  *
  * \param[in,out] out          Output object
  * \param[in]     timeout_ms   Consider failed if actions do not complete in
  *                             this time (specified in milliseconds, but
  *                             one-second granularity is actually used; if 0, a
  *                             default will be used)
  * \param[in,out] cib          Connection to the CIB manager
  *
  * \return Standard Pacemaker return code
  */
 int
 wait_till_stable(pcmk__output_t *out, int timeout_ms, cib_t * cib)
 {
     pe_working_set_t *data_set = NULL;
     int rc = pcmk_rc_ok;
     int timeout_s = timeout_ms? ((timeout_ms + 999) / 1000) : WAIT_DEFAULT_TIMEOUT_S;
     time_t expire_time = time(NULL) + timeout_s;
     time_t time_diff;
     bool printed_version_warning = out->is_quiet(out); // i.e. don't print if quiet
 
     data_set = pe_new_working_set();
     if (data_set == NULL) {
         return ENOMEM;
     }
 
     do {
 
         /* Abort if timeout is reached */
         time_diff = expire_time - time(NULL);
         if (time_diff > 0) {
             crm_info("Waiting up to %lld seconds for cluster actions to complete", (long long) time_diff);
         } else {
             print_pending_actions(out, data_set->actions);
             pe_free_working_set(data_set);
             return ETIME;
         }
         if (rc == pcmk_rc_ok) { /* this avoids sleep on first loop iteration */
             sleep(WAIT_SLEEP_S);
         }
 
         /* Get latest transition graph */
         pe_reset_working_set(data_set);
         rc = update_working_set_from_cib(out, data_set, cib);
         if (rc != pcmk_rc_ok) {
             pe_free_working_set(data_set);
             return rc;
         }
         pcmk__schedule_actions(data_set->input,
                                pe_flag_no_counts|pe_flag_no_compat, data_set);
 
         if (!printed_version_warning) {
             /* If the DC has a different version than the local node, the two
              * could come to different conclusions about what actions need to be
              * done. Warn the user in this case.
              *
              * @TODO A possible long-term solution would be to reimplement the
              * wait as a new controller operation that would be forwarded to the
              * DC. However, that would have potential problems of its own.
              */
             const char *dc_version = g_hash_table_lookup(data_set->config_hash,
                                                          "dc-version");
 
             if (!pcmk__str_eq(dc_version, PACEMAKER_VERSION "-" BUILD_VERSION, pcmk__str_casei)) {
                 out->info(out, "warning: wait option may not work properly in "
                           "mixed-version cluster");
                 printed_version_warning = true;
             }
         }
 
     } while (actions_are_pending(data_set->actions));
 
     pe_free_working_set(data_set);
     return rc;
 }
 
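 /*!
  * \internal
  * \brief Map a crm_resource action name to the agent action to execute
  *
  * For example, "validate" maps to validate-all, "force-check" maps to
  * monitor, and "force-start" and friends simply drop the "force-" prefix.
  * Any other name is passed through unchanged.
  *
  * \param[in] rsc_action  Action name as requested on the command line
  *
  * \return Action name to pass to the resource agent
  */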
 static const char *
 get_action(const char *rsc_action) {
     const char *action = NULL;
 
     if (pcmk__str_eq(rsc_action, "validate", pcmk__str_casei)) {
         action = PCMK_ACTION_VALIDATE_ALL;
 
     } else if (pcmk__str_eq(rsc_action, "force-check", pcmk__str_casei)) {
         action = PCMK_ACTION_MONITOR;
 
     } else if (pcmk__strcase_any_of(rsc_action, "force-start", "force-stop",
                                     "force-demote", "force-promote", NULL)) {
         action = rsc_action+6; // skip the "force-" prefix
     } else {
         action = rsc_action;
     }
 
     return action;
 }
 
 /*!
  * \brief Set up environment variables as expected by resource agents
  *
  * When the cluster executes resource agents, it adds certain environment
  * variables (directly or via resource meta-attributes) expected by some
  * resource agents. Add the essential ones that many resource agents expect, so
  * the behavior is the same for command-line execution.
  *
  * \param[in,out] params       Resource parameters that will be passed to agent
  * \param[in]     timeout_ms   Action timeout (in milliseconds)
  * \param[in]     check_level  OCF check level
  * \param[in]     verbosity    Verbosity level
  */
 static void
 set_agent_environment(GHashTable *params, int timeout_ms, int check_level,
                       int verbosity)
 {
     g_hash_table_insert(params, strdup("CRM_meta_timeout"),
                         crm_strdup_printf("%d", timeout_ms));
 
     g_hash_table_insert(params, strdup(XML_ATTR_CRM_VERSION),
                         strdup(CRM_FEATURE_SET));
 
     if (check_level >= 0) {
         char *level = crm_strdup_printf("%d", check_level);
 
         setenv("OCF_CHECK_LEVEL", level, 1);
         free(level);
     }
 
     setenv("HA_debug", (verbosity > 0)? "1" : "0", 1);
     if (verbosity > 1) {
         setenv("OCF_TRACE_RA", "1", 1);
     }
 
     /* A resource agent using the standard ocf-shellfuncs library will not print
      * messages to stderr if it doesn't have a controlling terminal (e.g. if
      * crm_resource is called via script or ssh). This forces it to do so.
      */
     setenv("OCF_TRACE_FILE", "/dev/stderr", 0);
 }
 
 /*!
  * \internal
  * \brief Apply command-line overrides to resource parameters
  *
  * \param[in,out] params     Parameters to be passed to agent
  * \param[in]     overrides  Parameters to override (or NULL if none)
  */
 static void
 apply_overrides(GHashTable *params, GHashTable *overrides)
 {
     if (overrides != NULL) {
         GHashTableIter iter;
         char *name = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, overrides);
         while (g_hash_table_iter_next(&iter, (gpointer *) &name,
                                       (gpointer *) &value)) {
             g_hash_table_replace(params, strdup(name), strdup(value));
         }
     }
 }
 
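 /*!
  * \brief Execute a resource agent action from explicit parameters
  *
  * Run the requested action synchronously on the local node, outside of
  * cluster management, using the given standard, provider, type, and
  * parameters, and report the result via \p out.
  *
  * \return Exit code from executing the action
  */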
 crm_exit_t
 cli_resource_execute_from_params(pcmk__output_t *out, const char *rsc_name,
                                  const char *rsc_class, const char *rsc_prov,
                                  const char *rsc_type, const char *rsc_action,
                                  GHashTable *params, GHashTable *override_hash,
                                  int timeout_ms, int resource_verbose, gboolean force,
                                  int check_level)
 {
     const char *class = rsc_class;
     const char *action = get_action(rsc_action);
     crm_exit_t exit_code = CRM_EX_OK;
     svc_action_t *op = NULL;
 
     // If no timeout was provided, use the same default as the cluster
     if (timeout_ms == 0) {
         timeout_ms = PCMK_DEFAULT_ACTION_TIMEOUT_MS;
     }
 
     set_agent_environment(params, timeout_ms, check_level, resource_verbose);
     apply_overrides(params, override_hash);
 
     op = services__create_resource_action(rsc_name? rsc_name : "test",
                                           rsc_class, rsc_prov, rsc_type, action,
                                           0, timeout_ms, params, 0);
     if (op == NULL) {
         out->err(out, "Could not execute %s using %s%s%s:%s: %s",
                  action, rsc_class, (rsc_prov? ":" : ""),
                  (rsc_prov? rsc_prov : ""), rsc_type, strerror(ENOMEM));
         g_hash_table_destroy(params);
         return CRM_EX_OSERR;
     }
 
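     /* The "service" standard is an alias for the platform's native init
      * system; resolve it to the concrete standard before deciding whether
      * manual execution is supported.
      */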
     if (pcmk__str_eq(rsc_class, PCMK_RESOURCE_CLASS_SERVICE, pcmk__str_casei)) {
         class = resources_find_service_class(rsc_type);
     }
     if (!pcmk__strcase_any_of(class, PCMK_RESOURCE_CLASS_OCF,
                               PCMK_RESOURCE_CLASS_LSB, NULL)) {
         services__format_result(op, CRM_EX_UNIMPLEMENT_FEATURE, PCMK_EXEC_ERROR,
                                 "Manual execution of the %s standard is "
                                 "unsupported", pcmk__s(class, "unspecified"));
     }
 
     if (op->rc != PCMK_OCF_UNKNOWN) {
         exit_code = op->rc;
         goto done;
     }
 
     services_action_sync(op);
 
     // Map results to OCF codes for consistent reporting to user
     {
         enum ocf_exitcode ocf_code = services_result2ocf(class, action, op->rc);
 
         // Cast variable instead of function return to keep compilers happy
         exit_code = (crm_exit_t) ocf_code;
     }
 
 done:
     out->message(out, "resource-agent-action", resource_verbose, rsc_class,
                  rsc_prov, rsc_type, rsc_name, rsc_action, override_hash,
                  exit_code, op->status, services__exit_reason(op),
                  op->stdout_data, op->stderr_data);
     services_action_free(op);
     return exit_code;
 }
 
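 /*!
  * \brief Execute an agent action for a configured resource
  *
  * Look up the resource's agent, parameters, and (if none was specified)
  * timeout from its configuration, then execute the requested action via
  * cli_resource_execute_from_params().
  *
  * \return Exit code from executing the action
  */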
 crm_exit_t
 cli_resource_execute(pe_resource_t *rsc, const char *requested_name,
                      const char *rsc_action, GHashTable *override_hash,
                      int timeout_ms, cib_t * cib, pe_working_set_t *data_set,
                      int resource_verbose, gboolean force, int check_level)
 {
     pcmk__output_t *out = data_set->priv;
     crm_exit_t exit_code = CRM_EX_OK;
     const char *rid = NULL;
     const char *rtype = NULL;
     const char *rprov = NULL;
     const char *rclass = NULL;
     GHashTable *params = NULL;
 
     if (pcmk__strcase_any_of(rsc_action, "force-start", "force-demote",
                                     "force-promote", NULL)) {
         if(pe_rsc_is_clone(rsc)) {
             GList *nodes = cli_resource_search(rsc, requested_name, data_set);
             if(nodes != NULL && force == FALSE) {
                 out->err(out, "It is not safe to %s %s here: the cluster claims it is already active",
                          rsc_action, rsc->id);
                 out->err(out, "Try setting target-role=Stopped first or specifying "
                          "the force option");
                 return CRM_EX_UNSAFE;
             }
 
             g_list_free_full(nodes, free);
         }
     }
 
     if(pe_rsc_is_clone(rsc)) {
         /* Grab the first child resource in the hope it's not a group */
         rsc = rsc->children->data;
     }
 
-    if(rsc->variant == pe_group) {
+    if (rsc->variant == pcmk_rsc_variant_group) {
         out->err(out, "Sorry, the %s option doesn't support group resources", rsc_action);
         return CRM_EX_UNIMPLEMENT_FEATURE;
     } else if (rsc->variant == pe_container || pe_rsc_is_bundled(rsc)) {
         out->err(out, "Sorry, the %s option doesn't support bundled resources", rsc_action);
         return CRM_EX_UNIMPLEMENT_FEATURE;
     }
 
     rclass = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
     rprov = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER);
     rtype = crm_element_value(rsc->xml, XML_ATTR_TYPE);
 
     params = generate_resource_params(rsc, NULL /* @TODO use local node */,
                                       data_set);
 
     if (timeout_ms == 0) {
         timeout_ms = pe_get_configured_timeout(rsc, get_action(rsc_action), data_set);
     }
 
     rid = pe_rsc_is_anon_clone(rsc->parent)? requested_name : rsc->id;
 
     exit_code = cli_resource_execute_from_params(out, rid, rclass, rprov, rtype, rsc_action,
                                                  params, override_hash, timeout_ms,
                                                  resource_verbose, force, check_level);
     return exit_code;
 }
 
 // \return Standard Pacemaker return code
 int
 cli_resource_move(const pe_resource_t *rsc, const char *rsc_id,
                   const char *host_name, const char *move_lifetime, cib_t *cib,
                   int cib_options, pe_working_set_t *data_set,
                   gboolean promoted_role_only, gboolean force)
 {
     pcmk__output_t *out = data_set->priv;
     int rc = pcmk_rc_ok;
     unsigned int count = 0;
     pe_node_t *current = NULL;
     pe_node_t *dest = pe_find_node(data_set->nodes, host_name);
     bool cur_is_dest = false;
 
     if (dest == NULL) {
         return pcmk_rc_node_unknown;
     }
 
     if (promoted_role_only && !pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         const pe_resource_t *p = pe__const_top_resource(rsc, false);
 
         if (pcmk_is_set(p->flags, pe_rsc_promotable)) {
             out->info(out, "Using parent '%s' for move instead of '%s'.", p->id, rsc_id);
             rsc_id = p->id;
             rsc = p;
 
         } else {
             out->info(out, "Ignoring --promoted option: %s is not promotable",
                       rsc_id);
             promoted_role_only = FALSE;
         }
     }
 
     current = pe__find_active_requires(rsc, &count);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         unsigned int promoted_count = 0;
         pe_node_t *promoted_node = NULL;
 
         for (const GList *iter = rsc->children; iter; iter = iter->next) {
             const pe_resource_t *child = (const pe_resource_t *) iter->data;
             enum rsc_role_e child_role = child->fns->state(child, TRUE);
 
             if (child_role == pcmk_role_promoted) {
                 rsc = child;
                 promoted_node = pe__current_node(child);
                 promoted_count++;
             }
         }
         if (promoted_role_only || (promoted_count != 0)) {
             count = promoted_count;
             current = promoted_node;
         }
 
     }
 
     if (count > 1) {
         if (pe_rsc_is_clone(rsc)) {
             current = NULL;
         } else {
             return pcmk_rc_multiple;
         }
     }
 
     if (current && (current->details == dest->details)) {
         cur_is_dest = true;
         if (force) {
             crm_info("%s is already %s on %s, reinforcing placement with location constraint.",
                      rsc_id, promoted_role_only?"promoted":"active",
                      pe__node_name(dest));
         } else {
             return pcmk_rc_already;
         }
     }
 
     /* Clear any previous prefer constraints across all nodes. */
     cli_resource_clear(rsc_id, NULL, data_set->nodes, cib, cib_options, false, force);
 
     /* Clear any previous ban constraints on 'dest'. */
     cli_resource_clear(rsc_id, dest->details->uname, data_set->nodes, cib,
                        cib_options, TRUE, force);
 
     /* Record an explicit preference for 'dest' */
     rc = cli_resource_prefer(out, rsc_id, dest->details->uname, move_lifetime,
                              cib, cib_options, promoted_role_only);
 
     crm_trace("%s%s now prefers %s%s",
               rsc->id, (promoted_role_only? " (promoted)" : ""),
               pe__node_name(dest), force? " (forced)" : "");
 
     /* Only ban the previous location if the current location differs from the
      * destination. It is possible to use -M to enforce a location without
      * regard to where the resource is currently located.
      */
     if (force && !cur_is_dest) {
         /* Ban the original location if possible */
         if(current) {
             (void)cli_resource_ban(out, rsc_id, current->details->uname, move_lifetime,
                                    NULL, cib, cib_options, promoted_role_only);
 
         } else if(count > 1) {
             out->info(out, "Resource '%s' is currently %s in %d locations. "
                       "One may now move to %s",
                       rsc_id, (promoted_role_only? "promoted" : "active"),
                       count, pe__node_name(dest));
             out->info(out, "To prevent '%s' from being %s at a specific location, "
                       "specify a node.",
                       rsc_id, (promoted_role_only? "promoted" : "active"));
 
         } else {
             crm_trace("Not banning %s from its current location: not active", rsc_id);
         }
     }
 
     return rc;
 }