diff --git a/include/crm/common/scheduler_internal.h b/include/crm/common/scheduler_internal.h
index e3b9b302b7..295ca22c49 100644
--- a/include/crm/common/scheduler_internal.h
+++ b/include/crm/common/scheduler_internal.h
@@ -1,132 +1,134 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H
 #define PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H
 
 #include <crm/common/action_relation_internal.h>
 #include <crm/common/actions_internal.h>
 #include <crm/common/attrs_internal.h>
 #include <crm/common/bundles_internal.h>
 #include <crm/common/clone_internal.h>
 #include <crm/common/digest_internal.h>
 #include <crm/common/failcounts_internal.h>
 #include <crm/common/group_internal.h>
 #include <crm/common/history_internal.h>
 #include <crm/common/location_internal.h>
 #include <crm/common/nodes_internal.h>
 #include <crm/common/primitive_internal.h>
 #include <crm/common/remote_internal.h>
 #include <crm/common/resources_internal.h>
 #include <crm/common/roles_internal.h>
 #include <crm/common/rules_internal.h>
 #include <crm/common/tickets_internal.h>
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 enum pcmk__check_parameters {
     /* Clear fail count if parameters changed for un-expired start or monitor
      * last_failure.
      */
     pcmk__check_last_failure,
 
     /* Clear fail count if parameters changed for start, monitor, promote, or
      * migrate_from actions for active resources.
      */
     pcmk__check_active,
 };
 
 // Group of enum pcmk__warnings flags for warnings we want to log once
 extern uint32_t pcmk__warnings;
 
 /*!
  * \internal
  * \brief Log a resource-tagged message at info severity
  *
  * \param[in] rsc       Tag message with this resource's ID
  * \param[in] fmt...    printf(3)-style format and arguments
  */
 #define pcmk__rsc_info(rsc, fmt, args...)   \
     crm_log_tag(LOG_INFO, ((rsc) == NULL)? "<NULL>" : (rsc)->id, (fmt), ##args)
 
 /*!
  * \internal
  * \brief Log a resource-tagged message at debug severity
  *
  * \param[in] rsc       Tag message with this resource's ID
  * \param[in] fmt...    printf(3)-style format and arguments
  */
 #define pcmk__rsc_debug(rsc, fmt, args...)  \
     crm_log_tag(LOG_DEBUG, ((rsc) == NULL)? "<NULL>" : (rsc)->id, (fmt), ##args)
 
 /*!
  * \internal
  * \brief Log a resource-tagged message at trace severity
  *
  * \param[in] rsc       Tag message with this resource's ID
  * \param[in] fmt...    printf(3)-style format and arguments
  */
 #define pcmk__rsc_trace(rsc, fmt, args...)  \
     crm_log_tag(LOG_TRACE, ((rsc) == NULL)? "<NULL>" : (rsc)->id, (fmt), ##args)
 
 /*!
  * \internal
  * \brief Log an error and remember that current scheduler input has errors
  *
- * \param[in] fmt...  printf(3)-style format and arguments
+ * \param[in,out] scheduler  Scheduler data
+ * \param[in]     fmt...     printf(3)-style format and arguments
  */
-#define pcmk__sched_err(fmt...) do {    \
-        was_processing_error = TRUE;    \
-        crm_err(fmt);                   \
+#define pcmk__sched_err(scheduler, fmt...) do {     \
+        was_processing_error = TRUE;                \
+        crm_err(fmt);                               \
     } while (0)
 
 /*!
  * \internal
  * \brief Log a warning and remember that current scheduler input has warnings
  *
- * \param[in] fmt...  printf(3)-style format and arguments
+ * \param[in,out] scheduler  Scheduler data
+ * \param[in]     fmt...     printf(3)-style format and arguments
  */
-#define pcmk__sched_warn(fmt...) do {   \
-        was_processing_warning = TRUE;  \
-        crm_warn(fmt);                  \
+#define pcmk__sched_warn(scheduler, fmt...) do {    \
+        was_processing_warning = TRUE;              \
+        crm_warn(fmt);                              \
     } while (0)
 
 /*!
  * \internal
  * \brief Set scheduler flags
  *
  * \param[in,out] scheduler     Scheduler data
  * \param[in]     flags_to_set  Group of enum pcmk_scheduler_flags to set
  */
 #define pcmk__set_scheduler_flags(scheduler, flags_to_set) do {             \
         (scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__,         \
             LOG_TRACE, "Scheduler", crm_system_name,                        \
             (scheduler)->flags, (flags_to_set), #flags_to_set);             \
     } while (0)
 
 /*!
  * \internal
  * \brief Clear scheduler flags
  *
  * \param[in,out] scheduler       Scheduler data
  * \param[in]     flags_to_clear  Group of enum pcmk_scheduler_flags to clear
  */
 #define pcmk__clear_scheduler_flags(scheduler, flags_to_clear) do {         \
         (scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__,       \
             LOG_TRACE, "Scheduler", crm_system_name,                        \
             (scheduler)->flags, (flags_to_clear), #flags_to_clear);         \
     } while (0)
 
 #ifdef __cplusplus
 }
 #endif
 
 #endif // PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H
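Usage note: call sites now pass the relevant scheduler as the first argument, even though this transitional version of the macros does not yet read it. A minimal sketch of a caller (the function and its checks are illustrative, not part of the patch):

static void
check_input(pcmk_scheduler_t *scheduler, const pcmk_action_t *action)
{
    /* Hypothetical caller: the macro currently ignores its first argument,
     * but passing it now means call sites won't need to change again when
     * the macros start recording errors per scheduler.
     */
    if (action->node == NULL) {
        pcmk__sched_err(scheduler, "Action %s has no node", action->uuid);
    } else if (!action->node->details->online) {
        pcmk__sched_warn(scheduler, "Node for %s is offline", action->uuid);
    }
}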
diff --git a/lib/pacemaker/pcmk_graph_producer.c b/lib/pacemaker/pcmk_graph_producer.c
index deb443c2be..d46b78df4d 100644
--- a/lib/pacemaker/pcmk_graph_producer.c
+++ b/lib/pacemaker/pcmk_graph_producer.c
@@ -1,1105 +1,1108 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/common/xml.h>
 
 #include <glib.h>
 
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 // Convenience macros for logging action properties
 
 #define action_type_str(flags) \
     (pcmk_is_set((flags), pcmk__action_pseudo)? "pseudo-action" : "action")
 
 #define action_optional_str(flags) \
     (pcmk_is_set((flags), pcmk__action_optional)? "optional" : "required")
 
 #define action_runnable_str(flags) \
     (pcmk_is_set((flags), pcmk__action_runnable)? "runnable" : "unrunnable")
 
 #define action_node_str(a) \
     (((a)->node == NULL)? "no node" : (a)->node->private->name)
 
 /*!
  * \internal
  * \brief Add an XML node tag for a specified ID
  *
  * \param[in]     id      Node UUID to add
  * \param[in,out] xml     Parent XML tag to add to
  *
  * \return Newly created XML tag for the node
  */
 static xmlNode*
 add_node_to_xml_by_id(const char *id, xmlNode *xml)
 {
     xmlNode *node_xml;
 
     node_xml = pcmk__xe_create(xml, PCMK_XE_NODE);
     crm_xml_add(node_xml, PCMK_XA_ID, id);
 
     return node_xml;
 }
 
 /*!
  * \internal
  * \brief Add an XML node tag for a specified node
  *
  * \param[in]     node  Node to add
  * \param[in,out] xml   XML to add node to
  */
 static void
 add_node_to_xml(const pcmk_node_t *node, void *xml)
 {
     add_node_to_xml_by_id(node->private->id, (xmlNode *) xml);
 }
 
 /*!
  * \internal
  * \brief Count (optionally add to XML) nodes needing maintenance state update
  *
  * \param[in,out] xml        Parent XML tag to add to, if any
  * \param[in]     scheduler  Scheduler data
  *
  * \return Count of nodes added
  * \note Only Pacemaker Remote nodes are considered currently
  */
 static int
 add_maintenance_nodes(xmlNode *xml, const pcmk_scheduler_t *scheduler)
 {
     xmlNode *maintenance = NULL;
     int count = 0;
 
     if (xml != NULL) {
         maintenance = pcmk__xe_create(xml, PCMK__XE_MAINTENANCE);
     }
     for (const GList *iter = scheduler->nodes;
          iter != NULL; iter = iter->next) {
         const pcmk_node_t *node = iter->data;
 
         if (!pcmk__is_pacemaker_remote_node(node)) {
             continue;
         }
         if ((node->details->maintenance
              && !pcmk_is_set(node->private->flags, pcmk__node_remote_maint))
             || (!node->details->maintenance
                 && pcmk_is_set(node->private->flags, pcmk__node_remote_maint))) {
 
             if (maintenance != NULL) {
                 crm_xml_add(add_node_to_xml_by_id(node->private->id,
                                                   maintenance),
                             PCMK__XA_NODE_IN_MAINTENANCE,
                             (node->details->maintenance? "1" : "0"));
             }
             count++;
         }
     }
     crm_trace("%s %d nodes in need of maintenance mode update in state",
               ((maintenance == NULL)? "Counted" : "Added"), count);
     return count;
 }
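For reference, when a parent element is supplied, the block built here looks roughly like the following (a hand-written sketch; the element and attribute names assume the usual string values of PCMK__XE_MAINTENANCE, PCMK_XA_ID, and PCMK__XA_NODE_IN_MAINTENANCE):

<maintenance>
  <node id="remote1" node_in_maintenance="1"/>
  <node id="remote2" node_in_maintenance="0"/>
</maintenance>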
 
 /*!
  * \internal
  * \brief Add pseudo action with nodes needing maintenance state update
  *
  * \param[in,out] scheduler  Scheduler data
  */
 static void
 add_maintenance_update(pcmk_scheduler_t *scheduler)
 {
     pcmk_action_t *action = NULL;
 
     if (add_maintenance_nodes(NULL, scheduler) != 0) {
         action = get_pseudo_op(PCMK_ACTION_MAINTENANCE_NODES, scheduler);
         pcmk__set_action_flags(action, pcmk__action_always_in_graph);
     }
 }
 
 /*!
  * \internal
  * \brief Add XML with nodes that an action is expected to bring down
  *
  * If a specified action is expected to bring any nodes down, add an XML block
  * with their UUIDs. When a node is lost, this allows the controller to
  * determine whether it was expected.
  *
  * \param[in,out] xml       Parent XML tag to add to
  * \param[in]     action    Action to check for downed nodes
  */
 static void
 add_downed_nodes(xmlNode *xml, const pcmk_action_t *action)
 {
     CRM_CHECK((xml != NULL) && (action != NULL) && (action->node != NULL),
               return);
 
     if (pcmk__str_eq(action->task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_none)) {
 
         /* Shutdown makes the action's node down */
         xmlNode *downed = pcmk__xe_create(xml, PCMK__XE_DOWNED);
         add_node_to_xml_by_id(action->node->private->id, downed);
 
     } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH,
                             pcmk__str_none)) {
 
         /* Fencing makes the action's node and any hosted guest nodes down */
         const char *fence = g_hash_table_lookup(action->meta,
                                                 PCMK__META_STONITH_ACTION);
 
         if (pcmk__is_fencing_action(fence)) {
             xmlNode *downed = pcmk__xe_create(xml, PCMK__XE_DOWNED);
             add_node_to_xml_by_id(action->node->private->id, downed);
             pe_foreach_guest_node(action->node->private->scheduler,
                                   action->node, add_node_to_xml, downed);
         }
 
     } else if ((action->rsc != NULL)
                && pcmk_is_set(action->rsc->flags,
                               pcmk__rsc_is_remote_connection)
                && pcmk__str_eq(action->task, PCMK_ACTION_STOP,
                                pcmk__str_none)) {
 
         /* Stopping a remote connection resource makes the connected node
          * down, unless it's part of a migration
          */
         GList *iter;
         pcmk_action_t *input;
         bool migrating = false;
 
         for (iter = action->actions_before; iter != NULL; iter = iter->next) {
             input = ((pcmk__related_action_t *) iter->data)->action;
             if ((input->rsc != NULL)
                 && pcmk__str_eq(action->rsc->id, input->rsc->id, pcmk__str_none)
                 && pcmk__str_eq(input->task, PCMK_ACTION_MIGRATE_FROM,
                                 pcmk__str_none)) {
                 migrating = true;
                 break;
             }
         }
         if (!migrating) {
             xmlNode *downed = pcmk__xe_create(xml, PCMK__XE_DOWNED);
             add_node_to_xml_by_id(action->rsc->id, downed);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Create a transition graph operation key for a clone action
  *
  * \param[in] action       Clone action
  * \param[in] interval_ms  Action interval in milliseconds
  *
  * \return Newly allocated string with transition graph operation key
  */
 static char *
 clone_op_key(const pcmk_action_t *action, guint interval_ms)
 {
     if (pcmk__str_eq(action->task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
         const char *n_type = g_hash_table_lookup(action->meta, "notify_type");
         const char *n_task = g_hash_table_lookup(action->meta,
                                                  "notify_operation");
 
         return pcmk__notify_key(action->rsc->private->history_id, n_type,
                                 n_task);
     }
     return pcmk__op_key(action->rsc->private->history_id,
                         pcmk__s(action->cancel_task, action->task),
                         interval_ms);
 }
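As an illustration of the two key shapes this function chooses between, a self-contained sketch assuming the conventional Pacemaker formats (operation keys as <history_id>_<task>_<interval_ms> and notification keys as <history_id>_<type>_notify_<task>_0; the resource name is made up):

#include <stdio.h>

// Illustrative only: mimic the key formats that pcmk__op_key() and
// pcmk__notify_key() are assumed to produce.
int
main(void)
{
    char key[64];

    // Operation key: <history_id>_<task>_<interval_ms>
    snprintf(key, sizeof(key), "%s_%s_%u", "myclone", "monitor", 10000U);
    printf("%s\n", key);    // myclone_monitor_10000

    // Notification key: <history_id>_<notify_type>_notify_<notify_task>_0
    snprintf(key, sizeof(key), "%s_%s_notify_%s_0", "myclone", "pre", "start");
    printf("%s\n", key);    // myclone_pre_notify_start_0

    return 0;
}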
 
 /*!
  * \internal
  * \brief Add node details to transition graph action XML
  *
  * \param[in]     action  Scheduled action
  * \param[in,out] xml     Transition graph action XML for \p action
  */
 static void
 add_node_details(const pcmk_action_t *action, xmlNode *xml)
 {
     pcmk_node_t *router_node = pcmk__connection_host_for_action(action);
 
     crm_xml_add(xml, PCMK__META_ON_NODE, action->node->private->name);
     crm_xml_add(xml, PCMK__META_ON_NODE_UUID, action->node->private->id);
     if (router_node != NULL) {
         crm_xml_add(xml, PCMK__XA_ROUTER_NODE, router_node->private->name);
     }
 }
 
 /*!
  * \internal
  * \brief Add resource details to transition graph action XML
  *
  * \param[in]     action      Scheduled action
  * \param[in,out] action_xml  Transition graph action XML for \p action
  */
 static void
 add_resource_details(const pcmk_action_t *action, xmlNode *action_xml)
 {
     xmlNode *rsc_xml = NULL;
     const char *attr_list[] = {
         PCMK_XA_CLASS,
         PCMK_XA_PROVIDER,
         PCMK_XA_TYPE,
     };
 
     /* If a resource is locked to a node via PCMK_OPT_SHUTDOWN_LOCK, mark its
      * actions so the controller can preserve the lock when the action
      * completes.
      */
     if (pcmk__action_locks_rsc_to_node(action)) {
         crm_xml_add_ll(action_xml, PCMK_OPT_SHUTDOWN_LOCK,
                        (long long) action->rsc->private->lock_time);
     }
 
     // List affected resource
 
     rsc_xml = pcmk__xe_create(action_xml,
                               (const char *) action->rsc->private->xml->name);
     if (pcmk_is_set(action->rsc->flags, pcmk__rsc_removed)
         && (action->rsc->private->history_id != NULL)) {
         /* Use the numbered instance name here, because if there is more
          * than one instance on a node, we need to make sure the command
          * goes to the right one.
          *
          * This is important even for anonymous clones, because the clone's
          * unique meta-attribute might have just been toggled from on to
          * off.
          */
         crm_debug("Using orphan clone name %s instead of history ID %s",
                   action->rsc->id, action->rsc->private->history_id);
         crm_xml_add(rsc_xml, PCMK_XA_ID, action->rsc->private->history_id);
         crm_xml_add(rsc_xml, PCMK__XA_LONG_ID, action->rsc->id);
 
     } else if (!pcmk_is_set(action->rsc->flags, pcmk__rsc_unique)) {
         const char *xml_id = pcmk__xe_id(action->rsc->private->xml);
 
         crm_debug("Using anonymous clone name %s for %s (aka %s)",
                   xml_id, action->rsc->id, action->rsc->private->history_id);
 
         /* ID is what we'd like client to use
          * LONG_ID is what they might know it as instead
          *
          * LONG_ID is only strictly needed /here/ during the
          * transition period until all nodes in the cluster
          * are running the new software /and/ have rebooted
          * once (meaning that they've only ever spoken to a DC
          * supporting this feature).
          *
          * If anyone toggles the unique flag to 'on', the
          * 'instance free' name will correspond to an orphan
          * and fall into the clause above instead
          */
         crm_xml_add(rsc_xml, PCMK_XA_ID, xml_id);
         if ((action->rsc->private->history_id != NULL)
             && !pcmk__str_eq(xml_id, action->rsc->private->history_id,
                              pcmk__str_none)) {
             crm_xml_add(rsc_xml, PCMK__XA_LONG_ID,
                         action->rsc->private->history_id);
         } else {
             crm_xml_add(rsc_xml, PCMK__XA_LONG_ID, action->rsc->id);
         }
 
     } else {
         CRM_ASSERT(action->rsc->private->history_id == NULL);
         crm_xml_add(rsc_xml, PCMK_XA_ID, action->rsc->id);
     }
 
     for (int lpc = 0; lpc < PCMK__NELEM(attr_list); lpc++) {
         crm_xml_add(rsc_xml, attr_list[lpc],
                     g_hash_table_lookup(action->rsc->private->meta,
                                         attr_list[lpc]));
     }
 }
 
 /*!
  * \internal
  * \brief Add action attributes to transition graph action XML
  *
  * \param[in,out] action      Scheduled action
  * \param[in,out] action_xml  Transition graph action XML for \p action
  */
 static void
 add_action_attributes(pcmk_action_t *action, xmlNode *action_xml)
 {
     xmlNode *args_xml = NULL;
     pcmk_resource_t *rsc = action->rsc;
 
     /* The attributes are sorted before this function returns (see the
      * pcmk__xe_sort_attrs() call below), which keeps the scheduler
      * regression test graphs comparable.
      */
     args_xml = pcmk__xe_create(action_xml, PCMK__XE_ATTRIBUTES);
 
     crm_xml_add(args_xml, PCMK_XA_CRM_FEATURE_SET, CRM_FEATURE_SET);
     g_hash_table_foreach(action->extra, hash2field, args_xml);
 
     if ((rsc != NULL) && (action->node != NULL)) {
         // Get the resource instance attributes, evaluated properly for node
         GHashTable *params = pe_rsc_params(rsc, action->node,
                                            rsc->private->scheduler);
 
         pcmk__substitute_remote_addr(rsc, params);
 
         g_hash_table_foreach(params, hash2smartfield, args_xml);
 
     } else if ((rsc != NULL)
                && (rsc->private->variant <= pcmk__rsc_variant_primitive)) {
         GHashTable *params = pe_rsc_params(rsc, NULL, rsc->private->scheduler);
 
         g_hash_table_foreach(params, hash2smartfield, args_xml);
     }
 
     g_hash_table_foreach(action->meta, hash2metafield, args_xml);
     if (rsc != NULL) {
         pcmk_resource_t *parent = rsc;
 
         while (parent != NULL) {
             parent->private->cmds->add_graph_meta(parent, args_xml);
             parent = parent->private->parent;
         }
 
         pcmk__add_guest_meta_to_xml(args_xml, action);
 
     } else if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH, pcmk__str_none)
                && (action->node != NULL)) {
         /* Pass the node's attributes as meta-attributes.
          *
          * @TODO: Determine whether it is still necessary to do this. It was
          * added in 33d99707, probably for the libfence-based implementation in
          * c9a90bd, which is no longer used.
          */
         g_hash_table_foreach(action->node->private->attrs, hash2metafield,
                              args_xml);
     }
 
     pcmk__xe_sort_attrs(args_xml);
 }
 
 /*!
  * \internal
  * \brief Create the transition graph XML for a scheduled action
  *
  * \param[in,out] parent        Parent XML element to add action to
  * \param[in,out] action        Scheduled action
  * \param[in]     skip_details  If false, add action details as sub-elements
  * \param[in]     scheduler     Scheduler data
  */
 static void
 create_graph_action(xmlNode *parent, pcmk_action_t *action, bool skip_details,
                     const pcmk_scheduler_t *scheduler)
 {
     bool needs_node_info = true;
     bool needs_maintenance_info = false;
     xmlNode *action_xml = NULL;
 
     if ((action == NULL) || (scheduler == NULL)) {
         return;
     }
 
     // Create the top-level element based on task
 
     if (pcmk__str_eq(action->task, PCMK_ACTION_STONITH, pcmk__str_none)) {
         /* All fences need node info; guest node fences are pseudo-events */
         if (pcmk_is_set(action->flags, pcmk__action_pseudo)) {
             action_xml = pcmk__xe_create(parent, PCMK__XE_PSEUDO_EVENT);
         } else {
             action_xml = pcmk__xe_create(parent, PCMK__XE_CRM_EVENT);
         }
 
     } else if (pcmk__str_any_of(action->task,
                                 PCMK_ACTION_DO_SHUTDOWN,
                                 PCMK_ACTION_CLEAR_FAILCOUNT, NULL)) {
         action_xml = pcmk__xe_create(parent, PCMK__XE_CRM_EVENT);
 
     } else if (pcmk__str_eq(action->task, PCMK_ACTION_LRM_DELETE,
                             pcmk__str_none)) {
         // CIB-only clean-up for shutdown locks
         action_xml = pcmk__xe_create(parent, PCMK__XE_CRM_EVENT);
         crm_xml_add(action_xml, PCMK__XA_MODE, PCMK__VALUE_CIB);
 
     } else if (pcmk_is_set(action->flags, pcmk__action_pseudo)) {
         if (pcmk__str_eq(action->task, PCMK_ACTION_MAINTENANCE_NODES,
                          pcmk__str_none)) {
             needs_maintenance_info = true;
         }
         action_xml = pcmk__xe_create(parent, PCMK__XE_PSEUDO_EVENT);
         needs_node_info = false;
 
     } else {
         action_xml = pcmk__xe_create(parent, PCMK__XE_RSC_OP);
     }
 
     crm_xml_add_int(action_xml, PCMK_XA_ID, action->id);
     crm_xml_add(action_xml, PCMK_XA_OPERATION, action->task);
 
     if ((action->rsc != NULL) && (action->rsc->private->history_id != NULL)) {
         char *clone_key = NULL;
         guint interval_ms;
 
         if (pcmk__guint_from_hash(action->meta, PCMK_META_INTERVAL, 0,
                                   &interval_ms) != pcmk_rc_ok) {
             interval_ms = 0;
         }
         clone_key = clone_op_key(action, interval_ms);
         crm_xml_add(action_xml, PCMK__XA_OPERATION_KEY, clone_key);
         crm_xml_add(action_xml, "internal_" PCMK__XA_OPERATION_KEY,
                     action->uuid);
         free(clone_key);
     } else {
         crm_xml_add(action_xml, PCMK__XA_OPERATION_KEY, action->uuid);
     }
 
     if (needs_node_info && (action->node != NULL)) {
         add_node_details(action, action_xml);
         pcmk__insert_dup(action->meta, PCMK__META_ON_NODE,
                          action->node->private->name);
         pcmk__insert_dup(action->meta, PCMK__META_ON_NODE_UUID,
                          action->node->private->id);
     }
 
     if (skip_details) {
         return;
     }
 
     if ((action->rsc != NULL)
         && !pcmk_is_set(action->flags, pcmk__action_pseudo)) {
 
         // This is a real resource action, so add resource details
         add_resource_details(action, action_xml);
     }
 
     /* List any attributes in effect */
     add_action_attributes(action, action_xml);
 
     /* List any nodes this action is expected to make down */
     if (needs_node_info && (action->node != NULL)) {
         add_downed_nodes(action_xml, action);
     }
 
     if (needs_maintenance_info) {
         add_maintenance_nodes(action_xml, scheduler);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether an action should be added to the transition graph
  *
- * \param[in] action  Action to check
+ * \param[in,out] action  Action to check
  *
  * \return true if action should be added to graph, otherwise false
  */
 static bool
-should_add_action_to_graph(const pcmk_action_t *action)
+should_add_action_to_graph(pcmk_action_t *action)
 {
     if (!pcmk_is_set(action->flags, pcmk__action_runnable)) {
         crm_trace("Ignoring action %s (%d): unrunnable",
                   action->uuid, action->id);
         return false;
     }
 
     if (pcmk_is_set(action->flags, pcmk__action_optional)
         && !pcmk_is_set(action->flags, pcmk__action_always_in_graph)) {
         crm_trace("Ignoring action %s (%d): optional",
                   action->uuid, action->id);
         return false;
     }
 
     /* Actions for unmanaged resources should be excluded from the graph,
      * with the exception of monitors and cancellation of recurring monitors.
      */
     if ((action->rsc != NULL)
         && !pcmk_is_set(action->rsc->flags, pcmk__rsc_managed)
         && !pcmk__str_eq(action->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
 
         const char *interval_ms_s;
 
         /* A cancellation of a recurring monitor will get here because the task
          * is cancel rather than monitor, but the interval can still be used to
          * recognize it. The interval has been normalized to milliseconds by
          * this point, so a string comparison is sufficient.
          */
         interval_ms_s = g_hash_table_lookup(action->meta, PCMK_META_INTERVAL);
         if (pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)) {
             crm_trace("Ignoring action %s (%d): for unmanaged resource (%s)",
                       action->uuid, action->id, action->rsc->id);
             return false;
         }
     }
 
     /* Always add pseudo-actions, fence actions, and shutdown actions (already
      * determined to be required and runnable by this point)
      */
     if (pcmk_is_set(action->flags, pcmk__action_pseudo)
         || pcmk__strcase_any_of(action->task, PCMK_ACTION_STONITH,
                                 PCMK_ACTION_DO_SHUTDOWN, NULL)) {
         return true;
     }
 
     if (action->node == NULL) {
-        pcmk__sched_err("Skipping action %s (%d) "
+        pcmk__sched_err(action->scheduler,
+                        "Skipping action %s (%d) "
                         "because it was not assigned to a node (bug?)",
                         action->uuid, action->id);
         pcmk__log_action("Unassigned", action, false);
         return false;
     }
 
     if (pcmk_is_set(action->flags, pcmk__action_on_dc)) {
         crm_trace("Action %s (%d) should be dumped: "
                   "can run on DC instead of %s",
                   action->uuid, action->id, pcmk__node_name(action->node));
 
     } else if (pcmk__is_guest_or_bundle_node(action->node)
                && !pcmk_is_set(action->node->private->flags,
                                pcmk__node_remote_reset)) {
         crm_trace("Action %s (%d) should be dumped: "
                   "assuming will be runnable on guest %s",
                   action->uuid, action->id, pcmk__node_name(action->node));
 
     } else if (!action->node->details->online) {
-        pcmk__sched_err("Skipping action %s (%d) "
+        pcmk__sched_err(action->scheduler,
+                        "Skipping action %s (%d) "
                         "because it was scheduled for offline node (bug?)",
                         action->uuid, action->id);
         pcmk__log_action("Offline node", action, false);
         return false;
 
     } else if (action->node->details->unclean) {
-        pcmk__sched_err("Skipping action %s (%d) "
+        pcmk__sched_err(action->scheduler,
+                        "Skipping action %s (%d) "
                         "because it was scheduled for unclean node (bug?)",
                         action->uuid, action->id);
         pcmk__log_action("Unclean node", action, false);
         return false;
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Check whether an ordering's flags can change an action
  *
  * \param[in] ordering  Ordering to check
  *
  * \return true if ordering has flags that can change an action, false otherwise
  */
 static bool
 ordering_can_change_actions(const pcmk__related_action_t *ordering)
 {
     return pcmk_any_flags_set(ordering->flags,
                               ~(pcmk__ar_then_implies_first_graphed
                                 |pcmk__ar_first_implies_then_graphed
                                 |pcmk__ar_ordered));
 }
 
 /*!
  * \internal
  * \brief Check whether an action input should be in the transition graph
  *
  * \param[in]     action  Action to check
  * \param[in,out] input   Action input to check
  *
  * \return true if input should be in graph, false otherwise
  * \note This function may not only check an input, but disable it under
  *       certain circumstances (load or anti-colocation orderings that are
  *       not needed).
  */
 static bool
 should_add_input_to_graph(const pcmk_action_t *action,
                           pcmk__related_action_t *input)
 {
     if (input->graphed) {
         return true;
     }
 
     if (input->flags == pcmk__ar_none) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "ordering disabled",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if (!pcmk_is_set(input->action->flags, pcmk__action_runnable)
                && !ordering_can_change_actions(input)) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "optional and input unrunnable",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if (!pcmk_is_set(input->action->flags, pcmk__action_runnable)
                && pcmk_is_set(input->flags, pcmk__ar_min_runnable)) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "minimum number of instances required but input unrunnable",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if (pcmk_is_set(input->flags, pcmk__ar_unmigratable_then_blocks)
                && !pcmk_is_set(input->action->flags, pcmk__action_runnable)) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "input blocked if 'then' unmigratable",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if (pcmk_is_set(input->flags, pcmk__ar_if_first_unmigratable)
                && pcmk_is_set(input->action->flags, pcmk__action_migratable)) {
         crm_trace("Ignoring %s (%d) input %s (%d): ordering applies "
                   "only if input is unmigratable, but it is migratable",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if ((input->flags == pcmk__ar_ordered)
                && pcmk_is_set(input->action->flags, pcmk__action_migratable)
                && pcmk__ends_with(input->action->uuid, "_stop_0")) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "optional but stop in migration",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
 
     } else if (input->flags == pcmk__ar_if_on_same_node_or_target) {
         pcmk_node_t *input_node = input->action->node;
 
         if ((action->rsc != NULL)
             && pcmk__str_eq(action->task, PCMK_ACTION_MIGRATE_TO,
                             pcmk__str_none)) {
 
             pcmk_node_t *assigned = action->rsc->private->assigned_node;
 
             /* For load_stopped -> migrate_to orderings, we care about where
              * the resource has been assigned, not where migrate_to will be
              * executed.
              */
             if (!pcmk__same_node(input_node, assigned)) {
                 crm_trace("Ignoring %s (%d) input %s (%d): "
                           "migration target %s is not same as input node %s",
                           action->uuid, action->id,
                           input->action->uuid, input->action->id,
                           (assigned? assigned->private->name : "<none>"),
                           (input_node? input_node->private->name : "<none>"));
                 input->flags = pcmk__ar_none;
                 return false;
             }
 
         } else if (!pcmk__same_node(input_node, action->node)) {
             crm_trace("Ignoring %s (%d) input %s (%d): "
                       "not on same node (%s vs %s)",
                       action->uuid, action->id,
                       input->action->uuid, input->action->id,
                       (action->node? action->node->private->name : "<none>"),
                       (input_node? input_node->private->name : "<none>"));
             input->flags = pcmk__ar_none;
             return false;
 
         } else if (pcmk_is_set(input->action->flags, pcmk__action_optional)) {
             crm_trace("Ignoring %s (%d) input %s (%d): "
                       "ordering optional",
                       action->uuid, action->id,
                       input->action->uuid, input->action->id);
             input->flags = pcmk__ar_none;
             return false;
         }
 
     } else if (input->flags == pcmk__ar_if_required_on_same_node) {
         if (input->action->node && action->node
             && !pcmk__same_node(input->action->node, action->node)) {
             crm_trace("Ignoring %s (%d) input %s (%d): "
                       "not on same node (%s vs %s)",
                       action->uuid, action->id,
                       input->action->uuid, input->action->id,
                       pcmk__node_name(action->node),
                       pcmk__node_name(input->action->node));
             input->flags = pcmk__ar_none;
             return false;
 
         } else if (pcmk_is_set(input->action->flags, pcmk__action_optional)) {
             crm_trace("Ignoring %s (%d) input %s (%d): optional",
                       action->uuid, action->id,
                       input->action->uuid, input->action->id);
             input->flags = pcmk__ar_none;
             return false;
         }
 
     } else if (input->action->rsc
                && input->action->rsc != action->rsc
                && pcmk_is_set(input->action->rsc->flags, pcmk__rsc_failed)
                && !pcmk_is_set(input->action->rsc->flags, pcmk__rsc_managed)
                && pcmk__ends_with(input->action->uuid, "_stop_0")
                && pcmk__is_clone(action->rsc)) {
         crm_warn("Ignoring requirement that %s complete before %s:"
                  " unmanaged failed resources cannot prevent clone shutdown",
                  input->action->uuid, action->uuid);
         return false;
 
     } else if (pcmk_is_set(input->action->flags, pcmk__action_optional)
                && !pcmk_any_flags_set(input->action->flags,
                                       pcmk__action_always_in_graph
                                       |pcmk__action_added_to_graph)
                && !should_add_action_to_graph(input->action)) {
         crm_trace("Ignoring %s (%d) input %s (%d): "
                   "input optional",
                   action->uuid, action->id,
                   input->action->uuid, input->action->id);
         return false;
     }
 
     crm_trace("%s (%d) input %s %s (%d) on %s should be dumped: %s %s %#.6x",
               action->uuid, action->id, action_type_str(input->action->flags),
               input->action->uuid, input->action->id,
               action_node_str(input->action),
               action_runnable_str(input->action->flags),
               action_optional_str(input->action->flags), input->flags);
     return true;
 }
 
 /*!
  * \internal
  * \brief Check whether an ordering creates an ordering loop
  *
  * \param[in]     init_action  "First" action in ordering
  * \param[in]     action       Callers should always set this the same as
  *                             \p init_action (this function may use a different
  *                             value for recursive calls)
  * \param[in,out] input        Action wrapper for "then" action in ordering
  *
  * \return true if the ordering creates a loop, otherwise false
  */
 bool
 pcmk__graph_has_loop(const pcmk_action_t *init_action,
                      const pcmk_action_t *action, pcmk__related_action_t *input)
 {
     bool has_loop = false;
 
     if (pcmk_is_set(input->action->flags, pcmk__action_detect_loop)) {
         crm_trace("Breaking tracking loop: %s@%s -> %s@%s (%#.6x)",
                   input->action->uuid,
                   input->action->node? input->action->node->private->name : "",
                   action->uuid,
                   action->node? action->node->private->name : "",
                   input->flags);
         return false;
     }
 
     // Don't need to check inputs that won't be used
     if (!should_add_input_to_graph(action, input)) {
         return false;
     }
 
     if (input->action == init_action) {
         crm_debug("Input loop found in %s@%s ->...-> %s@%s",
                   action->uuid,
                   action->node? action->node->private->name : "",
                   init_action->uuid,
                   init_action->node? init_action->node->private->name : "");
         return true;
     }
 
     pcmk__set_action_flags(input->action, pcmk__action_detect_loop);
 
     crm_trace("Checking inputs of action %s@%s input %s@%s (%#.6x)"
               "for graph loop with %s@%s ",
               action->uuid,
               action->node? action->node->private->name : "",
               input->action->uuid,
               input->action->node? input->action->node->private->name : "",
               input->flags,
               init_action->uuid,
               init_action->node? init_action->node->private->name : "");
 
     // Recursively check input itself for loops
     for (GList *iter = input->action->actions_before;
          iter != NULL; iter = iter->next) {
 
         if (pcmk__graph_has_loop(init_action, input->action,
                                  (pcmk__related_action_t *) iter->data)) {
             // Recursive call already logged a debug message
             has_loop = true;
             break;
         }
     }
 
     pcmk__clear_action_flags(input->action, pcmk__action_detect_loop);
 
     if (!has_loop) {
         crm_trace("No input loop found in %s@%s -> %s@%s (%#.6x)",
                   input->action->uuid,
                   input->action->node? input->action->node->private->name : "",
                   action->uuid,
                   action->node? action->node->private->name : "",
                   input->flags);
     }
     return has_loop;
 }
 
 /*!
  * \internal
  * \brief Create a synapse XML element for a transition graph
  *
  * \param[in]     action     Action that synapse is for
  * \param[in,out] scheduler  Scheduler data containing graph
  *
  * \return Newly added XML element for new graph synapse
  */
 static xmlNode *
 create_graph_synapse(const pcmk_action_t *action, pcmk_scheduler_t *scheduler)
 {
     int synapse_priority = 0;
     xmlNode *syn = pcmk__xe_create(scheduler->graph, "synapse");
 
     crm_xml_add_int(syn, PCMK_XA_ID, scheduler->num_synapse);
     scheduler->num_synapse++;
 
     if (action->rsc != NULL) {
         synapse_priority = action->rsc->private->priority;
     }
     if (action->priority > synapse_priority) {
         synapse_priority = action->priority;
     }
     if (synapse_priority > 0) {
         crm_xml_add_int(syn, PCMK__XA_PRIORITY, synapse_priority);
     }
     return syn;
 }
 
 /*!
  * \internal
  * \brief Add an action to the transition graph XML if appropriate
  *
  * \param[in,out] data       Action to possibly add
  * \param[in,out] user_data  Scheduler data
  *
  * \note This will de-duplicate the action inputs, meaning that the
  *       pcmk__related_action_t:flags values can no longer be relied on to
  *       retain their original settings. That means this MUST be called after
  *       pcmk__apply_orderings() is complete, and nothing after this should
  *       rely on those flags. (For example, some code checks whether flags
  *       equals a particular value rather than whether a flag is set, and
  *       some code looks for particular combinations of flags -- such code
  *       must run before pcmk__create_graph().)
  */
 static void
 add_action_to_graph(gpointer data, gpointer user_data)
 {
     pcmk_action_t *action = (pcmk_action_t *) data;
     pcmk_scheduler_t *scheduler = (pcmk_scheduler_t *) user_data;
 
     xmlNode *syn = NULL;
     xmlNode *set = NULL;
     xmlNode *in = NULL;
 
     /* If we haven't already, de-duplicate the inputs. We do this even if we
      * won't be adding the action to the graph, so that crm_simulate's dot
      * graphs don't contain duplicates.
      */
     if (!pcmk_is_set(action->flags, pcmk__action_inputs_deduplicated)) {
         pcmk__deduplicate_action_inputs(action);
         pcmk__set_action_flags(action, pcmk__action_inputs_deduplicated);
     }
 
     if (pcmk_is_set(action->flags, pcmk__action_added_to_graph)
         || !should_add_action_to_graph(action)) {
         return; // Already added, or shouldn't be
     }
     pcmk__set_action_flags(action, pcmk__action_added_to_graph);
 
     crm_trace("Adding action %d (%s%s%s) to graph",
               action->id, action->uuid,
               ((action->node == NULL)? "" : " on "),
               ((action->node == NULL)? "" : action->node->private->name));
 
     syn = create_graph_synapse(action, scheduler);
     set = pcmk__xe_create(syn, "action_set");
     in = pcmk__xe_create(syn, "inputs");
 
     create_graph_action(set, action, false, scheduler);
 
     for (GList *lpc = action->actions_before; lpc != NULL; lpc = lpc->next) {
         pcmk__related_action_t *input = lpc->data;
 
         if (should_add_input_to_graph(action, input)) {
             xmlNode *input_xml = pcmk__xe_create(in, "trigger");
 
             input->graphed = true;
             create_graph_action(input_xml, input->action, true, scheduler);
         }
     }
 }
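Putting create_graph_synapse(), create_graph_action(), and the input loop together, each entry added to the graph has roughly this shape (a hand-written sketch with made-up IDs, assuming the usual string values of the XML name constants, e.g. PCMK__XE_RSC_OP as rsc_op):

<synapse id="0" priority="100">
  <action_set>
    <rsc_op id="12" operation="start" operation_key="myrsc_start_0"
            on_node="node1"/>
  </action_set>
  <inputs>
    <trigger>
      <rsc_op id="9" operation="stop" operation_key="myrsc_stop_0"/>
    </trigger>
  </inputs>
</synapse>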
 
 static int transition_id = -1;
 
 /*!
  * \internal
  * \brief Log a message after calculating a transition
  *
  * \param[in] filename  Where transition input is stored
  */
 void
 pcmk__log_transition_summary(const char *filename)
 {
     if (was_processing_error || crm_config_error) {
         crm_err("Calculated transition %d (with errors)%s%s",
                 transition_id,
                 (filename == NULL)? "" : ", saving inputs in ",
                 (filename == NULL)? "" : filename);
 
     } else if (was_processing_warning || crm_config_warning) {
         crm_warn("Calculated transition %d (with warnings)%s%s",
                  transition_id,
                  (filename == NULL)? "" : ", saving inputs in ",
                  (filename == NULL)? "" : filename);
 
     } else {
         crm_notice("Calculated transition %d%s%s",
                    transition_id,
                    (filename == NULL)? "" : ", saving inputs in ",
                    (filename == NULL)? "" : filename);
     }
     if (crm_config_error) {
         crm_notice("Configuration errors found during scheduler processing,"
                    "  please run \"crm_verify -L\" to identify issues");
     }
 }
 
 /*!
  * \internal
  * \brief Add a resource's actions to the transition graph
  *
  * \param[in,out] rsc  Resource whose actions should be added
  */
 void
 pcmk__add_rsc_actions_to_graph(pcmk_resource_t *rsc)
 {
     GList *iter = NULL;
 
     CRM_ASSERT(rsc != NULL);
 
     pcmk__rsc_trace(rsc, "Adding actions for %s to graph", rsc->id);
 
     // First add the resource's own actions
     g_list_foreach(rsc->private->actions, add_action_to_graph,
                    rsc->private->scheduler);
 
     // Then recursively add its children's actions (appropriate to variant)
     for (iter = rsc->private->children; iter != NULL; iter = iter->next) {
         pcmk_resource_t *child_rsc = (pcmk_resource_t *) iter->data;
 
         child_rsc->private->cmds->add_actions_to_graph(child_rsc);
     }
 }
 
 /*!
  * \internal
  * \brief Create a transition graph with all cluster actions needed
  *
  * \param[in,out] scheduler  Scheduler data
  */
 void
 pcmk__create_graph(pcmk_scheduler_t *scheduler)
 {
     GList *iter = NULL;
     const char *value = NULL;
     long long limit = 0LL;
     GHashTable *config_hash = scheduler->config_hash;
 
     transition_id++;
     crm_trace("Creating transition graph %d", transition_id);
 
     scheduler->graph = pcmk__xe_create(NULL, PCMK__XE_TRANSITION_GRAPH);
 
     value = pcmk__cluster_option(config_hash, PCMK_OPT_CLUSTER_DELAY);
     crm_xml_add(scheduler->graph, PCMK_OPT_CLUSTER_DELAY, value);
 
     value = pcmk__cluster_option(config_hash, PCMK_OPT_STONITH_TIMEOUT);
     crm_xml_add(scheduler->graph, PCMK_OPT_STONITH_TIMEOUT, value);
 
     crm_xml_add(scheduler->graph, "failed-stop-offset", "INFINITY");
 
     if (pcmk_is_set(scheduler->flags, pcmk_sched_start_failure_fatal)) {
         crm_xml_add(scheduler->graph, "failed-start-offset", "INFINITY");
     } else {
         crm_xml_add(scheduler->graph, "failed-start-offset", "1");
     }
 
     value = pcmk__cluster_option(config_hash, PCMK_OPT_BATCH_LIMIT);
     crm_xml_add(scheduler->graph, PCMK_OPT_BATCH_LIMIT, value);
 
     crm_xml_add_int(scheduler->graph, "transition_id", transition_id);
 
     value = pcmk__cluster_option(config_hash, PCMK_OPT_MIGRATION_LIMIT);
     if ((pcmk__scan_ll(value, &limit, 0LL) == pcmk_rc_ok) && (limit > 0)) {
         crm_xml_add(scheduler->graph, PCMK_OPT_MIGRATION_LIMIT, value);
     }
 
     if (scheduler->recheck_by > 0) {
         char *recheck_epoch = NULL;
 
         recheck_epoch = crm_strdup_printf("%llu",
                                           (long long) scheduler->recheck_by);
         crm_xml_add(scheduler->graph, "recheck-by", recheck_epoch);
         free(recheck_epoch);
     }
 
     /* The following code will de-duplicate action inputs, so nothing past
      * this should rely on the action input flags retaining their original
      * values.
      */
      */
 
     // Add resource actions to graph
     for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
         pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
 
         pcmk__rsc_trace(rsc, "Processing actions for %s", rsc->id);
         rsc->private->cmds->add_actions_to_graph(rsc);
     }
 
     // Add pseudo-action for list of nodes with maintenance state update
     add_maintenance_update(scheduler);
 
     // Add non-resource (node) actions
     for (iter = scheduler->actions; iter != NULL; iter = iter->next) {
         pcmk_action_t *action = (pcmk_action_t *) iter->data;
 
         if ((action->rsc != NULL)
             && (action->node != NULL)
             && action->node->details->shutdown
             && !pcmk_is_set(action->rsc->flags, pcmk__rsc_maintenance)
             && !pcmk_any_flags_set(action->flags,
                                    pcmk__action_optional|pcmk__action_runnable)
             && pcmk__str_eq(action->task, PCMK_ACTION_STOP, pcmk__str_none)) {
             /* Eventually we should just ignore the 'fence' case, but for now
              * it's the best way to detect (in CTS) when CIB resource updates
              * are being lost.
              */
             if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)
                 || (scheduler->no_quorum_policy == pcmk_no_quorum_ignore)) {
                 const bool managed = pcmk_is_set(action->rsc->flags,
                                                  pcmk__rsc_managed);
                 const bool failed = pcmk_is_set(action->rsc->flags,
                                                 pcmk__rsc_failed);
 
                 crm_crit("Cannot %s %s because of %s:%s%s (%s)",
                          action->node->details->unclean? "fence" : "shut down",
                          pcmk__node_name(action->node), action->rsc->id,
                          (managed? " blocked" : " unmanaged"),
                          (failed? " failed" : ""), action->uuid);
             }
         }
 
         add_action_to_graph((gpointer) action, (gpointer) scheduler);
     }
 
     crm_log_xml_trace(scheduler->graph, "graph");
 }
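For orientation, the root element that pcmk__create_graph() builds ends up looking roughly like this (a sketch with made-up option values; the attribute names follow the option constants and string literals used above):

<transition_graph cluster-delay="60s" stonith-timeout="60s"
                  failed-stop-offset="INFINITY" failed-start-offset="INFINITY"
                  batch-limit="0" transition_id="3" migration-limit="2"
                  recheck-by="1712345678">
  <synapse id="0">...</synapse>
</transition_graph>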
diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c
index 6713c7df51..1b8ae71cd0 100644
--- a/lib/pacemaker/pcmk_sched_primitive.c
+++ b/lib/pacemaker/pcmk_sched_primitive.c
@@ -1,1716 +1,1718 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdbool.h>
 #include <stdint.h>                 // uint8_t, uint32_t
 
 #include <crm/common/xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 static void stop_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
                           bool optional);
 static void start_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
                            bool optional);
 static void demote_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
                             bool optional);
 static void promote_resource(pcmk_resource_t *rsc, pcmk_node_t *node,
                              bool optional);
 static void assert_role_error(pcmk_resource_t *rsc, pcmk_node_t *node,
                               bool optional);
 
 #define RSC_ROLE_MAX    (pcmk_role_promoted + 1)
 
 static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
     /* This array lists the immediate next role when transitioning from one role
      * to a target role. For example, when going from Stopped to Promoted, the
      * next role is Unpromoted, because the resource must be started before it
      * can be promoted. The current state then becomes Started, which is fed
      * into this array again, giving a next role of Promoted.
      *
      * Current role       Immediate next role   Final target role
      * ------------       -------------------   -----------------
      */
     /* Unknown */       { pcmk_role_unknown,    /* Unknown */
                           pcmk_role_stopped,    /* Stopped */
                           pcmk_role_stopped,    /* Started */
                           pcmk_role_stopped,    /* Unpromoted */
                           pcmk_role_stopped,    /* Promoted */
                         },
     /* Stopped */       { pcmk_role_stopped,    /* Unknown */
                           pcmk_role_stopped,    /* Stopped */
                           pcmk_role_started,    /* Started */
                           pcmk_role_unpromoted, /* Unpromoted */
                           pcmk_role_unpromoted, /* Promoted */
                         },
     /* Started */       { pcmk_role_stopped,    /* Unknown */
                           pcmk_role_stopped,    /* Stopped */
                           pcmk_role_started,    /* Started */
                           pcmk_role_unpromoted, /* Unpromoted */
                           pcmk_role_promoted,   /* Promoted */
                         },
     /* Unpromoted */    { pcmk_role_stopped,    /* Unknown */
                           pcmk_role_stopped,    /* Stopped */
                           pcmk_role_stopped,    /* Started */
                           pcmk_role_unpromoted, /* Unpromoted */
                           pcmk_role_promoted,   /* Promoted */
                         },
     /* Promoted  */     { pcmk_role_stopped,    /* Unknown */
                           pcmk_role_unpromoted, /* Stopped */
                           pcmk_role_unpromoted, /* Started */
                           pcmk_role_unpromoted, /* Unpromoted */
                           pcmk_role_promoted,   /* Promoted */
                         },
 };
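To make the iteration concrete, a minimal sketch (assuming the enum values and rsc_state_matrix above are in scope; the function itself is illustrative) of how repeatedly indexing the matrix expands a transition into intermediate roles:

// Illustrative only: feed each "next role" back into rsc_state_matrix until
// the target role is reached. For Stopped -> Promoted, this logs
// Stopped -> Unpromoted, then Unpromoted -> Promoted.
static void
sketch_role_path(enum rsc_role_e current, enum rsc_role_e target)
{
    while (current != target) {
        enum rsc_role_e next = rsc_state_matrix[current][target];

        crm_trace("Intermediate role transition: %d -> %d", current, next);
        current = next;
    }
}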
 
 /*!
  * \internal
  * \brief Function to schedule actions needed for a role change
  *
  * \param[in,out] rsc       Resource whose role is changing
  * \param[in,out] node      Node where resource will be in its next role
  * \param[in]     optional  Whether scheduled actions should be optional
  */
 typedef void (*rsc_transition_fn)(pcmk_resource_t *rsc, pcmk_node_t *node,
                                   bool optional);
 
 static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
     /* This array lists the function needed to transition directly from one role
      * to another. NULL indicates that nothing is needed.
      *
      * Current role         Transition function             Next role
      * ------------         -------------------             ----------
      */
     /* Unknown */       {   assert_role_error,              /* Unknown */
                             stop_resource,                  /* Stopped */
                             assert_role_error,              /* Started */
                             assert_role_error,              /* Unpromoted */
                             assert_role_error,              /* Promoted */
                         },
     /* Stopped */       {   assert_role_error,              /* Unknown */
                             NULL,                           /* Stopped */
                             start_resource,                 /* Started */
                             start_resource,                 /* Unpromoted */
                             assert_role_error,              /* Promoted */
                         },
     /* Started */       {   assert_role_error,              /* Unknown */
                             stop_resource,                  /* Stopped */
                             NULL,                           /* Started */
                             NULL,                           /* Unpromoted */
                             promote_resource,               /* Promoted */
                         },
     /* Unpromoted */    {   assert_role_error,              /* Unknown */
                             stop_resource,                  /* Stopped */
                             stop_resource,                  /* Started */
                             NULL,                           /* Unpromoted */
                             promote_resource,               /* Promoted */
                         },
     /* Promoted  */     {   assert_role_error,              /* Unknown */
                             demote_resource,                /* Stopped */
                             demote_resource,                /* Started */
                             demote_resource,                /* Unpromoted */
                             NULL,                           /* Promoted */
                         },
 };
 
 /*!
  * \internal
  * \brief Get a list of a resource's allowed nodes sorted by node score
  *
  * \param[in] rsc  Resource to check
  *
  * \return List of allowed nodes sorted by node score
  */
 static GList *
 sorted_allowed_nodes(const pcmk_resource_t *rsc)
 {
     if (rsc->private->allowed_nodes != NULL) {
         GList *nodes = g_hash_table_get_values(rsc->private->allowed_nodes);
 
         if (nodes != NULL) {
             return pcmk__sort_nodes(nodes, pcmk__current_node(rsc));
         }
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Assign a resource to its best allowed node, if possible
  *
  * \param[in,out] rsc           Resource to choose a node for
  * \param[in]     prefer        If not \c NULL, prefer this node when all else
  *                              equal
  * \param[in]     stop_if_fail  If \c true and \p rsc can't be assigned to a
  *                              node, set next role to stopped and update
  *                              existing actions
  *
  * \return true if \p rsc could be assigned to a node, otherwise false
  *
  * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
  *       completely undo the assignment. A successful assignment can be either
  *       undone or left alone as final. A failed assignment has the same effect
  *       as calling pcmk__unassign_resource(); there are no side effects on
  *       roles or actions.
  */
 static bool
 assign_best_node(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
                  bool stop_if_fail)
 {
     GList *nodes = NULL;
     pcmk_node_t *chosen = NULL;
     pcmk_node_t *best = NULL;
     const pcmk_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc);
 
     if (prefer == NULL) {
         prefer = most_free_node;
     }
 
     if (!pcmk_is_set(rsc->flags, pcmk__rsc_unassigned)) {
         // We've already finished assignment of resources to nodes
         return rsc->private->assigned_node != NULL;
     }
 
     // Sort allowed nodes by score
     nodes = sorted_allowed_nodes(rsc);
     if (nodes != NULL) {
         best = (pcmk_node_t *) nodes->data; // First node has best score
     }
 
     if ((prefer != NULL) && (nodes != NULL)) {
         // Get the allowed node version of prefer
         chosen = g_hash_table_lookup(rsc->private->allowed_nodes,
                                      prefer->private->id);
 
         if (chosen == NULL) {
             pcmk__rsc_trace(rsc, "Preferred node %s for %s was unknown",
                             pcmk__node_name(prefer), rsc->id);
 
         /* Favor the preferred node as long as its score is at least as good as
          * the best allowed node's.
          *
          * An alternative would be to favor the preferred node even if the best
          * node is better, when the best node's score is less than INFINITY.
          */
         } else if (chosen->assign->score < best->assign->score) {
             pcmk__rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
                             pcmk__node_name(chosen), rsc->id);
             chosen = NULL;
 
         } else if (!pcmk__node_available(chosen, true, false)) {
             pcmk__rsc_trace(rsc, "Preferred node %s for %s was unavailable",
                             pcmk__node_name(chosen), rsc->id);
             chosen = NULL;
 
         } else {
             pcmk__rsc_trace(rsc,
                             "Chose preferred node %s for %s "
                             "(ignoring %d candidates)",
                             pcmk__node_name(chosen), rsc->id,
                             g_list_length(nodes));
         }
     }
 
     if ((chosen == NULL) && (best != NULL)) {
         /* Either there is no preferred node, or the preferred node is not
          * suitable, but another node is allowed to run the resource.
          */
 
         chosen = best;
 
         if (!pcmk__is_unique_clone(rsc->private->parent)
             && (chosen->assign->score > 0) // Zero not acceptable
             && pcmk__node_available(chosen, false, false)) {
             /* If the resource is already running on a node, prefer that node if
              * it is just as good as the chosen node.
              *
              * We don't do this for unique clone instances, because
              * pcmk__assign_instances() has already assigned instances to their
              * running nodes when appropriate, and if we get here, we don't want
              * remaining unassigned instances to prefer a node that's already
              * running another instance.
              */
             pcmk_node_t *running = pcmk__current_node(rsc);
 
             if (running == NULL) {
                 // Nothing to do
 
             } else if (!pcmk__node_available(running, true, false)) {
                 pcmk__rsc_trace(rsc,
                                 "Current node for %s (%s) can't run resources",
                                 rsc->id, pcmk__node_name(running));
 
             } else {
                 int nodes_with_best_score = 1;
 
                 for (GList *iter = nodes->next; iter; iter = iter->next) {
                     pcmk_node_t *allowed = (pcmk_node_t *) iter->data;
 
                     if (allowed->assign->score != chosen->assign->score) {
                         // The nodes are sorted by score, so no more are equal
                         break;
                     }
                     if (pcmk__same_node(allowed, running)) {
                         // Scores are equal, so prefer the current node
                         chosen = allowed;
                     }
                     nodes_with_best_score++;
                 }
 
                 if (nodes_with_best_score > 1) {
                     uint8_t log_level = LOG_INFO;
 
                     if (chosen->assign->score >= PCMK_SCORE_INFINITY) {
                         log_level = LOG_WARNING;
                     }
                     do_crm_log(log_level,
                                "Chose %s for %s from %d nodes with score %s",
                                pcmk__node_name(chosen), rsc->id,
                                nodes_with_best_score,
                                pcmk_readable_score(chosen->assign->score));
                 }
             }
         }
 
         pcmk__rsc_trace(rsc, "Chose %s for %s from %d candidates",
                         pcmk__node_name(chosen), rsc->id, g_list_length(nodes));
     }
 
     pcmk__assign_resource(rsc, chosen, false, stop_if_fail);
     g_list_free(nodes);
     return rsc->private->assigned_node != NULL;
 }
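 
 /* A minimal, self-contained sketch of the tie-breaking scan above: given a
  * score-sorted array, walk only the leading run of equal scores and prefer
  * the entry matching the current node. The types here are simplified
  * stand-ins for illustration, not the real pcmk_node_t.
  */
 #include <stddef.h>
 #include <string.h>
 
 struct demo_node {
     const char *name;
     int score;
 };
 
 static const struct demo_node *
 demo_tie_break(const struct demo_node *sorted, size_t n_nodes,
                const char *current_name)
 {
     const struct demo_node *chosen = NULL;
 
     if (n_nodes == 0) {
         return NULL;
     }
     chosen = &sorted[0]; // First entry has the best score
 
     for (size_t i = 1; i < n_nodes; i++) {
         if (sorted[i].score != chosen->score) {
             break; // Sorted by score, so no later entry can be tied
         }
         if (strcmp(sorted[i].name, current_name) == 0) {
             chosen = &sorted[i]; // Scores are tied, so prefer the current node
         }
     }
     return chosen;
 }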
 
 /*!
  * \internal
  * \brief Apply a "this with" colocation to a node's allowed node scores
  *
  * \param[in,out] colocation  Colocation to apply
  * \param[in,out] rsc         Resource being assigned
  */
 static void
 apply_this_with(pcmk__colocation_t *colocation, pcmk_resource_t *rsc)
 {
     GHashTable *archive = NULL;
     pcmk_resource_t *other = colocation->primary;
 
     // In certain cases, we will need to revert the node scores
     if ((colocation->dependent_role >= pcmk_role_promoted)
         || ((colocation->score < 0)
             && (colocation->score > -PCMK_SCORE_INFINITY))) {
         archive = pcmk__copy_node_table(rsc->private->allowed_nodes);
     }
 
     if (pcmk_is_set(other->flags, pcmk__rsc_unassigned)) {
         pcmk__rsc_trace(rsc,
                         "%s: Assigning colocation %s primary %s first"
                         "(score=%d role=%s)",
                         rsc->id, colocation->id, other->id,
                         colocation->score,
                         pcmk_role_text(colocation->dependent_role));
         other->private->cmds->assign(other, NULL, true);
     }
 
     // Apply the colocation score to this resource's allowed node scores
     rsc->private->cmds->apply_coloc_score(rsc, other, colocation, true);
     if ((archive != NULL)
         && !pcmk__any_node_available(rsc->private->allowed_nodes)) {
         pcmk__rsc_info(rsc,
                        "%s: Reverting scores from colocation with %s "
                        "because no nodes allowed",
                        rsc->id, other->id);
         g_hash_table_destroy(rsc->private->allowed_nodes);
         rsc->private->allowed_nodes = archive;
         archive = NULL;
     }
     if (archive != NULL) {
         g_hash_table_destroy(archive);
     }
 }
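 
 /* The snapshot-and-revert pattern above in isolation: copy a table, apply a
  * change speculatively, and either keep the result or roll back. The
  * demo_*() helpers are hypothetical stand-ins for pcmk__copy_node_table()
  * and the colocation application; only the GLib calls are the real API.
  */
 #include <glib.h>
 #include <stdbool.h>
 
 static GHashTable *demo_copy_table(GHashTable *table); // hypothetical
 static void demo_apply_change(GHashTable *table);      // hypothetical
 static bool demo_result_usable(GHashTable *table);     // hypothetical
 
 static void
 demo_speculative_update(GHashTable **table)
 {
     GHashTable *archive = demo_copy_table(*table); // Snapshot first
 
     demo_apply_change(*table);         // Speculative modification
     if (demo_result_usable(*table)) {
         g_hash_table_destroy(archive); // Keep the result, drop the snapshot
     } else {
         g_hash_table_destroy(*table);  // Roll back to the snapshot
         *table = archive;
     }
 }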
 
 /*!
  * \internal
  * \brief Update a Pacemaker Remote node once its connection has been assigned
  *
  * \param[in] connection  Connection resource that has been assigned
  */
 static void
 remote_connection_assigned(const pcmk_resource_t *connection)
 {
     pcmk_node_t *remote_node = pcmk_find_node(connection->private->scheduler,
                                               connection->id);
 
     CRM_CHECK(remote_node != NULL, return);
 
     if ((connection->private->assigned_node != NULL)
         && (connection->private->next_role != pcmk_role_stopped)) {
 
         crm_trace("Pacemaker Remote node %s will be online",
                   remote_node->private->id);
         remote_node->details->online = TRUE;
         if (!pcmk_is_set(remote_node->private->flags, pcmk__node_seen)) {
             // Avoid unnecessary fence, since we will attempt connection
             remote_node->details->unclean = FALSE;
         }
 
     } else {
         crm_trace("Pacemaker Remote node %s will be shut down "
                   "(%sassigned connection's next role is %s)",
                   remote_node->private->id,
                   ((connection->private->assigned_node == NULL)? "un" : ""),
                   pcmk_role_text(connection->private->next_role));
         remote_node->details->shutdown = TRUE;
     }
 }
 
 /*!
  * \internal
  * \brief Assign a primitive resource to a node
  *
  * \param[in,out] rsc           Resource to assign to a node
  * \param[in]     prefer        Node to prefer, if all else is equal
  * \param[in]     stop_if_fail  If \c true and \p rsc can't be assigned to a
  *                              node, set next role to stopped and update
  *                              existing actions
  *
  * \return Node that \p rsc is assigned to, if assigned entirely to one node
  *
  * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
  *       completely undo the assignment. A successful assignment can be either
  *       undone or left alone as final. A failed assignment has the same effect
  *       as calling pcmk__unassign_resource(); there are no side effects on
  *       roles or actions.
  */
 pcmk_node_t *
 pcmk__primitive_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
                        bool stop_if_fail)
 {
     GList *this_with_colocations = NULL;
     GList *with_this_colocations = NULL;
     GList *iter = NULL;
     pcmk_resource_t *parent = NULL;
     pcmk__colocation_t *colocation = NULL;
     pcmk_scheduler_t *scheduler = NULL;
 
     CRM_ASSERT(pcmk__is_primitive(rsc));
     scheduler = rsc->private->scheduler;
     parent = rsc->private->parent;
 
     // Never assign a child without parent being assigned first
     if ((parent != NULL) && !pcmk_is_set(parent->flags, pcmk__rsc_assigning)) {
         pcmk__rsc_debug(rsc, "%s: Assigning parent %s first",
                         rsc->id, parent->id);
         parent->private->cmds->assign(parent, prefer, stop_if_fail);
     }
 
     if (!pcmk_is_set(rsc->flags, pcmk__rsc_unassigned)) {
         // Assignment has already been done
         const char *node_name = "no node";
 
         if (rsc->private->assigned_node != NULL) {
             node_name = pcmk__node_name(rsc->private->assigned_node);
         }
         pcmk__rsc_debug(rsc, "%s: pre-assigned to %s", rsc->id, node_name);
         return rsc->private->assigned_node;
     }
 
     // Ensure we detect assignment loops
     if (pcmk_is_set(rsc->flags, pcmk__rsc_assigning)) {
         pcmk__rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id);
         return NULL;
     }
     pcmk__set_rsc_flags(rsc, pcmk__rsc_assigning);
 
     pe__show_node_scores(true, rsc, "Pre-assignment",
                          rsc->private->allowed_nodes, scheduler);
 
     this_with_colocations = pcmk__this_with_colocations(rsc);
     with_this_colocations = pcmk__with_this_colocations(rsc);
 
     // Apply mandatory colocations first, to satisfy as many as possible
     for (iter = this_with_colocations; iter != NULL; iter = iter->next) {
         colocation = iter->data;
 
         if ((colocation->score <= -PCMK_SCORE_INFINITY)
             || (colocation->score >= PCMK_SCORE_INFINITY)) {
             apply_this_with(colocation, rsc);
         }
     }
     for (iter = with_this_colocations; iter != NULL; iter = iter->next) {
         colocation = iter->data;
 
         if ((colocation->score <= -PCMK_SCORE_INFINITY)
             || (colocation->score >= PCMK_SCORE_INFINITY)) {
             pcmk__add_dependent_scores(colocation, rsc);
         }
     }
 
     pe__show_node_scores(true, rsc, "Mandatory-colocations",
                          rsc->private->allowed_nodes, scheduler);
 
     // Then apply optional colocations
     for (iter = this_with_colocations; iter != NULL; iter = iter->next) {
         colocation = iter->data;
 
         if ((colocation->score > -PCMK_SCORE_INFINITY)
             && (colocation->score < PCMK_SCORE_INFINITY)) {
             apply_this_with(colocation, rsc);
         }
     }
     for (iter = with_this_colocations; iter != NULL; iter = iter->next) {
         colocation = iter->data;
 
         if ((colocation->score > -PCMK_SCORE_INFINITY)
             && (colocation->score < PCMK_SCORE_INFINITY)) {
             pcmk__add_dependent_scores(colocation, rsc);
         }
     }
 
     g_list_free(this_with_colocations);
     g_list_free(with_this_colocations);
 
     if (rsc->private->next_role == pcmk_role_stopped) {
         pcmk__rsc_trace(rsc,
                         "Banning %s from all nodes because it will be stopped",
                         rsc->id);
         resource_location(rsc, NULL, -PCMK_SCORE_INFINITY,
                           PCMK_META_TARGET_ROLE, scheduler);
 
     } else if ((rsc->private->next_role > rsc->private->orig_role)
                && !pcmk_is_set(scheduler->flags, pcmk_sched_quorate)
                && (scheduler->no_quorum_policy == pcmk_no_quorum_freeze)) {
         crm_notice("Resource %s cannot be elevated from %s to %s due to "
                    PCMK_OPT_NO_QUORUM_POLICY "=" PCMK_VALUE_FREEZE,
                    rsc->id, pcmk_role_text(rsc->private->orig_role),
                    pcmk_role_text(rsc->private->next_role));
         pe__set_next_role(rsc, rsc->private->orig_role,
                           PCMK_OPT_NO_QUORUM_POLICY "=" PCMK_VALUE_FREEZE);
     }
 
     pe__show_node_scores(!pcmk_is_set(scheduler->flags,
                                       pcmk_sched_output_scores),
                          rsc, __func__, rsc->private->allowed_nodes, scheduler);
 
     // Unmanage resource if fencing is enabled but no device is configured
     if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)
         && !pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) {
         pcmk__clear_rsc_flags(rsc, pcmk__rsc_managed);
     }
 
     if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
         // Unmanaged resources stay on their current node
         const char *reason = NULL;
         pcmk_node_t *assign_to = NULL;
 
         pe__set_next_role(rsc, rsc->private->orig_role, "unmanaged");
         assign_to = pcmk__current_node(rsc);
         if (assign_to == NULL) {
             reason = "inactive";
         } else if (rsc->private->orig_role == pcmk_role_promoted) {
             reason = "promoted";
         } else if (pcmk_is_set(rsc->flags, pcmk__rsc_failed)) {
             reason = "failed";
         } else {
             reason = "active";
         }
         pcmk__rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id,
                        (assign_to? assign_to->private->name : "no node"),
                        reason);
         pcmk__assign_resource(rsc, assign_to, true, stop_if_fail);
 
     } else if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_all)) {
         // Must stop at some point, but be consistent with stop_if_fail
         if (stop_if_fail) {
             pcmk__rsc_debug(rsc,
                             "Forcing %s to stop: " PCMK_OPT_STOP_ALL_RESOURCES,
                             rsc->id);
         }
         pcmk__assign_resource(rsc, NULL, true, stop_if_fail);
 
     } else if (!assign_best_node(rsc, prefer, stop_if_fail)) {
         // Assignment failed
         if (!pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
             pcmk__rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
         } else if ((rsc->private->active_nodes != NULL) && stop_if_fail) {
             pcmk__rsc_info(rsc, "Stopping removed resource %s", rsc->id);
         }
     }
 
     pcmk__clear_rsc_flags(rsc, pcmk__rsc_assigning);
 
     if (pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) {
         remote_connection_assigned(rsc);
     }
 
     return rsc->private->assigned_node;
 }
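 
 /* A compact sketch of the two-pass constraint application used above: apply
  * every mandatory (infinite-score) constraint first so mandatory placement
  * wins any conflict, then layer the optional ones on top. demo_constraint
  * and the apply callback are simplified stand-ins, with INT_MIN/INT_MAX
  * playing the role of -/+ PCMK_SCORE_INFINITY.
  */
 #include <limits.h>
 #include <stddef.h>
 
 struct demo_constraint {
     int score;
 };
 
 static void
 demo_apply_in_priority_order(struct demo_constraint *constraints, size_t n,
                              void (*apply)(struct demo_constraint *))
 {
     for (size_t i = 0; i < n; i++) { // Pass 1: mandatory only
         if ((constraints[i].score == INT_MIN)
             || (constraints[i].score == INT_MAX)) {
             apply(&constraints[i]);
         }
     }
     for (size_t i = 0; i < n; i++) { // Pass 2: everything else
         if ((constraints[i].score != INT_MIN)
             && (constraints[i].score != INT_MAX)) {
             apply(&constraints[i]);
         }
     }
 }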
 
 /*!
  * \internal
  * \brief Schedule actions to bring resource down and back to current role
  *
  * \param[in,out] rsc           Resource to restart
  * \param[in,out] current       Node that resource should be brought down on
  * \param[in]     need_stop     Whether the resource must be stopped
  * \param[in]     need_promote  Whether the resource must be promoted
  */
 static void
 schedule_restart_actions(pcmk_resource_t *rsc, pcmk_node_t *current,
                          bool need_stop, bool need_promote)
 {
     enum rsc_role_e role = rsc->private->orig_role;
     enum rsc_role_e next_role;
     rsc_transition_fn fn = NULL;
 
     pcmk__set_rsc_flags(rsc, pcmk__rsc_restarting);
 
     // Bring resource down to a stop on its current node
     while (role != pcmk_role_stopped) {
         next_role = rsc_state_matrix[role][pcmk_role_stopped];
         pcmk__rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
                         (need_stop? "required" : "optional"), rsc->id,
                         pcmk_role_text(role), pcmk_role_text(next_role));
         fn = rsc_action_matrix[role][next_role];
         if (fn == NULL) {
             break;
         }
         fn(rsc, current, !need_stop);
         role = next_role;
     }
 
     // Bring resource up to its next role on its next node
     while ((rsc->private->orig_role <= rsc->private->next_role)
            && (role != rsc->private->orig_role)
            && !pcmk_is_set(rsc->flags, pcmk__rsc_blocked)) {
         bool required = need_stop;
 
         next_role = rsc_state_matrix[role][rsc->private->orig_role];
         if ((next_role == pcmk_role_promoted) && need_promote) {
             required = true;
         }
         pcmk__rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
                         (required? "required" : "optional"), rsc->id,
                         pcmk_role_text(role), pcmk_role_text(next_role));
         fn = rsc_action_matrix[role][next_role];
         if (fn == NULL) {
             break;
         }
         fn(rsc, rsc->private->assigned_node, !required);
         role = next_role;
     }
 
     pcmk__clear_rsc_flags(rsc, pcmk__rsc_restarting);
 }
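 
 /* A self-contained miniature of the state-matrix walk above: each cell of
  * the matrix names the next role on the path toward a target, and the loop
  * takes one step at a time until it arrives. This is a simplified
  * three-role model for illustration; the real rsc_state_matrix also covers
  * the Unknown and Unpromoted roles.
  */
 enum demo_role {
     DEMO_STOPPED = 0, DEMO_STARTED, DEMO_PROMOTED, DEMO_ROLE_MAX
 };
 
 static const enum demo_role demo_next_step[DEMO_ROLE_MAX][DEMO_ROLE_MAX] = {
     /* from Stopped  */ { DEMO_STOPPED, DEMO_STARTED, DEMO_STARTED  },
     /* from Started  */ { DEMO_STOPPED, DEMO_STARTED, DEMO_PROMOTED },
     /* from Promoted */ { DEMO_STARTED, DEMO_STARTED, DEMO_PROMOTED },
 };
 
 static void
 demo_transition(enum demo_role role, enum demo_role target,
                 void (*schedule)(enum demo_role from, enum demo_role to))
 {
     while (role != target) {
         enum demo_role next = demo_next_step[role][target];
 
         schedule(role, next); // One scheduled action per single-role step
         role = next;
     }
 }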
 
 /*!
  * \internal
  * \brief If a resource's next role is not explicitly specified, set a default
  *
  * \param[in,out] rsc  Resource to set next role for
  *
  * \return "explicit" if next role was explicitly set, otherwise "implicit"
  */
 static const char *
 set_default_next_role(pcmk_resource_t *rsc)
 {
     if (rsc->private->next_role != pcmk_role_unknown) {
         return "explicit";
     }
 
     if (rsc->private->assigned_node == NULL) {
         pe__set_next_role(rsc, pcmk_role_stopped, "assignment");
     } else {
         pe__set_next_role(rsc, pcmk_role_started, "assignment");
     }
     return "implicit";
 }
 
 /*!
  * \internal
  * \brief Create an action to represent an already pending start
  *
  * \param[in,out] rsc  Resource to create start action for
  */
 static void
 create_pending_start(pcmk_resource_t *rsc)
 {
     pcmk_action_t *start = NULL;
 
     pcmk__rsc_trace(rsc,
                     "Creating action for %s to represent already pending start",
                     rsc->id);
     start = start_action(rsc, rsc->private->assigned_node, TRUE);
     pcmk__set_action_flags(start, pcmk__action_always_in_graph);
 }
 
 /*!
  * \internal
  * \brief Schedule actions needed to take a resource to its next role
  *
  * \param[in,out] rsc  Resource to schedule actions for
  */
 static void
 schedule_role_transition_actions(pcmk_resource_t *rsc)
 {
     enum rsc_role_e role = rsc->private->orig_role;
 
     while (role != rsc->private->next_role) {
         enum rsc_role_e next_role =
             rsc_state_matrix[role][rsc->private->next_role];
         rsc_transition_fn fn = NULL;
 
         pcmk__rsc_trace(rsc,
                         "Creating action to take %s from %s to %s "
                         "(ending at %s)",
                         rsc->id, pcmk_role_text(role),
                         pcmk_role_text(next_role),
                         pcmk_role_text(rsc->private->next_role));
         fn = rsc_action_matrix[role][next_role];
         if (fn == NULL) {
             break;
         }
         fn(rsc, rsc->private->assigned_node, false);
         role = next_role;
     }
 }
 
 /*!
  * \internal
  * \brief Create all actions needed for a given primitive resource
  *
  * \param[in,out] rsc  Primitive resource to create actions for
  */
 void
 pcmk__primitive_create_actions(pcmk_resource_t *rsc)
 {
     bool need_stop = false;
     bool need_promote = false;
     bool is_moving = false;
     bool allow_migrate = false;
     bool multiply_active = false;
 
     pcmk_node_t *current = NULL;
     pcmk_node_t *migration_target = NULL;
     unsigned int num_all_active = 0;
     unsigned int num_clean_active = 0;
     const char *next_role_source = NULL;
 
     CRM_ASSERT(pcmk__is_primitive(rsc));
 
     next_role_source = set_default_next_role(rsc);
     pcmk__rsc_trace(rsc,
                     "Creating all actions for %s transition from %s to %s "
                     "(%s) on %s",
                     rsc->id, pcmk_role_text(rsc->private->orig_role),
                     pcmk_role_text(rsc->private->next_role), next_role_source,
                     pcmk__node_name(rsc->private->assigned_node));
 
     current = rsc->private->fns->active_node(rsc, &num_all_active,
                                              &num_clean_active);
 
     g_list_foreach(rsc->private->dangling_migration_sources,
                    pcmk__abort_dangling_migration, rsc);
 
     if ((current != NULL) && (rsc->private->assigned_node != NULL)
         && !pcmk__same_node(current, rsc->private->assigned_node)
         && (rsc->private->next_role >= pcmk_role_started)) {
 
         pcmk__rsc_trace(rsc, "Moving %s from %s to %s",
                         rsc->id, pcmk__node_name(current),
                         pcmk__node_name(rsc->private->assigned_node));
         is_moving = true;
         allow_migrate = pcmk__rsc_can_migrate(rsc, current);
 
         // This is needed even if migrating (though I'm not sure why ...)
         need_stop = true;
     }
 
     // Check whether resource is partially migrated and/or multiply active
     migration_target = rsc->private->partial_migration_target;
     if ((rsc->private->partial_migration_source != NULL)
         && (migration_target != NULL) && allow_migrate && (num_all_active == 2)
         && pcmk__same_node(current, rsc->private->partial_migration_source)
         && pcmk__same_node(rsc->private->assigned_node, migration_target)) {
         /* A partial migration is in progress, and the migration target remains
          * the same as when the migration began.
          */
         pcmk__rsc_trace(rsc,
                         "Partial migration of %s from %s to %s will continue",
                         rsc->id,
                         pcmk__node_name(rsc->private->partial_migration_source),
                         pcmk__node_name(migration_target));
 
     } else if ((rsc->private->partial_migration_source != NULL)
                || (migration_target != NULL)) {
         // A partial migration is in progress but can't be continued
 
         if (num_all_active > 2) {
             // The resource is migrating *and* multiply active!
             crm_notice("Forcing recovery of %s because it is migrating "
                        "from %s to %s and possibly active elsewhere",
                        rsc->id,
                        pcmk__node_name(rsc->private->partial_migration_source),
                        pcmk__node_name(migration_target));
         } else {
             // The migration source or target isn't available
             crm_notice("Forcing recovery of %s because it can no longer "
                        "migrate from %s to %s",
                        rsc->id,
                        pcmk__node_name(rsc->private->partial_migration_source),
                        pcmk__node_name(migration_target));
         }
         need_stop = true;
         rsc->private->partial_migration_source = NULL;
         rsc->private->partial_migration_target = NULL;
         allow_migrate = false;
 
     } else if (pcmk_is_set(rsc->flags, pcmk__rsc_needs_fencing)) {
         multiply_active = (num_all_active > 1);
     } else {
         /* If a resource has PCMK_META_REQUIRES set to PCMK_VALUE_NOTHING or
          * PCMK_VALUE_QUORUM, don't consider it active on unclean nodes (similar
          * to how all resources behave when PCMK_OPT_STONITH_ENABLED is false).
          * We can start such resources elsewhere before fencing completes, and
          * if we considered the resource active on the failed node, we would
          * attempt recovery for being active on multiple nodes.
          */
         multiply_active = (num_clean_active > 1);
     }
 
     if (multiply_active) {
         const char *class = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
 
         // Resource was (possibly) incorrectly multiply active
-        pcmk__sched_err("%s resource %s might be active on %u nodes (%s)",
+        pcmk__sched_err(rsc->private->scheduler,
+                        "%s resource %s might be active on %u nodes (%s)",
                         pcmk__s(class, "Untyped"), rsc->id, num_all_active,
                         pcmk__multiply_active_text(rsc));
         crm_notice("For more information, see \"What are multiply active "
                    "resources?\" at "
                    "https://projects.clusterlabs.org/w/clusterlabs/faq/");
 
         switch (rsc->private->multiply_active_policy) {
             case pcmk__multiply_active_restart:
                 need_stop = true;
                 break;
             case pcmk__multiply_active_unexpected:
                 need_stop = true; // stop_resource() will skip expected node
                 pcmk__set_rsc_flags(rsc, pcmk__rsc_stop_unexpected);
                 break;
             default:
                 break;
         }
 
     } else {
         pcmk__clear_rsc_flags(rsc, pcmk__rsc_stop_unexpected);
     }
 
     if (pcmk_is_set(rsc->flags, pcmk__rsc_start_pending)) {
         create_pending_start(rsc);
     }
 
     if (is_moving) {
         // Remaining tests are only for resources staying where they are
 
     } else if (pcmk_is_set(rsc->flags, pcmk__rsc_failed)) {
         if (pcmk_is_set(rsc->flags, pcmk__rsc_stop_if_failed)) {
             need_stop = true;
             pcmk__rsc_trace(rsc, "Recovering %s", rsc->id);
         } else {
             pcmk__rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
             if (rsc->private->next_role == pcmk_role_promoted) {
                 need_promote = true;
             }
         }
 
     } else if (pcmk_is_set(rsc->flags, pcmk__rsc_blocked)) {
         pcmk__rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
         need_stop = true;
 
     } else if ((rsc->private->orig_role > pcmk_role_started)
                && (current != NULL)
                && (rsc->private->assigned_node != NULL)) {
         pcmk_action_t *start = NULL;
 
         pcmk__rsc_trace(rsc, "Creating start action for promoted resource %s",
                         rsc->id);
         start = start_action(rsc, rsc->private->assigned_node, TRUE);
         if (!pcmk_is_set(start->flags, pcmk__action_optional)) {
             // Recovery of a promoted resource
             pcmk__rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
             need_stop = true;
         }
     }
 
     // Create any actions needed to bring resource down and back up to same role
     schedule_restart_actions(rsc, current, need_stop, need_promote);
 
     // Create any actions needed to take resource from this role to the next
     schedule_role_transition_actions(rsc);
 
     pcmk__create_recurring_actions(rsc);
 
     if (allow_migrate) {
         pcmk__create_migration_actions(rsc, current);
     }
 }
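 
 /* The multiply-active test above, distilled: resources that require fencing
  * count every known activation, while resources that may safely start before
  * fencing completes count only activations on clean nodes. Plain parameters
  * stand in for the real flag and counter plumbing.
  */
 #include <stdbool.h>
 
 static bool
 demo_is_multiply_active(bool needs_fencing, unsigned int num_all_active,
                         unsigned int num_clean_active)
 {
     if (needs_fencing) {
         return num_all_active > 1; // Activity on unclean nodes counts too
     }
     return num_clean_active > 1;   // Ignore activity on unclean nodes
 }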
 
 /*!
  * \internal
  * \brief Ban a resource from any allowed nodes that are Pacemaker Remote nodes
  *
  * \param[in] rsc  Resource to check
  */
 static void
 rsc_avoids_remote_nodes(const pcmk_resource_t *rsc)
 {
     GHashTableIter iter;
     pcmk_node_t *node = NULL;
 
     g_hash_table_iter_init(&iter, rsc->private->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
         if (node->private->remote != NULL) {
             node->assign->score = -PCMK_SCORE_INFINITY;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Return allowed nodes as (possibly sorted) list
  *
  * Convert a resource's hash table of allowed nodes to a list. If printing to
  * stdout, sort the list, to keep action ID numbers consistent for regression
  * test output (while avoiding the performance hit on a live cluster).
  *
  * \param[in] rsc       Resource to check for allowed nodes
  *
  * \return List of resource's allowed nodes
  * \note Callers should take care not to rely on the list being sorted.
  */
 static GList *
 allowed_nodes_as_list(const pcmk_resource_t *rsc)
 {
     GList *allowed_nodes = NULL;
 
     if (rsc->private->allowed_nodes != NULL) {
         allowed_nodes = g_hash_table_get_values(rsc->private->allowed_nodes);
     }
 
     if (!pcmk__is_daemon) {
         allowed_nodes = g_list_sort(allowed_nodes, pe__cmp_node_name);
     }
 
     return allowed_nodes;
 }
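 
 /* The same hash-table-to-sorted-list conversion in isolation, with a plain
  * string comparator. demo_cmp() is a stand-in for pe__cmp_node_name();
  * g_hash_table_get_values() and g_list_sort() are the real GLib API.
  */
 #include <glib.h>
 
 static gint
 demo_cmp(gconstpointer a, gconstpointer b)
 {
     return g_strcmp0((const char *) a, (const char *) b);
 }
 
 static GList *
 demo_sorted_values(GHashTable *table, gboolean sort)
 {
     GList *values = g_hash_table_get_values(table); // Unordered snapshot
 
     if (sort) { // Sorting is skipped on live clusters to avoid the cost
         values = g_list_sort(values, demo_cmp);
     }
     return values; // Caller frees the list (not the values) with g_list_free()
 }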
 
 /*!
  * \internal
  * \brief Create implicit constraints needed for a primitive resource
  *
  * \param[in,out] rsc  Primitive resource to create implicit constraints for
  */
 void
 pcmk__primitive_internal_constraints(pcmk_resource_t *rsc)
 {
     GList *allowed_nodes = NULL;
     bool check_unfencing = false;
     bool check_utilization = false;
     pcmk_scheduler_t *scheduler = NULL;
 
     CRM_ASSERT(pcmk__is_primitive(rsc));
     scheduler = rsc->private->scheduler;
 
     if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
         pcmk__rsc_trace(rsc,
                         "Skipping implicit constraints for unmanaged resource "
                         "%s", rsc->id);
         return;
     }
 
     // Whether resource requires unfencing
     check_unfencing = !pcmk_is_set(rsc->flags, pcmk__rsc_fence_device)
                       && pcmk_is_set(scheduler->flags,
                                      pcmk_sched_enable_unfencing)
                       && pcmk_is_set(rsc->flags, pcmk__rsc_needs_unfencing);
 
     // Whether a non-default placement strategy is used
     check_utilization = (g_hash_table_size(rsc->private->utilization) > 0)
                          && !pcmk__str_eq(scheduler->placement_strategy,
                                           PCMK_VALUE_DEFAULT, pcmk__str_casei);
 
     // Order stops before starts (i.e. restart)
     pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0), NULL,
                        rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0), NULL,
                        pcmk__ar_ordered
                        |pcmk__ar_first_implies_then
                        |pcmk__ar_intermediate_stop, scheduler);
 
     // Promotable ordering: demote before stop, start before promote
     if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
                     pcmk__rsc_promotable)
         || (rsc->private->orig_role > pcmk_role_unpromoted)) {
 
         pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_DEMOTE, 0),
                            NULL,
                            rsc, pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0),
                            NULL,
                            pcmk__ar_promoted_then_implies_first, scheduler);
 
         pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_START, 0),
                            NULL,
                            rsc, pcmk__op_key(rsc->id, PCMK_ACTION_PROMOTE, 0),
                            NULL,
                            pcmk__ar_unrunnable_first_blocks, scheduler);
     }
 
     // Don't clear resource history if probing on same node
     pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, PCMK_ACTION_LRM_DELETE, 0),
                        NULL, rsc,
                        pcmk__op_key(rsc->id, PCMK_ACTION_MONITOR, 0),
                        NULL,
                        pcmk__ar_if_on_same_node|pcmk__ar_then_cancels_first,
                        scheduler);
 
     // Certain checks need allowed nodes
     if (check_unfencing || check_utilization
         || (rsc->private->launcher != NULL)) {
 
         allowed_nodes = allowed_nodes_as_list(rsc);
     }
 
     if (check_unfencing) {
         g_list_foreach(allowed_nodes, pcmk__order_restart_vs_unfence, rsc);
     }
 
     if (check_utilization) {
         pcmk__create_utilization_constraints(rsc, allowed_nodes);
     }
 
     if (rsc->private->launcher != NULL) {
         pcmk_resource_t *remote_rsc = NULL;
 
         if (pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) {
             // rsc is the implicit remote connection for a guest or bundle node
 
             /* Guest resources are not allowed to run on Pacemaker Remote nodes,
              * to avoid nesting remotes. However, bundles are allowed.
              */
             if (!pcmk_is_set(rsc->flags, pcmk__rsc_remote_nesting_allowed)) {
                 rsc_avoids_remote_nodes(rsc->private->launcher);
             }
 
             /* If someone cleans up a guest or bundle node's launcher, we will
              * likely schedule a (re-)probe of the launcher and recovery of the
              * connection. Order the connection stop after the launcher probe,
              * so that if we detect the launcher running, we will trigger a new
              * transition and avoid the unnecessary recovery.
              */
             pcmk__order_resource_actions(rsc->private->launcher,
                                          PCMK_ACTION_MONITOR,
                                          rsc, PCMK_ACTION_STOP,
                                          pcmk__ar_ordered);
 
         /* A user can specify that a resource must start on a Pacemaker Remote
          * node by explicitly configuring it with the PCMK__META_CONTAINER
          * meta-attribute. This is of questionable merit, since location
          * constraints can accomplish the same thing. But we support it, so here
          * we check whether a resource (that is not itself a remote connection)
          * has PCMK__META_CONTAINER set to a remote node or guest node resource.
          */
         } else if (pcmk_is_set(rsc->private->launcher->flags,
                                pcmk__rsc_is_remote_connection)) {
             remote_rsc = rsc->private->launcher;
         } else {
             remote_rsc =
                 pe__resource_contains_guest_node(scheduler,
                                                  rsc->private->launcher);
         }
 
         if (remote_rsc != NULL) {
             /* Force the resource on the Pacemaker Remote node instead of
              * colocating the resource with the launcher.
              */
             for (GList *item = allowed_nodes; item; item = item->next) {
                 pcmk_node_t *node = item->data;
 
                 if (node->private->remote != remote_rsc) {
                     node->assign->score = -PCMK_SCORE_INFINITY;
                 }
             }
 
         } else {
             /* This resource is either launched by a resource that does NOT
              * represent a Pacemaker Remote node, or a Pacemaker Remote
              * connection resource for a guest node or bundle.
              */
             int score;
 
             crm_trace("Order and colocate %s relative to its launcher %s",
                       rsc->id, rsc->private->launcher->id);
 
             pcmk__new_ordering(rsc->private->launcher,
                                pcmk__op_key(rsc->private->launcher->id,
                                             PCMK_ACTION_START, 0),
                                NULL, rsc,
                                pcmk__op_key(rsc->id, PCMK_ACTION_START, 0),
                                NULL,
                                pcmk__ar_first_implies_then
                                |pcmk__ar_unrunnable_first_blocks, scheduler);
 
             pcmk__new_ordering(rsc,
                                pcmk__op_key(rsc->id, PCMK_ACTION_STOP, 0),
                                NULL,
                                rsc->private->launcher,
                                pcmk__op_key(rsc->private->launcher->id,
                                             PCMK_ACTION_STOP, 0),
                                NULL, pcmk__ar_then_implies_first, scheduler);
 
             if (pcmk_is_set(rsc->flags, pcmk__rsc_remote_nesting_allowed)) {
                 score = 10000;    /* Highly preferred but not essential */
             } else {
                 score = PCMK_SCORE_INFINITY; // Force to run on same host
             }
             pcmk__new_colocation("#resource-with-container", NULL, score, rsc,
                                  rsc->private->launcher, NULL, NULL,
                                  pcmk__coloc_influence);
         }
     }
 
     if (pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)
         || pcmk_is_set(rsc->flags, pcmk__rsc_fence_device)) {
         /* Remote connections and fencing devices are not allowed to run on
          * Pacemaker Remote nodes
          */
         rsc_avoids_remote_nodes(rsc);
     }
     g_list_free(allowed_nodes);
 }
 
 /*!
  * \internal
  * \brief Apply a colocation's score to node scores or resource priority
  *
  * Given a colocation constraint, apply its score to the dependent's
  * allowed node scores (if we are still placing resources) or priority (if
  * we are choosing promotable clone instance roles).
  *
  * \param[in,out] dependent      Dependent resource in colocation
  * \param[in]     primary        Primary resource in colocation
  * \param[in]     colocation     Colocation constraint to apply
  * \param[in]     for_dependent  true if called on behalf of dependent
  */
 void
 pcmk__primitive_apply_coloc_score(pcmk_resource_t *dependent,
                                   const pcmk_resource_t *primary,
                                   const pcmk__colocation_t *colocation,
                                   bool for_dependent)
 {
     enum pcmk__coloc_affects filter_results;
 
     CRM_ASSERT((dependent != NULL) && (primary != NULL)
                && (colocation != NULL));
 
     if (for_dependent) {
         // Always process on behalf of primary resource
         primary->private->cmds->apply_coloc_score(dependent, primary,
                                                   colocation, false);
         return;
     }
 
     filter_results = pcmk__colocation_affects(dependent, primary, colocation,
                                               false);
     pcmk__rsc_trace(dependent, "%s %s with %s (%s, score=%d, filter=%d)",
                     ((colocation->score > 0)? "Colocating" : "Anti-colocating"),
                     dependent->id, primary->id, colocation->id,
                     colocation->score,
                     filter_results);
 
     switch (filter_results) {
         case pcmk__coloc_affects_role:
             pcmk__apply_coloc_to_priority(dependent, primary, colocation);
             break;
         case pcmk__coloc_affects_location:
             pcmk__apply_coloc_to_scores(dependent, primary, colocation);
             break;
         default: // pcmk__coloc_affects_nothing
             return;
     }
 }
 
 /* Primitive implementation of
  * pcmk__assignment_methods_t:with_this_colocations()
  */
 void
 pcmk__with_primitive_colocations(const pcmk_resource_t *rsc,
                                  const pcmk_resource_t *orig_rsc, GList **list)
 {
     const pcmk_resource_t *parent = NULL;
 
     CRM_ASSERT(pcmk__is_primitive(rsc) && (list != NULL));
     parent = rsc->private->parent;
 
     if (rsc == orig_rsc) {
         /* For the resource itself, add all of its own colocations and relevant
          * colocations from its parent (if any).
          */
         pcmk__add_with_this_list(list, rsc->private->with_this_colocations,
                                  orig_rsc);
         if (parent != NULL) {
             parent->private->cmds->with_this_colocations(parent, orig_rsc,
                                                          list);
         }
     } else {
         // For an ancestor, add only explicitly configured constraints
         for (GList *iter = rsc->private->with_this_colocations;
              iter != NULL; iter = iter->next) {
             pcmk__colocation_t *colocation = iter->data;
 
             if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) {
                 pcmk__add_with_this(list, colocation, orig_rsc);
             }
         }
     }
 }
 
 /* Primitive implementation of
  * pcmk__assignment_methods_t:this_with_colocations()
  */
 void
 pcmk__primitive_with_colocations(const pcmk_resource_t *rsc,
                                  const pcmk_resource_t *orig_rsc, GList **list)
 {
     const pcmk_resource_t *parent = NULL;
 
     CRM_ASSERT(pcmk__is_primitive(rsc) && (list != NULL));
     parent = rsc->private->parent;
 
     if (rsc == orig_rsc) {
         /* For the resource itself, add all of its own colocations and relevant
          * colocations from its parent (if any).
          */
         pcmk__add_this_with_list(list, rsc->private->this_with_colocations,
                                  orig_rsc);
         if (parent != NULL) {
             parent->private->cmds->this_with_colocations(parent, orig_rsc,
                                                          list);
         }
     } else {
         // For an ancestor, add only explicitly configured constraints
         for (GList *iter = rsc->private->this_with_colocations;
              iter != NULL; iter = iter->next) {
             pcmk__colocation_t *colocation = iter->data;
 
             if (pcmk_is_set(colocation->flags, pcmk__coloc_explicit)) {
                 pcmk__add_this_with(list, colocation, orig_rsc);
             }
         }
     }
 }
 
 /*!
  * \internal
  * \brief Return action flags for a given primitive resource action
  *
  * \param[in,out] action  Action to get flags for
  * \param[in]     node    If not NULL, limit effects to this node (ignored)
  *
  * \return Flags appropriate to \p action on \p node
  */
 uint32_t
 pcmk__primitive_action_flags(pcmk_action_t *action, const pcmk_node_t *node)
 {
     CRM_ASSERT(action != NULL);
     return (uint32_t) action->flags;
 }
 
 /*!
  * \internal
  * \brief Check whether a node is a multiply active resource's expected node
  *
  * \param[in] rsc   Resource to check
  * \param[in] node  Node to check
  *
  * \return \c true if \p rsc is multiply active with
  *         \c PCMK_META_MULTIPLE_ACTIVE set to \c PCMK_VALUE_STOP_UNEXPECTED,
  *         and \p node is the node where it will remain active
  * \note This assumes that the resource's next role cannot be changed to stopped
  *       after this is called, which should be reasonable if status has already
  *       been unpacked and resources have been assigned to nodes.
  */
 static bool
 is_expected_node(const pcmk_resource_t *rsc, const pcmk_node_t *node)
 {
     return pcmk_all_flags_set(rsc->flags,
                               pcmk__rsc_stop_unexpected|pcmk__rsc_restarting)
            && (rsc->private->next_role > pcmk_role_stopped)
            && pcmk__same_node(rsc->private->assigned_node, node);
 }
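 
 /* What the pcmk_all_flags_set() check above reduces to in plain C: every bit
  * in the mask must be present at once. The flag names are hypothetical.
  */
 #include <stdbool.h>
 #include <stdint.h>
 
 #define DEMO_FLAG_STOP_UNEXPECTED   (UINT32_C(1) << 0)
 #define DEMO_FLAG_RESTARTING        (UINT32_C(1) << 1)
 
 static bool
 demo_all_flags_set(uint32_t flags, uint32_t wanted)
 {
     return (flags & wanted) == wanted; // True only if every wanted bit is set
 }
 
 // demo_all_flags_set(flags, DEMO_FLAG_STOP_UNEXPECTED|DEMO_FLAG_RESTARTING)
 // mirrors the combined restarting/stop-unexpected test in is_expected_node().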
 
 /*!
  * \internal
  * \brief Schedule actions needed to stop a resource wherever it is active
  *
  * \param[in,out] rsc       Resource being stopped
  * \param[in]     node      Node where resource is being stopped (ignored)
  * \param[in]     optional  Whether actions should be optional
  */
 static void
 stop_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
 {
     for (GList *iter = rsc->private->active_nodes;
          iter != NULL; iter = iter->next) {
 
         pcmk_node_t *current = (pcmk_node_t *) iter->data;
         pcmk_action_t *stop = NULL;
 
         if (is_expected_node(rsc, current)) {
             /* We are scheduling restart actions for a multiply active resource
              * with PCMK_META_MULTIPLE_ACTIVE=PCMK_VALUE_STOP_UNEXPECTED, and
              * this is where it should not be stopped.
              */
             pcmk__rsc_trace(rsc,
                             "Skipping stop of multiply active resource %s "
                             "on expected node %s",
                             rsc->id, pcmk__node_name(current));
             continue;
         }
 
         if (rsc->private->partial_migration_target != NULL) {
             // Continue migration if node originally was and remains target
             if (pcmk__same_node(current, rsc->private->partial_migration_target)
                 && pcmk__same_node(current, rsc->private->assigned_node)) {
                 pcmk__rsc_trace(rsc,
                                 "Skipping stop of %s on %s "
                                 "because partial migration there will continue",
                                 rsc->id, pcmk__node_name(current));
                 continue;
             } else {
                 pcmk__rsc_trace(rsc,
                                 "Forcing stop of %s on %s "
                                 "because migration target changed",
                                 rsc->id, pcmk__node_name(current));
                 optional = false;
             }
         }
 
         pcmk__rsc_trace(rsc, "Scheduling stop of %s on %s",
                         rsc->id, pcmk__node_name(current));
         stop = stop_action(rsc, current, optional);
 
         if (rsc->private->assigned_node == NULL) {
             pe_action_set_reason(stop, "node availability", true);
         } else if (pcmk_all_flags_set(rsc->flags, pcmk__rsc_restarting
                                                   |pcmk__rsc_stop_unexpected)) {
             /* We are stopping a multiply active resource on a node that is
              * not its expected node, and we are still scheduling restart
              * actions, so the stop is for being multiply active.
              */
             pe_action_set_reason(stop, "being multiply active", true);
         }
 
         if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
             pcmk__clear_action_flags(stop, pcmk__action_runnable);
         }
 
         if (pcmk_is_set(rsc->private->scheduler->flags,
                         pcmk_sched_remove_after_stop)) {
             pcmk__schedule_cleanup(rsc, current, optional);
         }
 
         if (pcmk_is_set(rsc->flags, pcmk__rsc_needs_unfencing)) {
             pcmk_action_t *unfence = pe_fence_op(current, PCMK_ACTION_ON, true,
                                                  NULL, false,
                                                  rsc->private->scheduler);
 
             order_actions(stop, unfence, pcmk__ar_then_implies_first);
             if (!pcmk__node_unfenced(current)) {
-                pcmk__sched_err("Stopping %s until %s can be unfenced",
+                pcmk__sched_err(rsc->private->scheduler,
+                                "Stopping %s until %s can be unfenced",
                                 rsc->id, pcmk__node_name(current));
             }
         }
     }
 }
 
 /*!
  * \internal
  * \brief Schedule actions needed to start a resource on a node
  *
  * \param[in,out] rsc       Resource being started
  * \param[in,out] node      Node where resource should be started
  * \param[in]     optional  Whether actions should be optional
  */
 static void
 start_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
 {
     pcmk_action_t *start = NULL;
 
     CRM_ASSERT(node != NULL);
 
     pcmk__rsc_trace(rsc, "Scheduling %s start of %s on %s (score %d)",
                     (optional? "optional" : "required"), rsc->id,
                     pcmk__node_name(node), node->assign->score);
     start = start_action(rsc, node, TRUE);
 
     pcmk__order_vs_unfence(rsc, node, start, pcmk__ar_first_implies_then);
 
     if (pcmk_is_set(start->flags, pcmk__action_runnable) && !optional) {
         pcmk__clear_action_flags(start, pcmk__action_optional);
     }
 
     if (is_expected_node(rsc, node)) {
         /* This could be a problem if the start becomes necessary for other
          * reasons later.
          */
         pcmk__rsc_trace(rsc,
                         "Start of multiply active resouce %s "
                         "on expected node %s will be a pseudo-action",
                         rsc->id, pcmk__node_name(node));
         pcmk__set_action_flags(start, pcmk__action_pseudo);
     }
 }
 
 /*!
  * \internal
  * \brief Schedule actions needed to promote a resource on a node
  *
  * \param[in,out] rsc       Resource being promoted
  * \param[in]     node      Node where resource should be promoted
  * \param[in]     optional  Whether actions should be optional
  */
 static void
 promote_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
 {
     GList *iter = NULL;
     GList *action_list = NULL;
     bool runnable = true;
 
     CRM_ASSERT(node != NULL);
 
     // Any start must be runnable for promotion to be runnable
     action_list = pe__resource_actions(rsc, node, PCMK_ACTION_START, true);
     for (iter = action_list; iter != NULL; iter = iter->next) {
         pcmk_action_t *start = (pcmk_action_t *) iter->data;
 
         if (!pcmk_is_set(start->flags, pcmk__action_runnable)) {
             runnable = false;
         }
     }
     g_list_free(action_list);
 
     if (runnable) {
         pcmk_action_t *promote = promote_action(rsc, node, optional);
 
         pcmk__rsc_trace(rsc, "Scheduling %s promotion of %s on %s",
                         (optional? "optional" : "required"), rsc->id,
                         pcmk__node_name(node));
 
         if (is_expected_node(rsc, node)) {
             /* This could be a problem if the promote becomes necessary for
              * other reasons later.
              */
             pcmk__rsc_trace(rsc,
                             "Promotion of multiply active resouce %s "
                             "on expected node %s will be a pseudo-action",
                             rsc->id, pcmk__node_name(node));
             pcmk__set_action_flags(promote, pcmk__action_pseudo);
         }
     } else {
         pcmk__rsc_trace(rsc, "Not promoting %s on %s: start unrunnable",
                         rsc->id, pcmk__node_name(node));
         action_list = pe__resource_actions(rsc, node, PCMK_ACTION_PROMOTE,
                                            true);
         for (iter = action_list; iter != NULL; iter = iter->next) {
             pcmk_action_t *promote = (pcmk_action_t *) iter->data;
 
             pcmk__clear_action_flags(promote, pcmk__action_runnable);
         }
         g_list_free(action_list);
     }
 }
 
 /*!
  * \internal
  * \brief Schedule actions needed to demote a resource wherever it is active
  *
  * \param[in,out] rsc       Resource being demoted
  * \param[in]     node      Node where resource should be demoted (ignored)
  * \param[in]     optional  Whether actions should be optional
  */
 static void
 demote_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
 {
     /* Since this will only be called for a primitive (possibly as an instance
      * of a collective resource), the resource is multiply active if it is
      * running on more than one node, so we want to demote on all of them as
      * part of recovery, regardless of which one is the desired node.
      */
     for (GList *iter = rsc->private->active_nodes;
          iter != NULL; iter = iter->next) {
 
         pcmk_node_t *current = (pcmk_node_t *) iter->data;
 
         if (is_expected_node(rsc, current)) {
             pcmk__rsc_trace(rsc,
                             "Skipping demote of multiply active resource %s "
                             "on expected node %s",
                             rsc->id, pcmk__node_name(current));
         } else {
             pcmk__rsc_trace(rsc, "Scheduling %s demotion of %s on %s",
                             (optional? "optional" : "required"), rsc->id,
                             pcmk__node_name(current));
             demote_action(rsc, current, optional);
         }
     }
 }
 
 static void
 assert_role_error(pcmk_resource_t *rsc, pcmk_node_t *node, bool optional)
 {
     CRM_ASSERT(false);
 }
 
 /*!
  * \internal
  * \brief Schedule cleanup of a resource
  *
  * \param[in,out] rsc       Resource to clean up
  * \param[in]     node      Node to clean up on
  * \param[in]     optional  Whether clean-up should be optional
  */
 void
 pcmk__schedule_cleanup(pcmk_resource_t *rsc, const pcmk_node_t *node,
                        bool optional)
 {
     /* If the cleanup is required, its orderings are optional, because they're
      * relevant only if both actions are required. Conversely, if the cleanup is
      * optional, the orderings make the then action required if the first action
      * becomes required.
      */
     uint32_t flag = optional? pcmk__ar_first_implies_then : pcmk__ar_ordered;
 
     CRM_CHECK((rsc != NULL) && (node != NULL), return);
 
     if (pcmk_is_set(rsc->flags, pcmk__rsc_failed)) {
         pcmk__rsc_trace(rsc, "Skipping clean-up of %s on %s: resource failed",
                         rsc->id, pcmk__node_name(node));
         return;
     }
 
     if (node->details->unclean || !node->details->online) {
         pcmk__rsc_trace(rsc, "Skipping clean-up of %s on %s: node unavailable",
                         rsc->id, pcmk__node_name(node));
         return;
     }
 
     crm_notice("Scheduling clean-up of %s on %s",
                rsc->id, pcmk__node_name(node));
     delete_action(rsc, node, optional);
 
     // stop -> clean-up -> start
     pcmk__order_resource_actions(rsc, PCMK_ACTION_STOP,
                                  rsc, PCMK_ACTION_DELETE, flag);
     pcmk__order_resource_actions(rsc, PCMK_ACTION_DELETE,
                                  rsc, PCMK_ACTION_START, flag);
 }
 
 /*!
  * \internal
  * \brief Add primitive meta-attributes relevant to graph actions to XML
  *
  * \param[in]     rsc  Primitive resource whose meta-attributes should be added
  * \param[in,out] xml  Transition graph action attributes XML to add to
  */
 void
 pcmk__primitive_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml)
 {
     char *name = NULL;
     char *value = NULL;
     const pcmk_resource_t *parent = NULL;
 
     CRM_ASSERT(pcmk__is_primitive(rsc) && (xml != NULL));
 
     /* Clone instance numbers get set internally as meta-attributes, and are
      * needed in the transition graph (for example, to tell unique clone
      * instances apart).
      */
     value = g_hash_table_lookup(rsc->private->meta, PCMK__META_CLONE);
     if (value != NULL) {
         name = crm_meta_name(PCMK__META_CLONE);
         crm_xml_add(xml, name, value);
         free(name);
     }
 
     // Not sure if this one is really needed ...
     value = g_hash_table_lookup(rsc->private->meta, PCMK_META_REMOTE_NODE);
     if (value != NULL) {
         name = crm_meta_name(PCMK_META_REMOTE_NODE);
         crm_xml_add(xml, name, value);
         free(name);
     }
 
     /* The PCMK__META_CONTAINER meta-attribute can be set on the primitive
      * itself or one of its ancestors, so check them all and keep the highest.
      */
     for (parent = rsc; parent != NULL; parent = parent->private->parent) {
         if (parent->private->launcher != NULL) {
             crm_xml_add(xml, CRM_META "_" PCMK__META_CONTAINER,
                         parent->private->launcher->id);
         }
     }
 
     /* Bundle replica children will get their external-ip set internally as a
      * meta-attribute. The graph action needs it, but under a different naming
      * convention than other meta-attributes.
      */
     value = g_hash_table_lookup(rsc->private->meta, "external-ip");
     if (value != NULL) {
         crm_xml_add(xml, "pcmk_external_ip", value);
     }
 }
 
 // Primitive implementation of pcmk__assignment_methods_t:add_utilization()
 void
 pcmk__primitive_add_utilization(const pcmk_resource_t *rsc,
                                 const pcmk_resource_t *orig_rsc,
                                 GList *all_rscs, GHashTable *utilization)
 {
     CRM_ASSERT(pcmk__is_primitive(rsc)
                && (orig_rsc != NULL) && (utilization != NULL));
 
     if (!pcmk_is_set(rsc->flags, pcmk__rsc_unassigned)) {
         return;
     }
 
     pcmk__rsc_trace(orig_rsc,
                     "%s: Adding primitive %s as colocated utilization",
                     orig_rsc->id, rsc->id);
     pcmk__release_node_capacity(utilization, rsc);
 }
 
 /*!
  * \internal
  * \brief Get epoch time of node's shutdown attribute (or now if none)
  *
  * \param[in,out] node  Node to check
  *
  * \return Epoch time corresponding to shutdown attribute if set or now if not
  */
 static time_t
 shutdown_time(pcmk_node_t *node)
 {
     const char *shutdown = pcmk__node_attr(node, PCMK__NODE_ATTR_SHUTDOWN, NULL,
                                            pcmk__rsc_node_current);
     time_t result = 0;
 
     if (shutdown != NULL) {
         long long result_ll;
 
         if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) {
             result = (time_t) result_ll;
         }
     }
     return (result == 0)? get_effective_time(node->private->scheduler) : result;
 }
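 
 /* The parse-or-fall-back pattern above using only the standard library:
  * accept a decimal epoch string when it parses cleanly to a positive value,
  * otherwise treat the shutdown as happening now. time(NULL) stands in for
  * get_effective_time(), and strtoll() for pcmk__scan_ll().
  */
 #include <errno.h>
 #include <stdlib.h>
 #include <time.h>
 
 static time_t
 demo_shutdown_time(const char *shutdown_attr)
 {
     if (shutdown_attr != NULL) {
         char *end = NULL;
         long long value = 0;
 
         errno = 0;
         value = strtoll(shutdown_attr, &end, 10);
         if ((errno == 0) && (end != shutdown_attr) && (*end == '\0')
             && (value > 0)) {
             return (time_t) value; // Attribute parsed cleanly
         }
     }
     return time(NULL); // No usable attribute: treat shutdown as "now"
 }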
 
 /*!
  * \internal
  * \brief Ban a resource from a node if it's not locked to the node
  *
  * \param[in]     data       Node to check
  * \param[in,out] user_data  Resource to check
  */
 static void
 ban_if_not_locked(gpointer data, gpointer user_data)
 {
     const pcmk_node_t *node = (const pcmk_node_t *) data;
     pcmk_resource_t *rsc = (pcmk_resource_t *) user_data;
 
     if (!pcmk__same_node(node, rsc->private->lock_node)) {
         resource_location(rsc, node, -PCMK_SCORE_INFINITY,
                           PCMK_OPT_SHUTDOWN_LOCK, rsc->private->scheduler);
     }
 }
 
 // Primitive implementation of pcmk__assignment_methods_t:shutdown_lock()
 void
 pcmk__primitive_shutdown_lock(pcmk_resource_t *rsc)
 {
     const char *class = NULL;
     pcmk_scheduler_t *scheduler = NULL;
 
     CRM_ASSERT(pcmk__is_primitive(rsc));
     scheduler = rsc->private->scheduler;
 
     class = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
 
     // Fence devices and remote connections can't be locked
     if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
         || pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) {
         return;
     }
 
     if (rsc->private->lock_node != NULL) {
         // The lock was obtained from resource history
 
         if (rsc->private->active_nodes != NULL) {
             /* The resource was started elsewhere even though it is now
              * considered locked. This shouldn't be possible, but as a
              * failsafe, we don't want to disturb the resource now.
              */
             pcmk__rsc_info(rsc,
                            "Cancelling shutdown lock "
                            "because %s is already active", rsc->id);
             pe__clear_resource_history(rsc, rsc->private->lock_node);
             rsc->private->lock_node = NULL;
             rsc->private->lock_time = 0;
         }
 
     // Only a resource active on exactly one node can be locked
     } else if (pcmk__list_of_1(rsc->private->active_nodes)) {
         pcmk_node_t *node = rsc->private->active_nodes->data;
 
         if (node->details->shutdown) {
             if (node->details->unclean) {
                 pcmk__rsc_debug(rsc,
                                 "Not locking %s to unclean %s for shutdown",
                                 rsc->id, pcmk__node_name(node));
             } else {
                 rsc->private->lock_node = node;
                 rsc->private->lock_time = shutdown_time(node);
             }
         }
     }
 
     if (rsc->private->lock_node == NULL) {
         // No lock needed
         return;
     }
 
     if (scheduler->shutdown_lock > 0) {
         time_t lock_expiration;
 
         lock_expiration = rsc->private->lock_time + scheduler->shutdown_lock;
         pcmk__rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
                        rsc->id, pcmk__node_name(rsc->private->lock_node),
                        (long long) lock_expiration);
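         // Schedule a recheck just after the lock expires (hence the pre-increment)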
         pe__update_recheck_time(++lock_expiration, scheduler,
                                 "shutdown lock expiration");
     } else {
         pcmk__rsc_info(rsc, "Locking %s to %s due to shutdown",
                        rsc->id, pcmk__node_name(rsc->private->lock_node));
     }
 
     // If resource is locked to one node, ban it from all other nodes
     g_list_foreach(scheduler->nodes, ban_if_not_locked, rsc);
 }
diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c
index 13144b9a61..1e4dc13691 100644
--- a/lib/pacemaker/pcmk_sched_promotable.c
+++ b/lib/pacemaker/pcmk_sched_promotable.c
@@ -1,1378 +1,1379 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/common/xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 /*!
  * \internal
  * \brief Add implicit promotion ordering for a promotable instance
  *
  * \param[in,out] clone  Clone resource
  * \param[in,out] child  Instance of \p clone being ordered
  * \param[in,out] last   Previous instance ordered (NULL if \p child is first)
  */
 static void
 order_instance_promotion(pcmk_resource_t *clone, pcmk_resource_t *child,
                          pcmk_resource_t *last)
 {
     // "Promote clone" -> promote instance -> "clone promoted"
     pcmk__order_resource_actions(clone, PCMK_ACTION_PROMOTE,
                                  child, PCMK_ACTION_PROMOTE,
                                  pcmk__ar_ordered);
     pcmk__order_resource_actions(child, PCMK_ACTION_PROMOTE,
                                  clone, PCMK_ACTION_PROMOTED,
                                  pcmk__ar_ordered);
 
     // If clone is ordered, order this instance relative to last
     if ((last != NULL) && pe__clone_is_ordered(clone)) {
         pcmk__order_resource_actions(last, PCMK_ACTION_PROMOTE,
                                      child, PCMK_ACTION_PROMOTE,
                                      pcmk__ar_ordered);
     }
 }
 
 /*!
  * \internal
  * \brief Add implicit demotion ordering for a promotable instance
  *
  * \param[in,out] clone  Clone resource
  * \param[in,out] child  Instance of \p clone being ordered
  * \param[in,out] last   Previous instance ordered (NULL if \p child is first)
  */
 static void
 order_instance_demotion(pcmk_resource_t *clone, pcmk_resource_t *child,
                         pcmk_resource_t *last)
 {
     // "Demote clone" -> demote instance -> "clone demoted"
     pcmk__order_resource_actions(clone, PCMK_ACTION_DEMOTE, child,
                                  PCMK_ACTION_DEMOTE,
                                  pcmk__ar_then_implies_first_graphed);
     pcmk__order_resource_actions(child, PCMK_ACTION_DEMOTE,
                                  clone, PCMK_ACTION_DEMOTED,
                                  pcmk__ar_first_implies_then_graphed);
 
     // If clone is ordered, order this instance relative to last
     if ((last != NULL) && pe__clone_is_ordered(clone)) {
         pcmk__order_resource_actions(child, PCMK_ACTION_DEMOTE, last,
                                      PCMK_ACTION_DEMOTE, pcmk__ar_ordered);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether an instance will be promoted or demoted
  *
  * \param[in]     rsc        Instance to check
  * \param[in,out] demoting   Set to true if \p rsc will be demoted
  * \param[in,out] promoting  Set to true if \p rsc will be promoted
  */
 static void
 check_for_role_change(const pcmk_resource_t *rsc, bool *demoting,
                       bool *promoting)
 {
     const GList *iter = NULL;
 
     // If this is a cloned group, check group members recursively
     if (rsc->private->children != NULL) {
         for (iter = rsc->private->children; iter != NULL; iter = iter->next) {
             check_for_role_change((const pcmk_resource_t *) iter->data,
                                   demoting, promoting);
         }
         return;
     }
 
     for (iter = rsc->private->actions; iter != NULL; iter = iter->next) {
         const pcmk_action_t *action = (const pcmk_action_t *) iter->data;
 
         if (*promoting && *demoting) {
             return;
 
         } else if (pcmk_is_set(action->flags, pcmk__action_optional)) {
             continue;
 
         } else if (pcmk__str_eq(PCMK_ACTION_DEMOTE, action->task,
                                 pcmk__str_none)) {
             *demoting = true;
 
         } else if (pcmk__str_eq(PCMK_ACTION_PROMOTE, action->task,
                                 pcmk__str_none)) {
             *promoting = true;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Add promoted-role location constraint scores to an instance's priority
  *
  * Adjust a promotable clone instance's promotion priority by the scores of any
  * location constraints in a list that are both limited to the promoted role and
  * for the node where the instance will be placed.
  *
  * \param[in,out] child                 Promotable clone instance
  * \param[in]     location_constraints  List of location constraints to apply
  * \param[in]     chosen                Node where \p child will be placed
  */
 static void
 apply_promoted_locations(pcmk_resource_t *child,
                          const GList *location_constraints,
                          const pcmk_node_t *chosen)
 {
     for (const GList *iter = location_constraints; iter; iter = iter->next) {
         const pcmk__location_t *location = iter->data;
         const pcmk_node_t *constraint_node = NULL;
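         // Only promoted-role constraints apply, and only on the chosen node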
 
         if (location->role_filter == pcmk_role_promoted) {
             constraint_node = pe_find_node_id(location->nodes,
                                               chosen->private->id);
         }
         if (constraint_node != NULL) {
             int new_priority = pcmk__add_scores(child->private->priority,
                                                 constraint_node->assign->score);
 
             pcmk__rsc_trace(child,
                             "Applying location %s to %s promotion priority on "
                             "%s: %s + %s = %s",
                             location->id, child->id,
                             pcmk__node_name(constraint_node),
                             pcmk_readable_score(child->private->priority),
                             pcmk_readable_score(constraint_node->assign->score),
                             pcmk_readable_score(new_priority));
             child->private->priority = new_priority;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Get the node that an instance will be promoted on
  *
  * \param[in] rsc  Promotable clone instance to check
  *
  * \return Node that \p rsc will be promoted on, or NULL if none
  */
 static pcmk_node_t *
 node_to_be_promoted_on(const pcmk_resource_t *rsc)
 {
     pcmk_node_t *node = NULL;
     pcmk_node_t *local_node = NULL;
     const pcmk_resource_t *parent = NULL;
 
     // If this is a cloned group, bail if any group member can't be promoted
     for (GList *iter = rsc->private->children;
          iter != NULL; iter = iter->next) {
 
         pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
 
         if (node_to_be_promoted_on(child) == NULL) {
             pcmk__rsc_trace(rsc,
                             "%s can't be promoted because member %s can't",
                             rsc->id, child->id);
             return NULL;
         }
     }
 
     node = rsc->private->fns->location(rsc, NULL, FALSE);
     if (node == NULL) {
         pcmk__rsc_trace(rsc, "%s can't be promoted because it won't be active",
                         rsc->id);
         return NULL;
 
     } else if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
         if (rsc->private->fns->state(rsc, TRUE) == pcmk_role_promoted) {
             crm_notice("Unmanaged instance %s will be left promoted on %s",
                        rsc->id, pcmk__node_name(node));
         } else {
             pcmk__rsc_trace(rsc, "%s can't be promoted because it is unmanaged",
                             rsc->id);
             return NULL;
         }
 
     } else if (rsc->private->priority < 0) {
         pcmk__rsc_trace(rsc,
                         "%s can't be promoted because its promotion priority "
                         "%d is negative",
                         rsc->id, rsc->private->priority);
         return NULL;
 
     } else if (!pcmk__node_available(node, false, true)) {
         pcmk__rsc_trace(rsc,
                         "%s can't be promoted because %s can't run resources",
                         rsc->id, pcmk__node_name(node));
         return NULL;
     }
 
     parent = pe__const_top_resource(rsc, false);
     local_node = g_hash_table_lookup(parent->private->allowed_nodes,
                                      node->private->id);
 
     if (local_node == NULL) {
         /* It should not be possible for the scheduler to have assigned the
          * instance to a node where its parent is not allowed, but it's good to
          * have a fail-safe.
          */
         if (pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
-            pcmk__sched_err("%s can't be promoted because %s is not allowed "
+            pcmk__sched_err(node->private->scheduler,
+                            "%s can't be promoted because %s is not allowed "
                             "on %s (scheduler bug?)",
                             rsc->id, parent->id, pcmk__node_name(node));
         } // else the instance is unmanaged and already promoted
         return NULL;
 
     } else if ((local_node->assign->count >= pe__clone_promoted_node_max(parent))
                && pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
         pcmk__rsc_trace(rsc,
                         "%s can't be promoted because %s has "
                         "maximum promoted instances already",
                         rsc->id, pcmk__node_name(node));
         return NULL;
     }
 
     return local_node;
 }
 
 /*!
  * \internal
  * \brief Compare two promotable clone instances by promotion priority
  *
  * \param[in] a  First instance to compare
  * \param[in] b  Second instance to compare
  *
  * \return A negative number if \p a has higher promotion priority,
  *         a positive number if \p b has higher promotion priority,
  *         or 0 if promotion priorities are equal
  */
 static gint
 cmp_promotable_instance(gconstpointer a, gconstpointer b)
 {
     const pcmk_resource_t *rsc1 = (const pcmk_resource_t *) a;
     const pcmk_resource_t *rsc2 = (const pcmk_resource_t *) b;
 
     enum rsc_role_e role1 = pcmk_role_unknown;
     enum rsc_role_e role2 = pcmk_role_unknown;
 
     CRM_ASSERT((rsc1 != NULL) && (rsc2 != NULL));
 
     // Check promotion priority set by pcmk__set_instance_roles()
     if (rsc1->private->promotion_priority > rsc2->private->promotion_priority) {
         pcmk__rsc_trace(rsc1,
                         "%s has higher promotion priority (%s) than %s (%d)",
                         rsc1->id,
                         pcmk_readable_score(rsc1->private->promotion_priority),
                         rsc2->id, rsc2->private->promotion_priority);
         return -1;
     }
 
     if (rsc1->private->promotion_priority < rsc2->private->promotion_priority) {
         pcmk__rsc_trace(rsc1,
                         "%s has lower promotion priority (%s) than %s (%d)",
                         rsc1->id,
                         pcmk_readable_score(rsc1->private->promotion_priority),
                         rsc2->id, rsc2->private->promotion_priority);
         return 1;
     }
 
     // If those are the same, prefer instance whose current role is higher
     role1 = rsc1->private->fns->state(rsc1, TRUE);
     role2 = rsc2->private->fns->state(rsc2, TRUE);
     if (role1 > role2) {
         pcmk__rsc_trace(rsc1,
                         "%s has higher promotion priority than %s "
                         "(higher current role)",
                         rsc1->id, rsc2->id);
         return -1;
     } else if (role1 < role2) {
         pcmk__rsc_trace(rsc1,
                         "%s has lower promotion priority than %s "
                         "(lower current role)",
                         rsc1->id, rsc2->id);
         return 1;
     }
 
     // Finally, do normal clone instance sorting
     return pcmk__cmp_instance(a, b);
 }
 
 /*!
  * \internal
  * \brief Add promotable clone instance's promotion priority to its node's score
  *
  * Add a promotable clone instance's promotion priority (which sums its
  * promotion preferences and scores of relevant location constraints for the
  * promoted role) to the node score of the instance's assigned node.
  *
  * \param[in]     data       Promotable clone instance
  * \param[in,out] user_data  Clone parent of \p data
  */
 static void
 add_promotion_priority_to_node_score(gpointer data, gpointer user_data)
 {
     const pcmk_resource_t *child = (const pcmk_resource_t *) data;
     pcmk_resource_t *clone = (pcmk_resource_t *) user_data;
 
     pcmk_node_t *node = NULL;
     const pcmk_node_t *chosen = NULL;
     const int promotion_priority = child->private->promotion_priority;
 
     if (promotion_priority < 0) {
         pcmk__rsc_trace(clone,
                         "Not adding promotion priority of %s: negative (%s)",
                         child->id, pcmk_readable_score(promotion_priority));
         return;
     }
 
     chosen = child->private->fns->location(child, NULL, FALSE);
     if (chosen == NULL) {
         pcmk__rsc_trace(clone, "Not adding promotion priority of %s: inactive",
                         child->id);
         return;
     }
 
     node = g_hash_table_lookup(clone->private->allowed_nodes,
                                chosen->private->id);
     CRM_ASSERT(node != NULL);
 
     node->assign->score = pcmk__add_scores(promotion_priority,
                                            node->assign->score);
     pcmk__rsc_trace(clone,
                     "Added cumulative priority of %s (%s) to score on %s "
                     "(now %d)",
                     child->id, pcmk_readable_score(promotion_priority),
                     pcmk__node_name(node), node->assign->score);
 }
 
 /*!
  * \internal
  * \brief Apply colocation to dependent's node scores if for promoted role
  *
  * \param[in,out] data       Colocation constraint to apply
  * \param[in,out] user_data  Promotable clone that is constraint's dependent
  */
 static void
 apply_coloc_to_dependent(gpointer data, gpointer user_data)
 {
     pcmk__colocation_t *colocation = data;
     pcmk_resource_t *clone = user_data;
     pcmk_resource_t *primary = colocation->primary;
     uint32_t flags = pcmk__coloc_select_default;
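     // Scale the colocation score to a weighting factor relative to INFINITY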
     float factor = colocation->score / (float) PCMK_SCORE_INFINITY;
 
     if (colocation->dependent_role != pcmk_role_promoted) {
         return;
     }
     if (colocation->score < PCMK_SCORE_INFINITY) {
         flags = pcmk__coloc_select_active;
     }
     pcmk__rsc_trace(clone, "Applying colocation %s (promoted %s with %s) @%s",
                     colocation->id, colocation->dependent->id,
                     colocation->primary->id,
                     pcmk_readable_score(colocation->score));
     primary->private->cmds->add_colocated_node_scores(primary, clone, clone->id,
                                                       &(clone->private->allowed_nodes),
                                                       colocation, factor,
                                                       flags);
 }
 
 /*!
  * \internal
  * \brief Apply colocation to primary's node scores if for promoted role
  *
  * \param[in,out] data       Colocation constraint to apply
  * \param[in,out] user_data  Promotable clone that is constraint's primary
  */
 static void
 apply_coloc_to_primary(gpointer data, gpointer user_data)
 {
     pcmk__colocation_t *colocation = data;
     pcmk_resource_t *clone = user_data;
     pcmk_resource_t *dependent = colocation->dependent;
     const float factor = colocation->score / (float) PCMK_SCORE_INFINITY;
     const uint32_t flags = pcmk__coloc_select_active
                            |pcmk__coloc_select_nonnegative;
 
     if ((colocation->primary_role != pcmk_role_promoted)
          || !pcmk__colocation_has_influence(colocation, NULL)) {
         return;
     }
 
     pcmk__rsc_trace(clone, "Applying colocation %s (%s with promoted %s) @%s",
                     colocation->id, colocation->dependent->id,
                     colocation->primary->id,
                     pcmk_readable_score(colocation->score));
     dependent->private->cmds->add_colocated_node_scores(dependent, clone,
                                                         clone->id,
                                                         &(clone->private->allowed_nodes),
                                                         colocation, factor,
                                                         flags);
 }
 
 /*!
  * \internal
  * \brief Set clone instance's promotion priority to its node's score
  *
  * \param[in,out] data       Promotable clone instance
  * \param[in]     user_data  Parent clone of \p data
  */
 static void
 set_promotion_priority_to_node_score(gpointer data, gpointer user_data)
 {
     pcmk_resource_t *child = (pcmk_resource_t *) data;
     const pcmk_resource_t *clone = (const pcmk_resource_t *) user_data;
 
     pcmk_node_t *chosen = child->private->fns->location(child, NULL, FALSE);
 
     if (!pcmk_is_set(child->flags, pcmk__rsc_managed)
         && (child->private->next_role == pcmk_role_promoted)) {
         child->private->promotion_priority = PCMK_SCORE_INFINITY;
         pcmk__rsc_trace(clone,
                         "Final promotion priority for %s is %s "
                         "(unmanaged promoted)",
                         child->id, pcmk_readable_score(PCMK_SCORE_INFINITY));
 
     } else if (chosen == NULL) {
         child->private->promotion_priority = -PCMK_SCORE_INFINITY;
         pcmk__rsc_trace(clone,
                         "Final promotion priority for %s is %s "
                         "(will not be active)",
                         child->id, pcmk_readable_score(-PCMK_SCORE_INFINITY));
 
     } else if (child->private->promotion_priority < 0) {
         pcmk__rsc_trace(clone,
                         "Final promotion priority for %s is %s "
                         "(ignoring node score)",
                         child->id,
                         pcmk_readable_score(child->private->promotion_priority));
 
     } else {
         const pcmk_node_t *node = NULL;
 
         node = g_hash_table_lookup(clone->private->allowed_nodes,
                                    chosen->private->id);
 
         CRM_ASSERT(node != NULL);
         child->private->promotion_priority = node->assign->score;
         pcmk__rsc_trace(clone,
                         "Adding scores for %s: "
                         "final promotion priority for %s is %s",
                         clone->id, child->id,
                         pcmk_readable_score(child->private->promotion_priority));
     }
 }
 
 /*!
  * \internal
  * \brief Sort a promotable clone's instances by descending promotion priority
  *
  * \param[in,out] clone  Promotable clone to sort
  */
 static void
 sort_promotable_instances(pcmk_resource_t *clone)
 {
     GList *colocations = NULL;
 
     if (pe__set_clone_flag(clone, pcmk__clone_promotion_constrained)
             == pcmk_rc_already) {
         return;
     }
     pcmk__set_rsc_flags(clone, pcmk__rsc_updating_nodes);
 
     for (GList *iter = clone->private->children;
          iter != NULL; iter = iter->next) {
 
         pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
 
         pcmk__rsc_trace(clone,
                         "Adding scores for %s: "
                         "initial promotion priority for %s is %s",
                         clone->id, child->id,
                         pcmk_readable_score(child->private->promotion_priority));
     }
     pe__show_node_scores(true, clone, "Before", clone->private->allowed_nodes,
                          clone->private->scheduler);
 
     g_list_foreach(clone->private->children,
                    add_promotion_priority_to_node_score, clone);
 
     colocations = pcmk__this_with_colocations(clone);
     g_list_foreach(colocations, apply_coloc_to_dependent, clone);
     g_list_free(colocations);
 
     colocations = pcmk__with_this_colocations(clone);
     g_list_foreach(colocations, apply_coloc_to_primary, clone);
     g_list_free(colocations);
 
     // Ban resource from all nodes if it needs a ticket but doesn't have it
     pcmk__require_promotion_tickets(clone);
 
     pe__show_node_scores(true, clone, "After", clone->private->allowed_nodes,
                          clone->private->scheduler);
 
     // Reset promotion priorities to final node scores
     g_list_foreach(clone->private->children,
                    set_promotion_priority_to_node_score, clone);
 
     // Finally, sort instances in descending order of promotion priority
     clone->private->children = g_list_sort(clone->private->children,
                                            cmp_promotable_instance);
     pcmk__clear_rsc_flags(clone, pcmk__rsc_updating_nodes);
 }
 
 /*!
  * \internal
  * \brief Find the active instance (if any) of an anonymous clone on a node
  *
  * \param[in] clone  Anonymous clone to check
  * \param[in] id     Instance ID (without instance number) to check
  * \param[in] node   Node to check
  *
  * \return Instance of \p clone matching \p id if active on \p node,
  *         otherwise NULL
  */
 static pcmk_resource_t *
 find_active_anon_instance(const pcmk_resource_t *clone, const char *id,
                           const pcmk_node_t *node)
 {
     for (GList *iter = clone->private->children; iter; iter = iter->next) {
         pcmk_resource_t *child = iter->data;
         pcmk_resource_t *active = NULL;
 
         // Use ->find_rsc() in case this is a cloned group
         active = clone->private->fns->find_rsc(child, id, node,
                                                pcmk_rsc_match_clone_only
                                                |pcmk_rsc_match_current_node);
         if (active != NULL) {
             return active;
         }
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Check whether an anonymous clone instance is known on a node
  *
  * \param[in] clone  Anonymous clone to check
  * \param[in] id     Instance ID (without instance number) to check
  * \param[in] node   Node to check
  *
  * \return true if \p id instance of \p clone is known on \p node,
  *         otherwise false
  */
 static bool
 anonymous_known_on(const pcmk_resource_t *clone, const char *id,
                    const pcmk_node_t *node)
 {
     for (GList *iter = clone->private->children; iter; iter = iter->next) {
         pcmk_resource_t *child = iter->data;
 
         /* Use ->find_rsc() because this might be a cloned group, and knowing
          * that other members of the group are known here implies nothing.
          */
         child = clone->private->fns->find_rsc(child, id, NULL,
                                               pcmk_rsc_match_clone_only);
         CRM_LOG_ASSERT(child != NULL);
         if (child != NULL) {
             if (g_hash_table_lookup(child->private->probed_nodes,
                                     node->private->id)) {
                 return true;
             }
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Check whether a node is allowed to run a resource
  *
  * \param[in] rsc   Resource to check
  * \param[in] node  Node to check
  *
  * \return true if \p node is allowed to run \p rsc, otherwise false
  */
 static bool
 is_allowed(const pcmk_resource_t *rsc, const pcmk_node_t *node)
 {
     pcmk_node_t *allowed = g_hash_table_lookup(rsc->private->allowed_nodes,
                                                node->private->id);
 
     return (allowed != NULL) && (allowed->assign->score >= 0);
 }
 
 /*!
  * \internal
  * \brief Check whether a clone instance's promotion score should be considered
  *
  * \param[in] rsc   Promotable clone instance to check
  * \param[in] node  Node where score would be applied
  *
  * \return true if \p rsc's promotion score should be considered on \p node,
  *         otherwise false
  */
 static bool
 promotion_score_applies(const pcmk_resource_t *rsc, const pcmk_node_t *node)
 {
     char *id = clone_strip(rsc->id);
     const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
     pcmk_resource_t *active = NULL;
     const char *reason = "allowed";
 
     // Some checks apply only to anonymous clone instances
     if (!pcmk_is_set(rsc->flags, pcmk__rsc_unique)) {
 
         // If instance is active on the node, its score definitely applies
         active = find_active_anon_instance(parent, id, node);
         if (active == rsc) {
             reason = "active";
             goto check_allowed;
         }
 
         /* If *no* instance is active on this node, this instance's score will
          * count if it has been probed on this node.
          */
         if ((active == NULL) && anonymous_known_on(parent, id, node)) {
             reason = "probed";
             goto check_allowed;
         }
     }
 
     /* If this clone's status is unknown on *all* nodes (e.g. cluster startup),
      * take all instances' scores into account, to make sure we use any
      * permanent promotion scores.
      */
     if ((rsc->private->active_nodes == NULL)
         && (g_hash_table_size(rsc->private->probed_nodes) == 0)) {
         reason = "none probed";
         goto check_allowed;
     }
 
     /* Otherwise, we've probed and/or started the resource *somewhere*, so
      * consider promotion scores on nodes where we know the status.
      */
     if ((g_hash_table_lookup(rsc->private->probed_nodes,
                              node->private->id) != NULL)
         || (pe_find_node_id(rsc->private->active_nodes,
                             node->private->id) != NULL)) {
         reason = "known";
     } else {
         pcmk__rsc_trace(rsc,
                         "Ignoring %s promotion score (for %s) on %s: "
                         "not probed",
                         rsc->id, id, pcmk__node_name(node));
         free(id);
         return false;
     }
 
 check_allowed:
     if (is_allowed(rsc, node)) {
         pcmk__rsc_trace(rsc, "Counting %s promotion score (for %s) on %s: %s",
                         rsc->id, id, pcmk__node_name(node), reason);
         free(id);
         return true;
     }
 
     pcmk__rsc_trace(rsc,
                     "Ignoring %s promotion score (for %s) on %s: not allowed",
                     rsc->id, id, pcmk__node_name(node));
     free(id);
     return false;
 }
 
 /*!
  * \internal
  * \brief Get the value of a promotion score node attribute
  *
  * \param[in] rsc   Promotable clone instance to get promotion score for
  * \param[in] node  Node to get promotion score for
  * \param[in] name  Resource name to use in promotion score attribute name
  *
  * \return Value of promotion score node attribute for \p rsc on \p node
  */
 static const char *
 promotion_attr_value(const pcmk_resource_t *rsc, const pcmk_node_t *node,
                      const char *name)
 {
     char *attr_name = NULL;
     const char *attr_value = NULL;
     const char *target = NULL;
     enum pcmk__rsc_node node_type = pcmk__rsc_node_assigned;
 
     if (pcmk_is_set(rsc->flags, pcmk__rsc_unassigned)) {
         // Not assigned yet
         node_type = pcmk__rsc_node_current;
     }
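     /* For bundles, a container attribute target may redirect the attribute
      * lookup to another node (such as the bundle's host)
      */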
     target = g_hash_table_lookup(rsc->private->meta,
                                  PCMK_META_CONTAINER_ATTRIBUTE_TARGET);
     attr_name = pcmk_promotion_score_name(name);
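     // The attribute is typically named "master-<name>" for historical reasons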
     attr_value = pcmk__node_attr(node, attr_name, target, node_type);
     free(attr_name);
     return attr_value;
 }
 
 /*!
  * \internal
  * \brief Get the promotion score for a clone instance on a node
  *
  * \param[in]  rsc         Promotable clone instance to get score for
  * \param[in]  node        Node to get score for
  * \param[out] is_default  If non-NULL, will be set true if no score available
  *
  * \return Promotion score for \p rsc on \p node (or 0 if none)
  */
 static int
 promotion_score(const pcmk_resource_t *rsc, const pcmk_node_t *node,
                 bool *is_default)
 {
     const char *name = NULL;
     const char *attr_value = NULL;
 
     if (is_default != NULL) {
         *is_default = true;
     }
 
     CRM_CHECK((rsc != NULL) && (node != NULL), return 0);
 
     /* If this is an instance of a cloned group, the promotion score is the sum
      * of all members' promotion scores.
      */
     if (rsc->private->children != NULL) {
         int score = 0;
 
         for (const GList *iter = rsc->private->children;
              iter != NULL; iter = iter->next) {
 
             const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
             bool child_default = false;
             int child_score = promotion_score(child, node, &child_default);
 
             if (!child_default && (is_default != NULL)) {
                 *is_default = false;
             }
             score += child_score;
         }
         return score;
     }
 
     if (!promotion_score_applies(rsc, node)) {
         return 0;
     }
 
     /* For the promotion score attribute name, use the name the resource is
      * known as in resource history, since that's what crm_attribute --promotion
      * would have used.
      */
     name = pcmk__s(rsc->private->history_id, rsc->id);
 
     attr_value = promotion_attr_value(rsc, node, name);
     if (attr_value != NULL) {
         pcmk__rsc_trace(rsc, "Promotion score for %s on %s = %s",
                         name, pcmk__node_name(node),
                         pcmk__s(attr_value, "(unset)"));
     } else if (!pcmk_is_set(rsc->flags, pcmk__rsc_unique)) {
         /* If we don't have any resource history yet, we won't have history_id.
          * In that case, for anonymous clones, try the resource name without
          * any instance number.
          */
         char *rsc_name = clone_strip(rsc->id);
 
         if (strcmp(rsc->id, rsc_name) != 0) {
             attr_value = promotion_attr_value(rsc, node, rsc_name);
             pcmk__rsc_trace(rsc, "Promotion score for %s on %s (for %s) = %s",
                             rsc_name, pcmk__node_name(node), rsc->id,
                             pcmk__s(attr_value, "(unset)"));
         }
         free(rsc_name);
     }
 
     if (attr_value == NULL) {
         return 0;
     }
 
     if (is_default != NULL) {
         *is_default = false;
     }
     return char2score(attr_value);
 }
 
 /*!
  * \internal
  * \brief Include promotion scores in instances' node scores and priorities
  *
  * \param[in,out] rsc  Promotable clone resource to update
  */
 void
 pcmk__add_promotion_scores(pcmk_resource_t *rsc)
 {
     if (pe__set_clone_flag(rsc,
                            pcmk__clone_promotion_added) == pcmk_rc_already) {
         return;
     }
 
     for (GList *iter = rsc->private->children;
          iter != NULL; iter = iter->next) {
 
         pcmk_resource_t *child_rsc = (pcmk_resource_t *) iter->data;
 
         GHashTableIter iter;
         pcmk_node_t *node = NULL;
         int score, new_score;
 
         g_hash_table_iter_init(&iter, child_rsc->private->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
             if (!pcmk__node_available(node, false, false)) {
                 /* This node will never be promoted, so don't apply the
                  * promotion score, as that may lead to clone shuffling.
                  */
                 continue;
             }
 
             score = promotion_score(child_rsc, node, NULL);
             if (score > 0) {
                 new_score = pcmk__add_scores(node->assign->score, score);
                 if (new_score != node->assign->score) { // Could remain INFINITY
                     node->assign->score = new_score;
                     pcmk__rsc_trace(rsc,
                                     "Added %s promotion priority (%s) to score "
                                     "on %s (now %s)",
                                     child_rsc->id, pcmk_readable_score(score),
                                     pcmk__node_name(node),
                                     pcmk_readable_score(new_score));
                 }
             }
 
             if (score > child_rsc->private->priority) {
                 pcmk__rsc_trace(rsc,
                                 "Updating %s priority to promotion score "
                                 "(%d->%d)",
                                 child_rsc->id, child_rsc->private->priority,
                                 score);
                 child_rsc->private->priority = score;
             }
         }
     }
 }
 
 /*!
  * \internal
  * \brief If a resource's current role is started, change it to unpromoted
  *
  * \param[in,out] data       Resource to update
  * \param[in]     user_data  Ignored
  */
 static void
 set_current_role_unpromoted(void *data, void *user_data)
 {
     pcmk_resource_t *rsc = (pcmk_resource_t *) data;
 
     if (rsc->private->orig_role == pcmk_role_started) {
         // Promotable clones should use unpromoted role instead of started
         rsc->private->orig_role = pcmk_role_unpromoted;
     }
     g_list_foreach(rsc->private->children, set_current_role_unpromoted, NULL);
 }
 
 /*!
  * \internal
  * \brief Set a resource's next role to unpromoted (or stopped if unassigned)
  *
  * \param[in,out] data       Resource to update
  * \param[in]     user_data  Ignored
  */
 static void
 set_next_role_unpromoted(void *data, void *user_data)
 {
     pcmk_resource_t *rsc = (pcmk_resource_t *) data;
     GList *assigned = NULL;
 
     rsc->private->fns->location(rsc, &assigned, FALSE);
     if (assigned == NULL) {
         pe__set_next_role(rsc, pcmk_role_stopped, "stopped instance");
     } else {
         pe__set_next_role(rsc, pcmk_role_unpromoted, "unpromoted instance");
         g_list_free(assigned);
     }
     g_list_foreach(rsc->private->children, set_next_role_unpromoted, NULL);
 }
 
 /*!
  * \internal
  * \brief Set a resource's next role to promoted if not already set
  *
  * \param[in,out] data       Resource to update
  * \param[in]     user_data  Ignored
  */
 static void
 set_next_role_promoted(void *data, gpointer user_data)
 {
     pcmk_resource_t *rsc = (pcmk_resource_t *) data;
 
     if (rsc->private->next_role == pcmk_role_unknown) {
         pe__set_next_role(rsc, pcmk_role_promoted, "promoted instance");
     }
     g_list_foreach(rsc->private->children, set_next_role_promoted, NULL);
 }
 
 /*!
  * \internal
  * \brief Show instance's promotion score on node where it will be active
  *
  * \param[in,out] instance  Promotable clone instance to show
  */
 static void
 show_promotion_score(pcmk_resource_t *instance)
 {
     pcmk_node_t *chosen = instance->private->fns->location(instance, NULL,
                                                            FALSE);
     const char *score_s = NULL;
 
     score_s = pcmk_readable_score(instance->private->promotion_priority);
     if (pcmk_is_set(instance->private->scheduler->flags,
                     pcmk_sched_output_scores)
         && !pcmk__is_daemon && (instance->private->scheduler->priv != NULL)) {
 
         pcmk__output_t *out = instance->private->scheduler->priv;
 
         out->message(out, "promotion-score", instance, chosen, score_s);
 
     } else if (chosen == NULL) {
         pcmk__rsc_debug(pe__const_top_resource(instance, false),
                         "%s promotion score (inactive): %s (priority=%d)",
                         instance->id, score_s, instance->private->priority);
 
     } else {
         pcmk__rsc_debug(pe__const_top_resource(instance, false),
                         "%s promotion score on %s: %s (priority=%d)",
                         instance->id, pcmk__node_name(chosen),
                         score_s, instance->private->priority);
     }
 }
 
 /*!
  * \internal
  * \brief Set a clone instance's promotion priority
  *
  * \param[in,out] data       Promotable clone instance to update
  * \param[in]     user_data  Instance's parent clone
  */
 static void
 set_instance_priority(gpointer data, gpointer user_data)
 {
     pcmk_resource_t *instance = (pcmk_resource_t *) data;
     const pcmk_resource_t *clone = (const pcmk_resource_t *) user_data;
 
     const pcmk_node_t *chosen = NULL;
     enum rsc_role_e next_role = pcmk_role_unknown;
     GList *list = NULL;
 
     pcmk__rsc_trace(clone, "Assigning priority for %s: %s", instance->id,
                     pcmk_role_text(instance->private->next_role));
 
     if (instance->private->fns->state(instance, TRUE) == pcmk_role_started) {
         set_current_role_unpromoted(instance, NULL);
     }
 
     // Only an instance that will be active can be promoted
     chosen = instance->private->fns->location(instance, &list, FALSE);
     if (pcmk__list_of_multiple(list)) {
         pcmk__config_err("Cannot promote non-colocated child %s",
                          instance->id);
     }
     g_list_free(list);
     if (chosen == NULL) {
         return;
     }
 
     next_role = instance->private->fns->state(instance, FALSE);
     switch (next_role) {
         case pcmk_role_started:
         case pcmk_role_unknown:
             // Set instance priority to its promotion score (or -1 if none)
             {
                 bool is_default = false;
 
                 instance->private->priority = promotion_score(instance, chosen,
                                                               &is_default);
                 if (is_default) {
                     /* Default to -1 if no value is set. This allows instances
                      * eligible for promotion to be specified based solely on
                      * PCMK_XE_RSC_LOCATION constraints, but prevents any
                      * instance from being promoted if neither a constraint nor
                      * a promotion score is present.
                      */
                     instance->private->priority = -1;
                 }
             }
             break;
 
         case pcmk_role_unpromoted:
         case pcmk_role_stopped:
             // Instance can't be promoted
             instance->private->priority = -PCMK_SCORE_INFINITY;
             break;
 
         case pcmk_role_promoted:
             // Nothing needed (re-creating actions after scheduling fencing)
             break;
 
         default:
             CRM_CHECK(FALSE, crm_err("Unknown resource role %d for %s",
                                      next_role, instance->id));
     }
 
     // Add relevant location constraint scores for promoted role
     apply_promoted_locations(instance, instance->private->location_constraints,
                              chosen);
     apply_promoted_locations(instance, clone->private->location_constraints,
                              chosen);
 
     // Consider instance's role-based colocations with other resources
     list = pcmk__this_with_colocations(instance);
     for (GList *iter = list; iter != NULL; iter = iter->next) {
         pcmk__colocation_t *cons = (pcmk__colocation_t *) iter->data;
 
         instance->private->cmds->apply_coloc_score(instance, cons->primary,
                                                    cons, true);
     }
     g_list_free(list);
 
     instance->private->promotion_priority = instance->private->priority;
     if (next_role == pcmk_role_promoted) {
         instance->private->promotion_priority = PCMK_SCORE_INFINITY;
     }
     pcmk__rsc_trace(clone, "Assigning %s priority = %d",
                     instance->id, instance->private->priority);
 }
 
 /*!
  * \internal
  * \brief Set a promotable clone instance's role
  *
  * \param[in,out] data       Promotable clone instance to update
  * \param[in,out] user_data  Pointer to count of instances chosen for promotion
  */
 static void
 set_instance_role(gpointer data, gpointer user_data)
 {
     pcmk_resource_t *instance = (pcmk_resource_t *) data;
     int *count = (int *) user_data;
 
     const pcmk_resource_t *clone = pe__const_top_resource(instance, false);
     const pcmk_scheduler_t *scheduler = instance->private->scheduler;
     pcmk_node_t *chosen = NULL;
 
     show_promotion_score(instance);
 
     if (instance->private->promotion_priority < 0) {
         pcmk__rsc_trace(clone, "Not supposed to promote instance %s",
                         instance->id);
 
     } else if ((*count < pe__clone_promoted_max(instance))
                || !pcmk_is_set(clone->flags, pcmk__rsc_managed)) {
         chosen = node_to_be_promoted_on(instance);
     }
 
     if (chosen == NULL) {
         set_next_role_unpromoted(instance, NULL);
         return;
     }
 
     if ((instance->private->orig_role < pcmk_role_promoted)
         && !pcmk_is_set(scheduler->flags, pcmk_sched_quorate)
         && (scheduler->no_quorum_policy == pcmk_no_quorum_freeze)) {
         crm_notice("Clone instance %s cannot be promoted without quorum",
                    instance->id);
         set_next_role_unpromoted(instance, NULL);
         return;
     }
 
     chosen->assign->count++;
     pcmk__rsc_info(clone, "Choosing %s (%s) on %s for promotion",
                    instance->id, pcmk_role_text(instance->private->orig_role),
                    pcmk__node_name(chosen));
     set_next_role_promoted(instance, NULL);
     (*count)++;
 }
 
 /*!
  * \internal
  * \brief Set roles for all instances of a promotable clone
  *
  * \param[in,out] rsc  Promotable clone resource to update
  */
 void
 pcmk__set_instance_roles(pcmk_resource_t *rsc)
 {
     int promoted = 0;
     GHashTableIter iter;
     pcmk_node_t *node = NULL;
 
     // Repurpose count to track the number of promoted instances assigned
     g_hash_table_iter_init(&iter, rsc->private->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
         node->assign->count = 0;
     }
 
     // Set instances' promotion priorities and sort by highest priority first
     g_list_foreach(rsc->private->children, set_instance_priority, rsc);
     sort_promotable_instances(rsc);
 
     // Choose the first N eligible instances to be promoted
     g_list_foreach(rsc->private->children, set_instance_role, &promoted);
     pcmk__rsc_info(rsc, "%s: Promoted %d instances of a possible %d",
                    rsc->id, promoted, pe__clone_promoted_max(rsc));
 }
 
 /*!
  * \internal
  * \brief Create actions for promotable clone instances
  *
  * \param[in,out] clone          Promotable clone to create actions for
  * \param[in,out] any_promoting  Will be set true if any instance is promoting
  * \param[in,out] any_demoting   Will be set true if any instance is demoting
  */
 static void
 create_promotable_instance_actions(pcmk_resource_t *clone,
                                    bool *any_promoting, bool *any_demoting)
 {
     for (GList *iter = clone->private->children;
          iter != NULL; iter = iter->next) {
 
         pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
 
         instance->private->cmds->create_actions(instance);
         check_for_role_change(instance, any_demoting, any_promoting);
     }
 }
 
 /*!
  * \internal
  * \brief Reset each promotable instance's resource priority
  *
  * Reset the priority of each instance of a promotable clone to the clone's
  * priority (after promotion actions are scheduled, when instance priorities
  * were repurposed as promotion scores).
  *
  * \param[in,out] clone  Promotable clone to reset
  */
 static void
 reset_instance_priorities(pcmk_resource_t *clone)
 {
     for (GList *iter = clone->private->children;
          iter != NULL; iter = iter->next) {
 
         pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
 
         instance->private->priority = clone->private->priority;
     }
 }
 
 /*!
  * \internal
  * \brief Create actions specific to promotable clones
  *
  * \param[in,out] clone  Promotable clone to create actions for
  */
 void
 pcmk__create_promotable_actions(pcmk_resource_t *clone)
 {
     bool any_promoting = false;
     bool any_demoting = false;
 
     // Create actions for each clone instance individually
     create_promotable_instance_actions(clone, &any_promoting, &any_demoting);
 
     // Create pseudo-actions for clone as a whole
     pe__create_promotable_pseudo_ops(clone, any_promoting, any_demoting);
 
     // Undo our temporary repurposing of resource priority for instances
     reset_instance_priorities(clone);
 }
 
 /*!
  * \internal
  * \brief Create internal orderings for a promotable clone's instances
  *
  * \param[in,out] clone  Promotable clone instance to order
  */
 void
 pcmk__order_promotable_instances(pcmk_resource_t *clone)
 {
     pcmk_resource_t *previous = NULL; // Needed for ordered clones
 
     pcmk__promotable_restart_ordering(clone);
 
     for (GList *iter = clone->private->children;
          iter != NULL; iter = iter->next) {
 
         pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
 
         // Demote before promote
         pcmk__order_resource_actions(instance, PCMK_ACTION_DEMOTE,
                                      instance, PCMK_ACTION_PROMOTE,
                                      pcmk__ar_ordered);
 
         order_instance_promotion(clone, instance, previous);
         order_instance_demotion(clone, instance, previous);
         previous = instance;
     }
 }
 
 /*!
  * \internal
  * \brief Update dependent's allowed nodes for colocation with promotable
  *
  * \param[in,out] dependent     Dependent resource to update
  * \param[in]     primary       Primary resource
  * \param[in]     primary_node  Node where an instance of the primary will be
  * \param[in]     colocation    Colocation constraint to apply
  */
 static void
 update_dependent_allowed_nodes(pcmk_resource_t *dependent,
                                const pcmk_resource_t *primary,
                                const pcmk_node_t *primary_node,
                                const pcmk__colocation_t *colocation)
 {
     GHashTableIter iter;
     pcmk_node_t *node = NULL;
     const char *primary_value = NULL;
     const char *attr = colocation->node_attribute;
 
     if (colocation->score >= PCMK_SCORE_INFINITY) {
         return; // Colocation is mandatory, so allowed node scores don't matter
     }
 
     primary_value = pcmk__colocation_node_attr(primary_node, attr, primary);
 
     pcmk__rsc_trace(colocation->primary,
                     "Applying %s (%s with %s on %s by %s @%d) to %s",
                     colocation->id, colocation->dependent->id,
                     colocation->primary->id, pcmk__node_name(primary_node),
                     attr, colocation->score, dependent->id);
 
     g_hash_table_iter_init(&iter, dependent->private->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
         const char *dependent_value = pcmk__colocation_node_attr(node, attr,
                                                                  dependent);
 
         if (pcmk__str_eq(primary_value, dependent_value, pcmk__str_casei)) {
             node->assign->score = pcmk__add_scores(node->assign->score,
                                                    colocation->score);
             pcmk__rsc_trace(colocation->primary,
                             "Added %s score (%s) to %s (now %s)",
                             colocation->id,
                             pcmk_readable_score(colocation->score),
                             pcmk__node_name(node),
                             pcmk_readable_score(node->assign->score));
         }
     }
 }
 
 /*!
  * \internal
  * \brief Update dependent for a colocation with a promotable clone
  *
  * \param[in]     primary     Primary resource in the colocation
  * \param[in,out] dependent   Dependent resource in the colocation
  * \param[in]     colocation  Colocation constraint to apply
  */
 void
 pcmk__update_dependent_with_promotable(const pcmk_resource_t *primary,
                                        pcmk_resource_t *dependent,
                                        const pcmk__colocation_t *colocation)
 {
     GList *affected_nodes = NULL;
 
     /* Build a list of all nodes where an instance of the primary will be, and
      * (for optional colocations) update the dependent's allowed node scores for
      * each one.
      */
     for (GList *iter = primary->private->children;
          iter != NULL; iter = iter->next) {
 
         pcmk_resource_t *instance = (pcmk_resource_t *) iter->data;
         pcmk_node_t *node = instance->private->fns->location(instance, NULL,
                                                              FALSE);
 
         if (node == NULL) {
             continue;
         }
         if (instance->private->fns->state(instance,
                                           FALSE) == colocation->primary_role) {
             update_dependent_allowed_nodes(dependent, primary, node,
                                            colocation);
             affected_nodes = g_list_prepend(affected_nodes, node);
         }
     }
 
     /* For mandatory colocations, add the primary's node score to the
      * dependent's node score for each affected node, and ban the dependent
      * from all other nodes.
      *
      * However, skip this for promoted-with-promoted colocations, otherwise
      * inactive dependent instances can't start (in the unpromoted role).
      */
     if ((colocation->score >= PCMK_SCORE_INFINITY)
         && ((colocation->dependent_role != pcmk_role_promoted)
             || (colocation->primary_role != pcmk_role_promoted))) {
 
         pcmk__rsc_trace(colocation->primary,
                         "Applying %s (mandatory %s with %s) to %s",
                         colocation->id, colocation->dependent->id,
                         colocation->primary->id, dependent->id);
         pcmk__colocation_intersect_nodes(dependent, primary, colocation,
                                          affected_nodes, true);
     }
     g_list_free(affected_nodes);
 }
 
 /*!
  * \internal
  * \brief Update dependent priority for colocation with promotable
  *
  * \param[in]     primary     Primary resource in the colocation
  * \param[in,out] dependent   Dependent resource in the colocation
  * \param[in]     colocation  Colocation constraint to apply
  */
 void
 pcmk__update_promotable_dependent_priority(const pcmk_resource_t *primary,
                                            pcmk_resource_t *dependent,
                                            const pcmk__colocation_t *colocation)
 {
     pcmk_resource_t *primary_instance = NULL;
 
     // Look for a primary instance where dependent will be
     primary_instance = pcmk__find_compatible_instance(dependent, primary,
                                                       colocation->primary_role,
                                                       false);
 
     if (primary_instance != NULL) {
         // Add primary instance's priority to dependent's
         int new_priority = pcmk__add_scores(dependent->private->priority,
                                             colocation->score);
 
         pcmk__rsc_trace(colocation->primary,
                         "Applying %s (%s with %s) to %s priority "
                         "(%s + %s = %s)",
                         colocation->id, colocation->dependent->id,
                         colocation->primary->id, dependent->id,
                         pcmk_readable_score(dependent->private->priority),
                         pcmk_readable_score(colocation->score),
                         pcmk_readable_score(new_priority));
         dependent->private->priority = new_priority;
 
     } else if (colocation->score >= PCMK_SCORE_INFINITY) {
         // Mandatory colocation, but primary won't be here
         pcmk__rsc_trace(colocation->primary,
                         "Applying %s (%s with %s) to %s: can't be promoted",
                         colocation->id, colocation->dependent->id,
                         colocation->primary->id, dependent->id);
         dependent->private->priority = -PCMK_SCORE_INFINITY;
     }
 }
diff --git a/lib/pacemaker/pcmk_sched_resource.c b/lib/pacemaker/pcmk_sched_resource.c
index e555a30ef3..7a7051cc95 100644
--- a/lib/pacemaker/pcmk_sched_resource.c
+++ b/lib/pacemaker/pcmk_sched_resource.c
@@ -1,795 +1,796 @@
 /*
  * Copyright 2014-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdlib.h>
 #include <string.h>
 #include <crm/common/xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 // Resource assignment methods by resource variant
 static pcmk__assignment_methods_t assignment_methods[] = {
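     // Primitive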
     {
         pcmk__primitive_assign,
         pcmk__primitive_create_actions,
         pcmk__probe_rsc_on_node,
         pcmk__primitive_internal_constraints,
         pcmk__primitive_apply_coloc_score,
         pcmk__colocated_resources,
         pcmk__with_primitive_colocations,
         pcmk__primitive_with_colocations,
         pcmk__add_colocated_node_scores,
         pcmk__apply_location,
         pcmk__primitive_action_flags,
         pcmk__update_ordered_actions,
         pcmk__output_resource_actions,
         pcmk__add_rsc_actions_to_graph,
         pcmk__primitive_add_graph_meta,
         pcmk__primitive_add_utilization,
         pcmk__primitive_shutdown_lock,
     },
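     // Group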
     {
         pcmk__group_assign,
         pcmk__group_create_actions,
         pcmk__probe_rsc_on_node,
         pcmk__group_internal_constraints,
         pcmk__group_apply_coloc_score,
         pcmk__group_colocated_resources,
         pcmk__with_group_colocations,
         pcmk__group_with_colocations,
         pcmk__group_add_colocated_node_scores,
         pcmk__group_apply_location,
         pcmk__group_action_flags,
         pcmk__group_update_ordered_actions,
         pcmk__output_resource_actions,
         pcmk__add_rsc_actions_to_graph,
         pcmk__noop_add_graph_meta,
         pcmk__group_add_utilization,
         pcmk__group_shutdown_lock,
     },
     {
         pcmk__clone_assign,
         pcmk__clone_create_actions,
         pcmk__clone_create_probe,
         pcmk__clone_internal_constraints,
         pcmk__clone_apply_coloc_score,
         pcmk__colocated_resources,
         pcmk__with_clone_colocations,
         pcmk__clone_with_colocations,
         pcmk__add_colocated_node_scores,
         pcmk__clone_apply_location,
         pcmk__clone_action_flags,
         pcmk__instance_update_ordered_actions,
         pcmk__output_resource_actions,
         pcmk__clone_add_actions_to_graph,
         pcmk__clone_add_graph_meta,
         pcmk__clone_add_utilization,
         pcmk__clone_shutdown_lock,
     },
     {
         pcmk__bundle_assign,
         pcmk__bundle_create_actions,
         pcmk__bundle_create_probe,
         pcmk__bundle_internal_constraints,
         pcmk__bundle_apply_coloc_score,
         pcmk__colocated_resources,
         pcmk__with_bundle_colocations,
         pcmk__bundle_with_colocations,
         pcmk__add_colocated_node_scores,
         pcmk__bundle_apply_location,
         pcmk__bundle_action_flags,
         pcmk__instance_update_ordered_actions,
         pcmk__output_bundle_actions,
         pcmk__bundle_add_actions_to_graph,
         pcmk__noop_add_graph_meta,
         pcmk__bundle_add_utilization,
         pcmk__bundle_shutdown_lock,
     }
 };
 
 /*!
  * \internal
  * \brief Check whether a resource's agent standard, provider, or type changed
  *
  * \param[in,out] rsc             Resource to check
  * \param[in,out] node            Node needing unfencing if agent changed
  * \param[in]     rsc_entry       XML with previously known agent information
  * \param[in]     active_on_node  Whether \p rsc is active on \p node
  *
  * \return true if agent for \p rsc changed, otherwise false
  */
 bool
 pcmk__rsc_agent_changed(pcmk_resource_t *rsc, pcmk_node_t *node,
                         const xmlNode *rsc_entry, bool active_on_node)
 {
     bool changed = false;
     const char *attr_list[] = {
         PCMK_XA_TYPE,
         PCMK_XA_CLASS,
         PCMK_XA_PROVIDER,
     };
 
     for (int i = 0; i < PCMK__NELEM(attr_list); i++) {
         const char *value = crm_element_value(rsc->private->xml, attr_list[i]);
         const char *old_value = crm_element_value(rsc_entry, attr_list[i]);
 
         if (!pcmk__str_eq(value, old_value, pcmk__str_none)) {
             changed = true;
             trigger_unfencing(rsc, node, "Device definition changed", NULL,
                               rsc->private->scheduler);
             if (active_on_node) {
                 crm_notice("Forcing restart of %s on %s "
                            "because %s changed from '%s' to '%s'",
                            rsc->id, pcmk__node_name(node), attr_list[i],
                            pcmk__s(old_value, ""), pcmk__s(value, ""));
             }
         }
     }
     if (changed && active_on_node) {
         // Make sure the resource is restarted
         custom_action(rsc, stop_key(rsc), PCMK_ACTION_STOP, node, FALSE,
                       rsc->private->scheduler);
         pcmk__set_rsc_flags(rsc, pcmk__rsc_start_pending);
     }
     return changed;
 }
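 
 /* For example (hypothetical change): if a primitive's PCMK_XA_PROVIDER was
  * recorded as "heartbeat" in its history but the configuration now says
  * "pacemaker", the loop above detects the mismatch and triggers unfencing,
  * and if the resource is active on the node, a non-optional stop is
  * scheduled so the resource restarts with the new agent definition.
  */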
 
 /*!
  * \internal
  * \brief Add resource (and any matching children) to list if it matches ID
  *
  * \param[in,out] result  List to add resource to
  * \param[in]     rsc     Resource to check
  * \param[in]     id      ID to match
  *
  * \return (Possibly new) head of list
  */
 static GList *
 add_rsc_if_matching(GList *result, pcmk_resource_t *rsc, const char *id)
 {
     if (pcmk__str_eq(id, rsc->id, pcmk__str_none)
         || pcmk__str_eq(id, rsc->private->history_id, pcmk__str_none)) {
         result = g_list_prepend(result, rsc);
     }
 
     for (GList *iter = rsc->private->children;
          iter != NULL; iter = iter->next) {
 
         pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
 
         result = add_rsc_if_matching(result, child, id);
     }
     return result;
 }
 
 /*!
  * \internal
  * \brief Find all resources matching a given ID by either ID or clone name
  *
  * \param[in] id         Resource ID to check
  * \param[in] scheduler  Scheduler data
  *
  * \return List of all resources that match \p id
  * \note The caller is responsible for freeing the return value with
  *       g_list_free().
  */
 GList *
 pcmk__rscs_matching_id(const char *id, const pcmk_scheduler_t *scheduler)
 {
     GList *result = NULL;
 
     CRM_CHECK((id != NULL) && (scheduler != NULL), return NULL);
     for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
         result = add_rsc_if_matching(result, (pcmk_resource_t *) iter->data,
                                      id);
     }
     return result;
 }
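 
 /* A minimal usage sketch (hypothetical caller): collect all matches, log
  * them, and free only the list (the resources remain owned by the scheduler
  * data):
  *
  *     GList *matches = pcmk__rscs_matching_id("my-clone", scheduler);
  *
  *     for (GList *iter = matches; iter != NULL; iter = iter->next) {
  *         pcmk_resource_t *match = iter->data;
  *
  *         crm_info("Matched %s", match->id);
  *     }
  *     g_list_free(matches);
  */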
 
 /*!
  * \internal
  * \brief Set the variant-appropriate assignment methods for a resource
  *
  * \param[in,out] data       Resource to set assignment methods for
  * \param[in]     user_data  Ignored
  */
 static void
 set_assignment_methods_for_rsc(gpointer data, gpointer user_data)
 {
     pcmk_resource_t *rsc = data;
 
     rsc->private->cmds = &assignment_methods[rsc->private->variant];
     g_list_foreach(rsc->private->children, set_assignment_methods_for_rsc,
                    NULL);
 }
 
 /*!
  * \internal
  * \brief Set the variant-appropriate assignment methods for all resources
  *
  * \param[in,out] scheduler  Scheduler data
  */
 void
 pcmk__set_assignment_methods(pcmk_scheduler_t *scheduler)
 {
     g_list_foreach(scheduler->resources, set_assignment_methods_for_rsc, NULL);
 }
 
 /*!
  * \internal
  * \brief Wrapper for colocated_resources() method for readability
  *
  * \param[in]      rsc       Resource to add to colocated list
  * \param[in]      orig_rsc  Resource originally requested
  * \param[in,out]  list      Pointer to list to add to
  *
  * \note The (possibly new) head of the list is stored via \p list rather
  *       than returned
  */
 static inline void
 add_colocated_resources(const pcmk_resource_t *rsc,
                         const pcmk_resource_t *orig_rsc, GList **list)
 {
     *list = rsc->private->cmds->colocated_resources(rsc, orig_rsc, *list);
 }
 
 // Shared implementation of pcmk__assignment_methods_t:colocated_resources()
 GList *
 pcmk__colocated_resources(const pcmk_resource_t *rsc,
                           const pcmk_resource_t *orig_rsc,
                           GList *colocated_rscs)
 {
     const GList *iter = NULL;
     GList *colocations = NULL;
 
     if (orig_rsc == NULL) {
         orig_rsc = rsc;
     }
 
     if ((rsc == NULL) || (g_list_find(colocated_rscs, rsc) != NULL)) {
         return colocated_rscs;
     }
 
     pcmk__rsc_trace(orig_rsc, "%s is in colocation chain with %s",
                     rsc->id, orig_rsc->id);
     colocated_rscs = g_list_prepend(colocated_rscs, (gpointer) rsc);
 
     // Follow colocations where this resource is the dependent resource
     colocations = pcmk__this_with_colocations(rsc);
     for (iter = colocations; iter != NULL; iter = iter->next) {
         const pcmk__colocation_t *constraint = iter->data;
         const pcmk_resource_t *primary = constraint->primary;
 
         if (primary == orig_rsc) {
             continue; // Break colocation loop
         }
 
         if ((constraint->score == PCMK_SCORE_INFINITY) &&
             (pcmk__colocation_affects(rsc, primary, constraint,
                                       true) == pcmk__coloc_affects_location)) {
             add_colocated_resources(primary, orig_rsc, &colocated_rscs);
         }
     }
     g_list_free(colocations);
 
     // Follow colocations where this resource is the primary resource
     colocations = pcmk__with_this_colocations(rsc);
     for (iter = colocations; iter != NULL; iter = iter->next) {
         const pcmk__colocation_t *constraint = iter->data;
         const pcmk_resource_t *dependent = constraint->dependent;
 
         if (dependent == orig_rsc) {
             continue; // Break colocation loop
         }
 
         if (pcmk__is_clone(rsc) && !pcmk__is_clone(dependent)) {
             continue; // We can't be sure whether dependent will be colocated
         }
 
         if ((constraint->score == PCMK_SCORE_INFINITY) &&
             (pcmk__colocation_affects(dependent, rsc, constraint,
                                       true) == pcmk__coloc_affects_location)) {
             add_colocated_resources(dependent, orig_rsc, &colocated_rscs);
         }
     }
     g_list_free(colocations);
 
     return colocated_rscs;
 }
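 
 /* Illustrative chain (hypothetical constraints): given mandatory colocations
  * "B with A" and "C with B", calling this on A yields the list C, B, A (most
  * recently discovered first), because each INFINITY colocation that affects
  * location pulls the peer into the chain, while the orig_rsc checks prevent
  * cycling back through A.
  */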
 
 // No-op function for variants that don't need to implement add_graph_meta()
 void
 pcmk__noop_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml)
 {
 }
 
 /*!
  * \internal
  * \brief Output a summary of scheduled actions for a resource
  *
  * \param[in,out] rsc  Resource to output actions for
  */
 void
 pcmk__output_resource_actions(pcmk_resource_t *rsc)
 {
     pcmk_node_t *next = NULL;
     pcmk_node_t *current = NULL;
     pcmk__output_t *out = NULL;
 
     CRM_ASSERT(rsc != NULL);
 
     out = rsc->private->scheduler->priv;
     if (rsc->private->children != NULL) {
 
         for (GList *iter = rsc->private->children;
              iter != NULL; iter = iter->next) {
 
             pcmk_resource_t *child = (pcmk_resource_t *) iter->data;
 
             child->private->cmds->output_actions(child);
         }
         return;
     }
 
     next = rsc->private->assigned_node;
     if (rsc->private->active_nodes != NULL) {
         current = pcmk__current_node(rsc);
         if (rsc->private->orig_role == pcmk_role_stopped) {
             /* This can occur when resources are being recovered because
              * the current role can change in pcmk__primitive_create_actions()
              */
             rsc->private->orig_role = pcmk_role_started;
         }
     }
 
     if ((current == NULL) && pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
         /* Don't log stopped orphans */
         return;
     }
 
     out->message(out, "rsc-action", rsc, current, next);
 }
 
 /*!
  * \internal
  * \brief Add a resource to a node's list of assigned resources
  *
  * \param[in,out] node  Node to add resource to
  * \param[in]     rsc   Resource to add
  */
 static inline void
 add_assigned_resource(pcmk_node_t *node, pcmk_resource_t *rsc)
 {
     node->private->assigned_resources =
         g_list_prepend(node->private->assigned_resources, rsc);
 }
 
 /*!
  * \internal
  * \brief Assign a specified resource (of any variant) to a node
  *
  * Assign a specified resource and its children (if any) to a specified node, if
  * the node can run the resource (or unconditionally, if \p force is true). Mark
  * the resources as no longer provisional.
  *
  * If a resource can't be assigned (or \p node is \c NULL), unassign any
  * previous assignment. If \p stop_if_fail is \c true, set next role to stopped
  * and update any existing actions scheduled for the resource.
  *
  * \param[in,out] rsc           Resource to assign
  * \param[in,out] node          Node to assign \p rsc to
  * \param[in]     force         If true, assign to \p node even if unavailable
  * \param[in]     stop_if_fail  If \c true and either \p rsc can't be assigned
  *                              or \p node is \c NULL, set next role to
  *                              stopped and update existing actions (if \p rsc
  *                              is not a primitive, this applies to its
  *                              primitive descendants instead)
  *
  * \return \c true if the assignment of \p rsc changed, or \c false otherwise
  *
  * \note Assigning a resource to the NULL node using this function is different
  *       from calling pcmk__unassign_resource(), in that it may also update any
  *       actions created for the resource.
  * \note The \c pcmk__assignment_methods_t:assign() method is preferred, unless
  *       a resource should be assigned to the \c NULL node or every resource in
  *       a tree should be assigned to the same node.
  * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource() can
  *       completely undo the assignment. A successful assignment can be either
  *       undone or left alone as final. A failed assignment has the same effect
  *       as calling pcmk__unassign_resource(); there are no side effects on
  *       roles or actions.
  */
 bool
 pcmk__assign_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool force,
                       bool stop_if_fail)
 {
     bool changed = false;
     pcmk_scheduler_t *scheduler = NULL;
 
     CRM_ASSERT(rsc != NULL);
     scheduler = rsc->private->scheduler;
 
     if (rsc->private->children != NULL) {
 
         for (GList *iter = rsc->private->children;
              iter != NULL; iter = iter->next) {
 
             pcmk_resource_t *child_rsc = iter->data;
 
             changed |= pcmk__assign_resource(child_rsc, node, force,
                                              stop_if_fail);
         }
         return changed;
     }
 
     // Assigning a primitive
 
     if (!force && (node != NULL)
         && ((node->assign->score < 0)
             // Allow graph to assume that guest node connections will come up
             || (!pcmk__node_available(node, true, false)
                 && !pcmk__is_guest_or_bundle_node(node)))) {
 
         pcmk__rsc_debug(rsc,
                         "All nodes for resource %s are unavailable, unclean or "
                         "shutting down (%s can%s run resources, with score %s)",
                         rsc->id, pcmk__node_name(node),
                         (pcmk__node_available(node, true, false)? "" : "not"),
                         pcmk_readable_score(node->assign->score));
 
         if (stop_if_fail) {
             pe__set_next_role(rsc, pcmk_role_stopped, "node availability");
         }
         node = NULL;
     }
 
     if (rsc->private->assigned_node != NULL) {
         changed = !pcmk__same_node(rsc->private->assigned_node, node);
     } else {
         changed = (node != NULL);
     }
     pcmk__unassign_resource(rsc);
     pcmk__clear_rsc_flags(rsc, pcmk__rsc_unassigned);
 
     if (node == NULL) {
         char *rc_stopped = NULL;
 
         pcmk__rsc_debug(rsc, "Could not assign %s to a node", rsc->id);
 
         if (!stop_if_fail) {
             return changed;
         }
         pe__set_next_role(rsc, pcmk_role_stopped, "unable to assign");
 
         for (GList *iter = rsc->private->actions;
              iter != NULL; iter = iter->next) {
 
             pcmk_action_t *op = (pcmk_action_t *) iter->data;
 
             pcmk__rsc_debug(rsc, "Updating %s for %s assignment failure",
                             op->uuid, rsc->id);
 
             if (pcmk__str_eq(op->task, PCMK_ACTION_STOP, pcmk__str_none)) {
                 pcmk__clear_action_flags(op, pcmk__action_optional);
 
             } else if (pcmk__str_eq(op->task, PCMK_ACTION_START,
                                     pcmk__str_none)) {
                 pcmk__clear_action_flags(op, pcmk__action_runnable);
 
             } else {
                 // Cancel recurring actions, unless for stopped state
                 const char *interval_ms_s = NULL;
                 const char *target_rc_s = NULL;
 
                 interval_ms_s = g_hash_table_lookup(op->meta,
                                                     PCMK_META_INTERVAL);
                 target_rc_s = g_hash_table_lookup(op->meta,
                                                   PCMK__META_OP_TARGET_RC);
                 if (rc_stopped == NULL) {
                     rc_stopped = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
                 }
 
                 if (!pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)
                     && !pcmk__str_eq(rc_stopped, target_rc_s, pcmk__str_none)) {
 
                     pcmk__clear_action_flags(op, pcmk__action_runnable);
                 }
             }
         }
         free(rc_stopped);
         return changed;
     }
 
     pcmk__rsc_debug(rsc, "Assigning %s to %s", rsc->id, pcmk__node_name(node));
     rsc->private->assigned_node = pe__copy_node(node);
 
     add_assigned_resource(node, rsc);
     node->private->num_resources++;
     node->assign->count++;
     pcmk__consume_node_capacity(node->private->utilization, rsc);
 
     if (pcmk_is_set(scheduler->flags, pcmk_sched_show_utilization)) {
         pcmk__output_t *out = scheduler->priv;
 
         out->message(out, "resource-util", rsc, node, __func__);
     }
     return changed;
 }
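 
 /* A minimal sketch (hypothetical caller): with stop_if_fail false, a trial
  * assignment can be completely undone later:
  *
  *     if (pcmk__assign_resource(rsc, node, false, false)) {
  *         // Assignment changed; roll it back if it turns out to be unwanted
  *         pcmk__unassign_resource(rsc);
  *     }
  */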
 
 /*!
  * \internal
  * \brief Remove any node assignment from a specified resource and its children
  *
  * If a specified resource has been assigned to a node, remove that assignment
  * and mark the resource as provisional again.
  *
  * \param[in,out] rsc  Resource to unassign
  *
  * \note This function is called recursively on \p rsc and its children.
  */
 void
 pcmk__unassign_resource(pcmk_resource_t *rsc)
 {
     pcmk_node_t *old = rsc->private->assigned_node;
 
     if (old == NULL) {
         crm_info("Unassigning %s", rsc->id);
     } else {
         crm_info("Unassigning %s from %s", rsc->id, pcmk__node_name(old));
     }
 
     pcmk__set_rsc_flags(rsc, pcmk__rsc_unassigned);
 
     if (rsc->private->children == NULL) {
         if (old == NULL) {
             return;
         }
         rsc->private->assigned_node = NULL;
 
         /* We're going to free this copy of the pcmk_node_t, but the data it
          * points to is shared with other copies and will remain, so update
          * that appropriately first.
          */
         old->private->assigned_resources =
             g_list_remove(old->private->assigned_resources, rsc);
         old->private->num_resources--;
         pcmk__release_node_capacity(old->private->utilization, rsc);
         free(old);
         return;
     }
 
     for (GList *iter = rsc->private->children;
          iter != NULL; iter = iter->next) {
 
         pcmk__unassign_resource((pcmk_resource_t *) iter->data);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a resource has reached its migration threshold on a node
  *
  * \param[in,out] rsc       Resource to check
  * \param[in]     node      Node to check
  * \param[out]    failed    If threshold has been reached, this will be set
  *                          to the resource that failed (possibly a parent
  *                          of \p rsc)
  *
  * \return true if the migration threshold has been reached, false otherwise
  */
 bool
 pcmk__threshold_reached(pcmk_resource_t *rsc, const pcmk_node_t *node,
                         pcmk_resource_t **failed)
 {
     int fail_count, remaining_tries;
     pcmk_resource_t *rsc_to_ban = rsc;
 
     // Migration threshold of 0 means never force away
     if (rsc->private->ban_after_failures == 0) {
         return false;
     }
 
     // If we're ignoring failures, also ignore the migration threshold
     if (pcmk_is_set(rsc->flags, pcmk__rsc_ignore_failure)) {
         return false;
     }
 
     // If there are no failures, there's no need to force away
     fail_count = pe_get_failcount(node, rsc, NULL,
                                   pcmk__fc_effective|pcmk__fc_launched, NULL);
     if (fail_count <= 0) {
         return false;
     }
 
     // If failed resource is an anonymous clone instance, ban the whole clone
     if (!pcmk_is_set(rsc->flags, pcmk__rsc_unique)) {
         rsc_to_ban = uber_parent(rsc);
     }
 
     // How many more times recovery will be tried on this node
     remaining_tries = rsc->private->ban_after_failures - fail_count;
 
     if (remaining_tries <= 0) {
-        pcmk__sched_warn("%s cannot run on %s due to reaching migration "
+        pcmk__sched_warn(rsc->private->scheduler,
+                         "%s cannot run on %s due to reaching migration "
                          "threshold (clean up resource to allow again)"
                          QB_XS " failures=%d "
                          PCMK_META_MIGRATION_THRESHOLD "=%d",
                          rsc_to_ban->id, pcmk__node_name(node), fail_count,
                          rsc->private->ban_after_failures);
         if (failed != NULL) {
             *failed = rsc_to_ban;
         }
         return true;
     }
 
     crm_info("%s can fail %d more time%s on "
              "%s before reaching migration threshold (%d)",
              rsc_to_ban->id, remaining_tries, pcmk__plural_s(remaining_tries),
              pcmk__node_name(node), rsc->private->ban_after_failures);
     return false;
 }
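 
 /* Worked example (hypothetical counts): with
  * PCMK_META_MIGRATION_THRESHOLD=3 and an effective fail count of 2,
  * remaining_tries is 3 - 2 = 1, so the resource may fail here once more;
  * once the fail count reaches 3, the resource (or, for an anonymous clone
  * instance, its whole clone) is banned from the node until cleanup.
  */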
 
 /*!
  * \internal
  * \brief Get a node's score
  *
  * \param[in] node     Node with ID to check
  * \param[in] nodes    Hash table of nodes in which to look up \p node's score
  *
  * \return Node's score, or -INFINITY if not found
  */
 static int
 get_node_score(const pcmk_node_t *node, GHashTable *nodes)
 {
     pcmk_node_t *found_node = NULL;
 
     if ((node != NULL) && (nodes != NULL)) {
         found_node = g_hash_table_lookup(nodes, node->private->id);
     }
     if (found_node == NULL) {
         return -PCMK_SCORE_INFINITY;
     }
     return found_node->assign->score;
 }
 
 /*!
  * \internal
  * \brief Compare two resources according to which should be assigned first
  *
  * \param[in] a     First resource to compare
  * \param[in] b     Second resource to compare
  * \param[in] data  Sorted list of all nodes in cluster
  *
  * \return -1 if \p a should be assigned before \p b, 0 if they are equal,
  *         or +1 if \p a should be assigned after \p b
  */
 static gint
 cmp_resources(gconstpointer a, gconstpointer b, gpointer data)
 {
     /* GLib insists that this function require gconstpointer arguments, but we
      * make a small, temporary change to each argument (setting the
      * pe_rsc_merging flag) during comparison
      */
     pcmk_resource_t *resource1 = (pcmk_resource_t *) a;
     pcmk_resource_t *resource2 = (pcmk_resource_t *) b;
     const GList *nodes = data;
 
     int rc = 0;
     int r1_score = -PCMK_SCORE_INFINITY;
     int r2_score = -PCMK_SCORE_INFINITY;
     pcmk_node_t *r1_node = NULL;
     pcmk_node_t *r2_node = NULL;
     GHashTable *r1_nodes = NULL;
     GHashTable *r2_nodes = NULL;
     const char *reason = NULL;
 
     // Resources with highest priority should be assigned first
     reason = "priority";
     r1_score = resource1->private->priority;
     r2_score = resource2->private->priority;
     if (r1_score > r2_score) {
         rc = -1;
         goto done;
     }
     if (r1_score < r2_score) {
         rc = 1;
         goto done;
     }
 
     // We need nodes to make any other useful comparisons
     reason = "no node list";
     if (nodes == NULL) {
         goto done;
     }
 
     // Calculate and log node scores
     resource1->private->cmds->add_colocated_node_scores(resource1, NULL,
                                                         resource1->id,
                                                         &r1_nodes, NULL, 1,
                                                         pcmk__coloc_select_this_with);
     resource2->private->cmds->add_colocated_node_scores(resource2, NULL,
                                                         resource2->id,
                                                         &r2_nodes, NULL, 1,
                                                         pcmk__coloc_select_this_with);
     pe__show_node_scores(true, NULL, resource1->id, r1_nodes,
                          resource1->private->scheduler);
     pe__show_node_scores(true, NULL, resource2->id, r2_nodes,
                          resource2->private->scheduler);
 
     // The resource with the highest score on its current node goes first
     reason = "current location";
     if (resource1->private->active_nodes != NULL) {
         r1_node = pcmk__current_node(resource1);
     }
     if (resource2->private->active_nodes != NULL) {
         r2_node = pcmk__current_node(resource2);
     }
     r1_score = get_node_score(r1_node, r1_nodes);
     r2_score = get_node_score(r2_node, r2_nodes);
     if (r1_score > r2_score) {
         rc = -1;
         goto done;
     }
     if (r1_score < r2_score) {
         rc = 1;
         goto done;
     }
 
     // Otherwise a higher score on any node will do
     reason = "score";
     for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
         const pcmk_node_t *node = (const pcmk_node_t *) iter->data;
 
         r1_score = get_node_score(node, r1_nodes);
         r2_score = get_node_score(node, r2_nodes);
         if (r1_score > r2_score) {
             rc = -1;
             goto done;
         }
         if (r1_score < r2_score) {
             rc = 1;
             goto done;
         }
     }
 
 done:
     crm_trace("%s (%d)%s%s %c %s (%d)%s%s: %s",
               resource1->id, r1_score,
               ((r1_node == NULL)? "" : " on "),
               ((r1_node == NULL)? "" : r1_node->private->id),
               ((rc < 0)? '>' : ((rc > 0)? '<' : '=')),
               resource2->id, r2_score,
               ((r2_node == NULL)? "" : " on "),
               ((r2_node == NULL)? "" : r2_node->private->id),
               reason);
     if (r1_nodes != NULL) {
         g_hash_table_destroy(r1_nodes);
     }
     if (r2_nodes != NULL) {
         g_hash_table_destroy(r2_nodes);
     }
     return rc;
 }
 
 /*!
  * \internal
  * \brief Sort resources in the order they should be assigned to nodes
  *
  * \param[in,out] scheduler  Scheduler data
  */
 void
 pcmk__sort_resources(pcmk_scheduler_t *scheduler)
 {
     GList *nodes = g_list_copy(scheduler->nodes);
 
     nodes = pcmk__sort_nodes(nodes, NULL);
     scheduler->resources = g_list_sort_with_data(scheduler->resources,
                                                  cmp_resources, nodes);
     g_list_free(nodes);
 }
diff --git a/lib/pacemaker/pcmk_scheduler.c b/lib/pacemaker/pcmk_scheduler.c
index 74d91ebbc4..39c95ecfc9 100644
--- a/lib/pacemaker/pcmk_scheduler.c
+++ b/lib/pacemaker/pcmk_scheduler.c
@@ -1,907 +1,908 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/cib/internal.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 #include <crm/common/scheduler_internal.h>
 
 #include <glib.h>
 
 #include <crm/pengine/status.h>
 #include <pacemaker-internal.h>
 #include "libpacemaker_private.h"
 
 CRM_TRACE_INIT_DATA(pacemaker);
 
 /*!
  * \internal
  * \brief Do deferred action checks after assignment
  *
  * When unpacking the resource history, the scheduler checks for resource
  * configurations that have changed since an action was run. However, at that
  * time, bundles using the REMOTE_CONTAINER_HACK don't have their final
  * parameter information, so instead they add a deferred check to a list. This
  * function processes one entry in that list.
  *
  * \param[in,out] rsc     Resource that action history is for
  * \param[in,out] node    Node that action history is for
  * \param[in]     rsc_op  Action history entry
  * \param[in]     check   Type of deferred check to do
  */
 static void
 check_params(pcmk_resource_t *rsc, pcmk_node_t *node, const xmlNode *rsc_op,
              enum pcmk__check_parameters check)
 {
     const char *reason = NULL;
     pcmk__op_digest_t *digest_data = NULL;
 
     switch (check) {
         case pcmk__check_active:
             if (pcmk__check_action_config(rsc, node, rsc_op)
                 && pe_get_failcount(node, rsc, NULL, pcmk__fc_effective,
                                     NULL)) {
                 reason = "action definition changed";
             }
             break;
 
         case pcmk__check_last_failure:
             digest_data = rsc_action_digest_cmp(rsc, rsc_op, node,
                                                 rsc->private->scheduler);
             switch (digest_data->rc) {
                 case pcmk__digest_unknown:
                     crm_trace("Resource %s history entry %s on %s has "
                               "no digest to compare",
                               rsc->id, pcmk__xe_id(rsc_op), node->private->id);
                     break;
                 case pcmk__digest_match:
                     break;
                 default:
                     reason = "resource parameters have changed";
                     break;
             }
             break;
     }
     if (reason != NULL) {
         pe__clear_failcount(rsc, node, reason, rsc->private->scheduler);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a resource has failcount clearing scheduled on a node
  *
  * \param[in] node  Node to check
  * \param[in] rsc   Resource to check
  *
  * \return true if \p rsc has failcount clearing scheduled on \p node,
  *         otherwise false
  */
 static bool
 failcount_clear_action_exists(const pcmk_node_t *node,
                               const pcmk_resource_t *rsc)
 {
     GList *list = pe__resource_actions(rsc, node, PCMK_ACTION_CLEAR_FAILCOUNT,
                                        TRUE);
 
     if (list != NULL) {
         g_list_free(list);
         return true;
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Ban a resource from a node if it reached its failure threshold there
  *
  * \param[in,out] data       Resource to check failure threshold for
  * \param[in]     user_data  Node to check resource on
  */
 static void
 check_failure_threshold(gpointer data, gpointer user_data)
 {
     pcmk_resource_t *rsc = data;
     const pcmk_node_t *node = user_data;
 
     // If this is a collective resource, apply recursively to children instead
     if (rsc->private->children != NULL) {
         g_list_foreach(rsc->private->children, check_failure_threshold,
                        user_data);
         return;
     }
 
     if (!failcount_clear_action_exists(node, rsc)) {
         /* Don't force the resource away from this node due to a failcount
          * that's going to be cleared.
          *
          * @TODO Failcount clearing can be scheduled in
          * pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
          * schedule_resource_actions() via check_params(). This runs well before
          * then, so it cannot detect those, meaning we might check the migration
          * threshold when we shouldn't. Worst case, we stop or move the
          * resource, then move it back in the next transition.
          */
         pcmk_resource_t *failed = NULL;
 
         if (pcmk__threshold_reached(rsc, node, &failed)) {
             resource_location(failed, node, -PCMK_SCORE_INFINITY,
                               "__fail_limit__", rsc->private->scheduler);
         }
     }
 }
 
 /*!
  * \internal
  * \brief If resource has exclusive discovery, ban node if not allowed
  *
  * Location constraints have a PCMK_XA_RESOURCE_DISCOVERY option that allows
  * users to specify where probes are done for the affected resource. If this is
  * set to \c exclusive, probes will only be done on nodes listed in exclusive
  * constraints. This function bans the resource from the node if the node is not
  * listed.
  *
  * \param[in,out] data       Resource to check
  * \param[in]     user_data  Node to check resource on
  */
 static void
 apply_exclusive_discovery(gpointer data, gpointer user_data)
 {
     pcmk_resource_t *rsc = data;
     const pcmk_node_t *node = user_data;
 
     if (pcmk_is_set(rsc->flags, pcmk__rsc_exclusive_probes)
         || pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
                        pcmk__rsc_exclusive_probes)) {
         pcmk_node_t *match = NULL;
 
         // If this is a collective resource, apply recursively to children
         g_list_foreach(rsc->private->children, apply_exclusive_discovery,
                        user_data);
 
         match = g_hash_table_lookup(rsc->private->allowed_nodes,
                                     node->private->id);
         if ((match != NULL)
             && (match->assign->probe_mode != pcmk__probe_exclusive)) {
             match->assign->score = -PCMK_SCORE_INFINITY;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Apply stickiness to a resource if appropriate
  *
  * \param[in,out] data       Resource to check for stickiness
  * \param[in]     user_data  Ignored
  */
 static void
 apply_stickiness(gpointer data, gpointer user_data)
 {
     pcmk_resource_t *rsc = data;
     pcmk_node_t *node = NULL;
 
     // If this is a collective resource, apply recursively to children instead
     if (rsc->private->children != NULL) {
         g_list_foreach(rsc->private->children, apply_stickiness, NULL);
         return;
     }
 
     /* A resource is sticky if it is managed, has stickiness configured, and is
      * active on a single node.
      */
     if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)
         || (rsc->private->stickiness < 1)
         || !pcmk__list_of_1(rsc->private->active_nodes)) {
         return;
     }
 
     node = rsc->private->active_nodes->data;
 
     /* In a symmetric cluster, stickiness can always be used. In an
      * asymmetric cluster, we have to check whether the resource is still
      * allowed on the node, so we don't keep the resource somewhere it is no
      * longer explicitly enabled.
      */
     if (!pcmk_is_set(rsc->private->scheduler->flags,
                      pcmk_sched_symmetric_cluster)
         && (g_hash_table_lookup(rsc->private->allowed_nodes,
                                 node->private->id) == NULL)) {
         pcmk__rsc_debug(rsc,
                         "Ignoring %s stickiness because the cluster is "
                         "asymmetric and %s is not explicitly allowed",
                         rsc->id, pcmk__node_name(node));
         return;
     }
 
     pcmk__rsc_debug(rsc, "Resource %s has %d stickiness on %s",
                     rsc->id, rsc->private->stickiness, pcmk__node_name(node));
     resource_location(rsc, node, rsc->private->stickiness, "stickiness",
                       rsc->private->scheduler);
 }
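 
 /* Worked example (hypothetical values): a managed resource with
  * resource-stickiness=100 that is active only on node1 gains a +100
  * location preference for node1, which assignment later weighs against
  * competing location and colocation scores.
  */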
 
 /*!
  * \internal
  * \brief Apply shutdown locks for all resources as appropriate
  *
  * \param[in,out] scheduler  Scheduler data
  */
 static void
 apply_shutdown_locks(pcmk_scheduler_t *scheduler)
 {
     if (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
         return;
     }
     for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
         pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
 
         rsc->private->cmds->shutdown_lock(rsc);
     }
 }
 
 /*!
  * \internal
  * \brief Calculate the number of available nodes in the cluster
  *
  * \param[in,out] scheduler  Scheduler data
  */
 static void
 count_available_nodes(pcmk_scheduler_t *scheduler)
 {
     if (pcmk_is_set(scheduler->flags, pcmk_sched_no_compat)) {
         return;
     }
 
     // @COMPAT for API backward compatibility only (cluster does not use value)
     for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
         pcmk_node_t *node = (pcmk_node_t *) iter->data;
 
         if ((node != NULL) && (node->assign->score >= 0)
             && node->details->online
             && (node->private->variant != pcmk__node_variant_ping)) {
             scheduler->max_valid_nodes++;
         }
     }
     crm_trace("Online node count: %d", scheduler->max_valid_nodes);
 }
 
 /*!
  * \internal
  * \brief Apply node-specific scheduling criteria
  *
  * After the CIB has been unpacked, process node-specific scheduling criteria
  * including shutdown locks, location constraints, resource stickiness,
  * migration thresholds, and exclusive resource discovery.
  *
  * \param[in,out] scheduler  Scheduler data
  */
 static void
 apply_node_criteria(pcmk_scheduler_t *scheduler)
 {
     crm_trace("Applying node-specific scheduling criteria");
     apply_shutdown_locks(scheduler);
     count_available_nodes(scheduler);
     pcmk__apply_locations(scheduler);
     g_list_foreach(scheduler->resources, apply_stickiness, NULL);
 
     for (GList *node_iter = scheduler->nodes; node_iter != NULL;
          node_iter = node_iter->next) {
         for (GList *rsc_iter = scheduler->resources; rsc_iter != NULL;
              rsc_iter = rsc_iter->next) {
             check_failure_threshold(rsc_iter->data, node_iter->data);
             apply_exclusive_discovery(rsc_iter->data, node_iter->data);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Assign resources to nodes
  *
  * \param[in,out] scheduler  Scheduler data
  */
 static void
 assign_resources(pcmk_scheduler_t *scheduler)
 {
     GList *iter = NULL;
 
     crm_trace("Assigning resources to nodes");
 
     if (!pcmk__str_eq(scheduler->placement_strategy, PCMK_VALUE_DEFAULT,
                       pcmk__str_casei)) {
         pcmk__sort_resources(scheduler);
     }
     pcmk__show_node_capacities("Original", scheduler);
 
     if (pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
         /* Assign remote connection resources first (which will also assign any
          * colocation dependencies). If the connection is migrating, always
          * prefer the partial migration target.
          */
         for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
             pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
             const pcmk_node_t *target = rsc->private->partial_migration_target;
 
             if (pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) {
                 pcmk__rsc_trace(rsc, "Assigning remote connection resource '%s'",
                                 rsc->id);
                 rsc->private->cmds->assign(rsc, target, true);
             }
         }
     }
 
     /* Now assign all other (non-remote-connection) resources */
     for (iter = scheduler->resources; iter != NULL; iter = iter->next) {
         pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
 
         if (!pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) {
             pcmk__rsc_trace(rsc, "Assigning %s resource '%s'",
                             rsc->private->xml->name, rsc->id);
             rsc->private->cmds->assign(rsc, NULL, true);
         }
     }
 
     pcmk__show_node_capacities("Remaining", scheduler);
 }
 
 /*!
  * \internal
  * \brief Schedule fail count clearing on online nodes if resource is orphaned
  *
  * \param[in,out] data       Resource to check
  * \param[in]     user_data  Ignored
  */
 static void
 clear_failcounts_if_orphaned(gpointer data, gpointer user_data)
 {
     pcmk_resource_t *rsc = data;
 
     if (!pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
         return;
     }
     crm_trace("Clear fail counts for orphaned resource %s", rsc->id);
 
     /* There's no need to recurse into rsc->private->children because those
      * should just be unassigned clone instances.
      */
 
     for (GList *iter = rsc->private->scheduler->nodes;
          iter != NULL; iter = iter->next) {
 
         pcmk_node_t *node = (pcmk_node_t *) iter->data;
         pcmk_action_t *clear_op = NULL;
 
         if (!node->details->online) {
             continue;
         }
         if (pe_get_failcount(node, rsc, NULL, pcmk__fc_effective, NULL) == 0) {
             continue;
         }
 
         clear_op = pe__clear_failcount(rsc, node, "it is orphaned",
                                        rsc->private->scheduler);
 
         /* We can't use order_action_then_stop() here because its
          * pcmk__ar_guest_allowed breaks things
          */
         pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
                            NULL, pcmk__ar_ordered, rsc->private->scheduler);
     }
 }
 
 /*!
  * \internal
  * \brief Schedule any resource actions needed
  *
  * \param[in,out] scheduler  Scheduler data
  */
 static void
 schedule_resource_actions(pcmk_scheduler_t *scheduler)
 {
     // Process deferred action checks
     pe__foreach_param_check(scheduler, check_params);
     pe__free_param_checks(scheduler);
 
     if (pcmk_is_set(scheduler->flags, pcmk_sched_probe_resources)) {
         crm_trace("Scheduling probes");
         pcmk__schedule_probes(scheduler);
     }
 
     if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
         g_list_foreach(scheduler->resources, clear_failcounts_if_orphaned,
                        NULL);
     }
 
     crm_trace("Scheduling resource actions");
     for (GList *iter = scheduler->resources; iter != NULL; iter = iter->next) {
         pcmk_resource_t *rsc = (pcmk_resource_t *) iter->data;
 
         rsc->private->cmds->create_actions(rsc);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a resource or any of its descendants are managed
  *
  * \param[in] rsc  Resource to check
  *
  * \return true if resource or any descendant is managed, otherwise false
  */
 static bool
 is_managed(const pcmk_resource_t *rsc)
 {
     if (pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
         return true;
     }
     for (GList *iter = rsc->private->children;
          iter != NULL; iter = iter->next) {
 
         if (is_managed((pcmk_resource_t *) iter->data)) {
             return true;
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Check whether any resources in the cluster are managed
  *
  * \param[in] scheduler  Scheduler data
  *
  * \return true if any resource is managed, otherwise false
  */
 static bool
 any_managed_resources(const pcmk_scheduler_t *scheduler)
 {
     for (const GList *iter = scheduler->resources;
          iter != NULL; iter = iter->next) {
         if (is_managed((const pcmk_resource_t *) iter->data)) {
             return true;
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Check whether a node requires fencing
  *
  * \param[in] node          Node to check
  * \param[in] have_managed  Whether any resource in cluster is managed
  *
  * \return true if \p node should be fenced, otherwise false
  */
 static bool
 needs_fencing(const pcmk_node_t *node, bool have_managed)
 {
     return have_managed && node->details->unclean
            && pe_can_fence(node->private->scheduler, node);
 }
 
 /*!
  * \internal
  * \brief Check whether a node requires shutdown
  *
  * \param[in] node          Node to check
  *
  * \return true if \p node should be shut down, otherwise false
  */
 static bool
 needs_shutdown(const pcmk_node_t *node)
 {
     if (pcmk__is_pacemaker_remote_node(node)) {
         /* Do not send shutdown actions for Pacemaker Remote nodes.
          * @TODO We might come up with a good use for this in the future.
          */
         return false;
     }
     return node->details->online && node->details->shutdown;
 }
 
 /*!
  * \internal
  * \brief Track and order non-DC fencing
  *
  * \param[in,out] list       List of existing non-DC fencing actions
  * \param[in,out] action     Fencing action to prepend to \p list
  * \param[in]     scheduler  Scheduler data
  *
  * \return (Possibly new) head of \p list
  */
 static GList *
 add_nondc_fencing(GList *list, pcmk_action_t *action,
                   const pcmk_scheduler_t *scheduler)
 {
     if (!pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)
         && (list != NULL)) {
         /* Concurrent fencing is disabled, so order each non-DC
          * fencing in a chain. If there is any DC fencing or
          * shutdown, it will be ordered after the last action in the
          * chain later.
          */
         order_actions((pcmk_action_t *) list->data, action, pcmk__ar_ordered);
     }
     return g_list_prepend(list, action);
 }
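 
 /* Illustrative ordering (hypothetical nodes): without concurrent fencing,
  * scheduling fencing for N1, then N2, then N3 chains them as N1 -> N2 -> N3,
  * and the returned list head is always the most recently added action (N3),
  * which is what later gets ordered before any DC fencing or shutdown.
  */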
 
 /*!
  * \internal
  * \brief Schedule a node for fencing
  *
  * \param[in,out] node      Node that requires fencing
  *
  * \return Scheduled fencing action for \p node
  */
 static pcmk_action_t *
 schedule_fencing(pcmk_node_t *node)
 {
     pcmk_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
                                          FALSE, node->private->scheduler);
 
-    pcmk__sched_warn("Scheduling node %s for fencing", pcmk__node_name(node));
+    pcmk__sched_warn(node->private->scheduler, "Scheduling node %s for fencing",
+                     pcmk__node_name(node));
     pcmk__order_vs_fence(fencing, node->private->scheduler);
     return fencing;
 }
 
 /*!
  * \internal
  * \brief Create and order node fencing and shutdown actions
  *
  * \param[in,out] scheduler  Scheduler data
  */
 static void
 schedule_fencing_and_shutdowns(pcmk_scheduler_t *scheduler)
 {
     pcmk_action_t *dc_down = NULL;
     bool integrity_lost = false;
     bool have_managed = any_managed_resources(scheduler);
     GList *fencing_ops = NULL;
     GList *shutdown_ops = NULL;
 
     crm_trace("Scheduling fencing and shutdowns as needed");
     if (!have_managed) {
         crm_notice("No fencing will be done until there are resources "
                    "to manage");
     }
 
     // Check each node for whether it needs fencing or shutdown
     for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
         pcmk_node_t *node = (pcmk_node_t *) iter->data;
         pcmk_action_t *fencing = NULL;
         const bool is_dc = pcmk__same_node(node, scheduler->dc_node);
 
         /* Guest nodes are "fenced" by recovering their container resource,
          * so handle them separately.
          */
         if (pcmk__is_guest_or_bundle_node(node)) {
             if (pcmk_is_set(node->private->flags, pcmk__node_remote_reset)
                 && have_managed && pe_can_fence(scheduler, node)) {
                 pcmk__fence_guest(node);
             }
             continue;
         }
 
         if (needs_fencing(node, have_managed)) {
             fencing = schedule_fencing(node);
 
             // Track DC and non-DC fence actions separately
             if (is_dc) {
                 dc_down = fencing;
             } else {
                 fencing_ops = add_nondc_fencing(fencing_ops, fencing,
                                                 scheduler);
             }
 
         } else if (needs_shutdown(node)) {
             pcmk_action_t *down_op = pcmk__new_shutdown_action(node);
 
             // Track DC and non-DC shutdown actions separately
             if (is_dc) {
                 dc_down = down_op;
             } else {
                 shutdown_ops = g_list_prepend(shutdown_ops, down_op);
             }
         }
 
         if ((fencing == NULL) && node->details->unclean) {
             integrity_lost = true;
             pcmk__config_warn("Node %s is unclean but cannot be fenced",
                               pcmk__node_name(node));
         }
     }
 
     if (integrity_lost) {
         if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
             pcmk__config_warn("Resource functionality and data integrity "
                               "cannot be guaranteed (configure, enable, "
                               "and test fencing to correct this)");
 
         } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
             crm_notice("Unclean nodes will not be fenced until quorum is "
                        "attained or " PCMK_OPT_NO_QUORUM_POLICY " is set to "
                        PCMK_VALUE_IGNORE);
         }
     }
 
     if (dc_down != NULL) {
         /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
          * DC elections. However, we don't want to order non-DC shutdowns before
          * a DC *fencing*, because even though we don't want a node that's
          * shutting down to become DC, the DC fencing could be ordered before a
          * clone stop that's also ordered before the shutdowns, thus leading to
          * a graph loop.
          */
         if (pcmk__str_eq(dc_down->task, PCMK_ACTION_DO_SHUTDOWN,
                          pcmk__str_none)) {
             pcmk__order_after_each(dc_down, shutdown_ops);
         }
 
         // Order any non-DC fencing before any DC fencing or shutdown
 
         if (pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)) {
             /* With concurrent fencing, order each non-DC fencing action
              * separately before any DC fencing or shutdown.
              */
             pcmk__order_after_each(dc_down, fencing_ops);
         } else if (fencing_ops != NULL) {
             /* Without concurrent fencing, the non-DC fencing actions are
              * already ordered relative to each other, so we just need to order
              * the DC fencing after the last action in the chain (which is the
              * first item in the list).
              */
             order_actions((pcmk_action_t *) fencing_ops->data, dc_down,
                           pcmk__ar_ordered);
         }
     }
     g_list_free(fencing_ops);
     g_list_free(shutdown_ops);
 }
 
 static void
 log_resource_details(pcmk_scheduler_t *scheduler)
 {
     pcmk__output_t *out = scheduler->priv;
     GList *all = NULL;
 
     /* Due to the `crm_mon --node=` feature, out->message() for all the
      * resource-related messages expects a list of nodes that we are allowed to
      * output information for. Here, we create a wildcard to match all nodes.
      */
     all = g_list_prepend(all, (gpointer) "*");
 
     for (GList *item = scheduler->resources; item != NULL; item = item->next) {
         pcmk_resource_t *rsc = (pcmk_resource_t *) item->data;
 
         // Log all resources except inactive orphans
         if (!pcmk_is_set(rsc->flags, pcmk__rsc_removed)
             || (rsc->private->orig_role != pcmk_role_stopped)) {
             out->message(out, pcmk__map_element_name(rsc->private->xml), 0UL,
                          rsc, all, all);
         }
     }
 
     g_list_free(all);
 }
 
 static void
 log_all_actions(pcmk_scheduler_t *scheduler)
 {
     /* This only ever outputs to the log, so ignore whatever output object was
      * previously set and just log instead.
      */
     pcmk__output_t *prev_out = scheduler->priv;
     pcmk__output_t *out = NULL;
 
     if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
         return;
     }
 
     pe__register_messages(out);
     pcmk__register_lib_messages(out);
     pcmk__output_set_log_level(out, LOG_NOTICE);
     scheduler->priv = out;
 
     out->begin_list(out, NULL, NULL, "Actions");
     pcmk__output_actions(scheduler);
     out->end_list(out);
     out->finish(out, CRM_EX_OK, true, NULL);
     pcmk__output_free(out);
 
     scheduler->priv = prev_out;
 }
 
 /*!
  * \internal
  * \brief Log all required but unrunnable actions at trace level
  *
  * \param[in] scheduler  Scheduler data
  */
 static void
 log_unrunnable_actions(const pcmk_scheduler_t *scheduler)
 {
     const uint64_t flags = pcmk__action_optional
                            |pcmk__action_runnable
                            |pcmk__action_pseudo;
 
     crm_trace("Required but unrunnable actions:");
     for (const GList *iter = scheduler->actions;
          iter != NULL; iter = iter->next) {
 
         const pcmk_action_t *action = (const pcmk_action_t *) iter->data;
 
         if (!pcmk_any_flags_set(action->flags, flags)) {
             pcmk__log_action("\t", action, true);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Unpack the CIB for scheduling
  *
  * \param[in,out] cib        CIB XML to unpack (may be NULL if already unpacked)
  * \param[in]     flags      Scheduler flags to set in addition to defaults
  * \param[in,out] scheduler  Scheduler data
  */
 static void
 unpack_cib(xmlNode *cib, unsigned long long flags, pcmk_scheduler_t *scheduler)
 {
     const char *localhost_save = NULL;
 
     if (pcmk_is_set(scheduler->flags, pcmk_sched_have_status)) {
         crm_trace("Reusing previously calculated cluster status");
         pcmk__set_scheduler_flags(scheduler, flags);
         return;
     }
 
     if (scheduler->localhost) {
         localhost_save = scheduler->localhost;
     }
 
     CRM_ASSERT(cib != NULL);
     crm_trace("Calculating cluster status");
 
     /* This will zero the entire struct without freeing anything first, so
      * callers should never call pcmk__schedule_actions() with a populated data
      * set unless pcmk_sched_have_status is set (i.e. cluster_status() was
      * previously called, whether directly or via pcmk__schedule_actions()).
      */
     set_working_set_defaults(scheduler);
 
     if (localhost_save) {
         scheduler->localhost = localhost_save;
     }
 
     pcmk__set_scheduler_flags(scheduler, flags);
     scheduler->input = cib;
     cluster_status(scheduler); // Sets pcmk_sched_have_status
 }
 
 /*!
  * \internal
  * \brief Run the scheduler for a given CIB
  *
  * \param[in,out] cib        CIB XML to use as scheduler input
  * \param[in]     flags      Scheduler flags to set in addition to defaults
  * \param[in,out] scheduler  Scheduler data
  */
 void
 pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
                        pcmk_scheduler_t *scheduler)
 {
     unpack_cib(cib, flags, scheduler);
     pcmk__set_assignment_methods(scheduler);
     pcmk__apply_node_health(scheduler);
     pcmk__unpack_constraints(scheduler);
     if (pcmk_is_set(scheduler->flags, pcmk_sched_validate_only)) {
         return;
     }
 
     if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)
         && pcmk__is_daemon) {
         log_resource_details(scheduler);
     }
 
     apply_node_criteria(scheduler);
 
     if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
         return;
     }
 
     pcmk__create_internal_constraints(scheduler);
     pcmk__handle_rsc_config_changes(scheduler);
     assign_resources(scheduler);
     schedule_resource_actions(scheduler);
 
     /* Remote ordering constraints need to happen prior to calculating fencing
      * because it is one more place we can mark nodes as needing fencing.
      */
     pcmk__order_remote_connection_actions(scheduler);
 
     schedule_fencing_and_shutdowns(scheduler);
     pcmk__apply_orderings(scheduler);
     log_all_actions(scheduler);
     pcmk__create_graph(scheduler);
 
     if (get_crm_log_level() == LOG_TRACE) {
         log_unrunnable_actions(scheduler);
     }
 }
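 
 /* A minimal usage sketch (hypothetical caller), assuming cib_xml was not
  * previously unpacked into this scheduler object:
  *
  *     pcmk_scheduler_t *scheduler = pe_new_working_set();
  *
  *     scheduler->priv = out;  // a pcmk__output_t, as in log_all_actions()
  *     pcmk__schedule_actions(cib_xml, pcmk_sched_no_compat, scheduler);
  *     pe_free_working_set(scheduler);
  */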
 
 /*!
  * \internal
  * \brief Initialize scheduler data
  *
  * Make our own copies of the CIB XML and date/time object, if they're not
  * \c NULL. This way we don't have to take ownership of the objects passed via
  * the API.
  *
  * This function is most useful for public API functions that want the caller
  * to retain ownership of the CIB object.
  *
  * \param[in,out] out        Output object
  * \param[in]     input      The CIB XML to check (if \c NULL, use current CIB)
  * \param[in]     date       Date and time to use in the scheduler (if \c NULL,
  *                           use current date and time). This can be used for
  *                           checking whether a rule is in effect at a certain
  *                           date and time.
  * \param[out]    scheduler  Where to store initialized scheduler data
  *
  * \return Standard Pacemaker return code
  */
 int
 pcmk__init_scheduler(pcmk__output_t *out, xmlNodePtr input, const crm_time_t *date,
                      pcmk_scheduler_t **scheduler)
 {
     // Allows for cleaner syntax than dereferencing the scheduler argument
     pcmk_scheduler_t *new_scheduler = NULL;
 
     new_scheduler = pe_new_working_set();
     if (new_scheduler == NULL) {
         return ENOMEM;
     }
 
     pcmk__set_scheduler_flags(new_scheduler,
                               pcmk_sched_no_counts|pcmk_sched_no_compat);
 
     // Populate the scheduler data
 
     // Make our own copy of the given input or fetch the CIB and use that
     if (input != NULL) {
         new_scheduler->input = pcmk__xml_copy(NULL, input);
         if (new_scheduler->input == NULL) {
             out->err(out, "Failed to copy input XML");
             pe_free_working_set(new_scheduler);
             return ENOMEM;
         }
 
     } else {
         int rc = cib__signon_query(out, NULL, &(new_scheduler->input));
 
         if (rc != pcmk_rc_ok) {
             pe_free_working_set(new_scheduler);
             return rc;
         }
     }
 
     // Make our own copy of the given crm_time_t object; otherwise
     // cluster_status() populates the scheduler data with the current time
     if (date != NULL) {
         // pcmk_copy_time() guarantees non-NULL
         new_scheduler->now = pcmk_copy_time(date);
     }
 
     // Unpack everything
     cluster_status(new_scheduler);
     *scheduler = new_scheduler;
 
     return pcmk_rc_ok;
 }
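 /* For illustration, a caller that wants to keep ownership of its CIB copy
  * might use this along the following lines (a sketch only; the out object is
  * assumed to be set up already, and error handling is abbreviated):
  *
  *     pcmk_scheduler_t *scheduler = NULL;
  *     int rc = pcmk__init_scheduler(out, cib_xml, NULL, &scheduler);
  *
  *     if (rc != pcmk_rc_ok) {
  *         return rc; // cib_xml is still owned by the caller
  *     }
  *     // ... inspect scheduler->resources, scheduler->nodes, etc. ...
  *     pe_free_working_set(scheduler);
  */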
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index e21d184fd6..a1f162ec95 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -1,1269 +1,1271 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/internal.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 #include <crm/common/scheduler_internal.h>
 
 #include "pe_status_private.h"
 
 void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length);
 
 static pcmk_node_t *active_node(const pcmk_resource_t *rsc,
                                 unsigned int *count_all,
                                 unsigned int *count_clean);
 
 static pcmk__rsc_methods_t resource_class_functions[] = {
     {
          native_unpack,
          native_find_rsc,
          native_parameter,
          native_active,
          native_resource_state,
          native_location,
          native_free,
          pe__count_common,
          pe__native_is_filtered,
          active_node,
          pe__primitive_max_per_node,
     },
     {
          group_unpack,
          native_find_rsc,
          native_parameter,
          group_active,
          group_resource_state,
          native_location,
          group_free,
          pe__count_common,
          pe__group_is_filtered,
          active_node,
          pe__group_max_per_node,
     },
     {
          clone_unpack,
          native_find_rsc,
          native_parameter,
          clone_active,
          clone_resource_state,
          native_location,
          clone_free,
          pe__count_common,
          pe__clone_is_filtered,
          active_node,
          pe__clone_max_per_node,
     },
     {
          pe__unpack_bundle,
          native_find_rsc,
          native_parameter,
          pe__bundle_active,
          pe__bundle_resource_state,
          native_location,
          pe__free_bundle,
          pe__count_bundle,
          pe__bundle_is_filtered,
          pe__bundle_active_node,
          pe__bundle_max_per_node,
     }
 };
 
 static enum pcmk__rsc_variant
 get_resource_type(const char *name)
 {
     if (pcmk__str_eq(name, PCMK_XE_PRIMITIVE, pcmk__str_casei)) {
         return pcmk__rsc_variant_primitive;
 
     } else if (pcmk__str_eq(name, PCMK_XE_GROUP, pcmk__str_casei)) {
         return pcmk__rsc_variant_group;
 
     } else if (pcmk__str_eq(name, PCMK_XE_CLONE, pcmk__str_casei)) {
         return pcmk__rsc_variant_clone;
 
     } else if (pcmk__str_eq(name, PCMK__XE_PROMOTABLE_LEGACY,
                             pcmk__str_casei)) {
         // @COMPAT deprecated since 2.0.0
         return pcmk__rsc_variant_clone;
 
     } else if (pcmk__str_eq(name, PCMK_XE_BUNDLE, pcmk__str_casei)) {
         return pcmk__rsc_variant_bundle;
     }
 
     return pcmk__rsc_variant_unknown;
 }
 
 /*!
  * \internal
  * \brief Insert a meta-attribute if not already present
  *
  * \param[in]     key    Meta-attribute name
  * \param[in]     value  Meta-attribute value to add if not already present
  * \param[in,out] table  Meta-attribute hash table to insert into
  *
  * \note This is like pcmk__insert_meta() except it won't overwrite existing
  *       values.
  */
 static void
 dup_attr(gpointer key, gpointer value, gpointer user_data)
 {
     GHashTable *table = user_data;
 
     CRM_CHECK((key != NULL) && (table != NULL), return);
     if (pcmk__str_eq((const char *) value, "#default", pcmk__str_casei)) {
         // @COMPAT Deprecated since 2.1.8
         pcmk__config_warn("Support for setting meta-attributes (such as %s) to "
                           "the explicit value '#default' is deprecated and "
                           "will be removed in a future release",
                           (const char *) key);
     } else if ((value != NULL) && (g_hash_table_lookup(table, key) == NULL)) {
         pcmk__insert_dup(table, (const char *) key, (const char *) value);
     }
 }
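 /* For example, merging one string table into another without overwriting
  * existing entries (a sketch; the names used are illustrative only):
  *
  *     GHashTable *defaults = pcmk__strkey_table(free, free);
  *     GHashTable *merged = pcmk__strkey_table(free, free);
  *
  *     pcmk__insert_dup(defaults, "target-role", "Started");
  *     pcmk__insert_dup(merged, "target-role", "Stopped");
  *
  *     // merged keeps "Stopped" because dup_attr() never overwrites
  *     g_hash_table_foreach(defaults, dup_attr, merged);
  */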
 
 static void
 expand_parents_fixed_nvpairs(pcmk_resource_t *rsc,
                              pe_rule_eval_data_t *rule_data,
                              GHashTable *meta_hash, pcmk_scheduler_t *scheduler)
 {
     pcmk_resource_t *p = rsc->private->parent;
     GHashTable *parent_orig_meta = NULL;
 
     if (p == NULL) {
         return;
     }
 
     parent_orig_meta = pcmk__strkey_table(free, free);
 
     /* Walk up the parent chain, collecting the PCMK_XE_META_ATTRIBUTES values
      * fixed in each ancestor's original XML. A value set in a closer ancestor
      * takes precedence and is not overwritten by a more distant one.
      */
     while (p != NULL) {
         // Unpack this ancestor's meta-attributes (resolving any id-ref)
         pe__unpack_dataset_nvpairs(p->private->xml, PCMK_XE_META_ATTRIBUTES,
                                    rule_data, parent_orig_meta, NULL, FALSE,
                                    scheduler);
         p = p->private->parent;
     }
 
     // This will not overwrite any values the child already has
     g_hash_table_foreach(parent_orig_meta, dup_attr, meta_hash);
     g_hash_table_destroy(parent_orig_meta);
 }
 
 void
 get_meta_attributes(GHashTable * meta_hash, pcmk_resource_t * rsc,
                     pcmk_node_t *node, pcmk_scheduler_t *scheduler)
 {
     pe_rsc_eval_data_t rsc_rule_data = {
         .standard = crm_element_value(rsc->private->xml, PCMK_XA_CLASS),
         .provider = crm_element_value(rsc->private->xml, PCMK_XA_PROVIDER),
         .agent = crm_element_value(rsc->private->xml, PCMK_XA_TYPE)
     };
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .now = scheduler->now,
         .match_data = NULL,
         .rsc_data = &rsc_rule_data,
         .op_data = NULL
     };
 
     if (node) {
         /* @COMPAT Support for node attribute expressions in rules for
          * meta-attributes is deprecated. When we can break behavioral backward
          * compatibility, drop this block.
          */
         rule_data.node_hash = node->private->attrs;
     }
 
     for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->private->xml);
          a != NULL; a = a->next) {
 
         if (a->children != NULL) {
             dup_attr((gpointer) a->name, (gpointer) a->children->content,
                      meta_hash);
         }
     }
 
     pe__unpack_dataset_nvpairs(rsc->private->xml, PCMK_XE_META_ATTRIBUTES,
                                &rule_data, meta_hash, NULL, FALSE, scheduler);
 
     /* Add any PCMK_XE_META_ATTRIBUTES explicitly set in an ancestor resource
      * to the child's table. Values the child has already set explicitly are
      * not overwritten.
      */
     if (rsc->private->parent != NULL) {
         expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, scheduler);
     }
 
     /* check the defaults */
     pe__unpack_dataset_nvpairs(scheduler->rsc_defaults, PCMK_XE_META_ATTRIBUTES,
                                &rule_data, meta_hash, NULL, FALSE, scheduler);
 
     /* Finally, add any remaining meta-attributes from the parent's fully
      * unpacked table (covering values the parent inherited rather than set
      * explicitly). Nothing already set up to this point is overwritten.
      */
     if (rsc->private->parent != NULL) {
         g_hash_table_foreach(rsc->private->parent->private->meta, dup_attr,
                              meta_hash);
     }
 }
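 /* The net effect is a strict precedence order in which earlier sources are
  * never overwritten by later ones: the resource element's own XML attributes
  * first, then its own PCMK_XE_META_ATTRIBUTES, then meta-attributes fixed in
  * its ancestors' XML, then PCMK_XE_RSC_DEFAULTS, and finally anything else
  * in the parent's already unpacked meta table.
  */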
 
 void
 get_rsc_attributes(GHashTable *meta_hash, const pcmk_resource_t *rsc,
                    const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
 {
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .now = scheduler->now,
         .match_data = NULL,
         .rsc_data = NULL,
         .op_data = NULL
     };
 
     if (node) {
         rule_data.node_hash = node->private->attrs;
     }
 
     pe__unpack_dataset_nvpairs(rsc->private->xml, PCMK_XE_INSTANCE_ATTRIBUTES,
                                &rule_data, meta_hash, NULL, FALSE, scheduler);
 
     /* set anything else based on the parent */
     if (rsc->private->parent != NULL) {
         get_rsc_attributes(meta_hash, rsc->private->parent, node, scheduler);
 
     } else {
         if (pcmk__xe_first_child(scheduler->rsc_defaults,
                                  PCMK_XE_INSTANCE_ATTRIBUTES, NULL,
                                  NULL) != NULL) {
             /* Not possible with schema validation enabled
              *
              * @COMPAT Drop support when we can break behavioral
              * backward compatibility
              */
             pcmk__warn_once(pcmk__wo_instance_defaults,
                             "Support for " PCMK_XE_INSTANCE_ATTRIBUTES " in "
                             PCMK_XE_RSC_DEFAULTS " is deprecated and will be "
                             "removed in a future release");
         }
 
         /* and finally check the defaults */
         pe__unpack_dataset_nvpairs(scheduler->rsc_defaults,
                                    PCMK_XE_INSTANCE_ATTRIBUTES, &rule_data,
                                    meta_hash, NULL, FALSE, scheduler);
     }
 }
 
 static char *
 template_op_key(xmlNode * op)
 {
     const char *name = crm_element_value(op, PCMK_XA_NAME);
     const char *role = crm_element_value(op, PCMK_XA_ROLE);
     char *key = NULL;
 
     if ((role == NULL)
         || pcmk__strcase_any_of(role, PCMK_ROLE_STARTED, PCMK_ROLE_UNPROMOTED,
                                 PCMK__ROLE_UNPROMOTED_LEGACY, NULL)) {
         role = PCMK__ROLE_UNKNOWN;
     }
 
     key = crm_strdup_printf("%s-%s", name, role);
     return key;
 }
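 /* For example, the resulting keys look like this (a sketch):
  *
  *     <op name="monitor" role="Promoted"/>  ->  "monitor-Promoted"
  *     <op name="monitor" role="Started"/>   ->  "monitor-Unknown"
  *     <op name="start"/>                    ->  "start-Unknown"
  *
  * Started, Unpromoted, the legacy equivalent of Unpromoted, and a missing
  * role all normalize to the unknown role, so such operations intentionally
  * collide when resource and template operations are merged below.
  */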
 
 static gboolean
 unpack_template(xmlNode *xml_obj, xmlNode **expanded_xml,
                 pcmk_scheduler_t *scheduler)
 {
     xmlNode *cib_resources = NULL;
     xmlNode *template = NULL;
     xmlNode *new_xml = NULL;
     xmlNode *child_xml = NULL;
     xmlNode *rsc_ops = NULL;
     xmlNode *template_ops = NULL;
     const char *template_ref = NULL;
     const char *id = NULL;
 
     if (xml_obj == NULL) {
         pcmk__config_err("No resource object for template unpacking");
         return FALSE;
     }
 
     template_ref = crm_element_value(xml_obj, PCMK_XA_TEMPLATE);
     if (template_ref == NULL) {
         return TRUE;
     }
 
     id = pcmk__xe_id(xml_obj);
     if (id == NULL) {
         pcmk__config_err("'%s' object must have a id", xml_obj->name);
         return FALSE;
     }
 
     if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
         pcmk__config_err("The resource object '%s' should not reference itself",
                          id);
         return FALSE;
     }
 
     cib_resources = get_xpath_object("//" PCMK_XE_RESOURCES, scheduler->input,
                                      LOG_TRACE);
     if (cib_resources == NULL) {
         pcmk__config_err("No resources configured");
         return FALSE;
     }
 
     template = pcmk__xe_first_child(cib_resources, PCMK_XE_TEMPLATE,
                                     PCMK_XA_ID, template_ref);
     if (template == NULL) {
         pcmk__config_err("No template named '%s'", template_ref);
         return FALSE;
     }
 
     new_xml = pcmk__xml_copy(NULL, template);
     xmlNodeSetName(new_xml, xml_obj->name);
     crm_xml_add(new_xml, PCMK_XA_ID, id);
     crm_xml_add(new_xml, PCMK__META_CLONE,
                 crm_element_value(xml_obj, PCMK__META_CLONE));
 
     template_ops = pcmk__xe_first_child(new_xml, PCMK_XE_OPERATIONS, NULL,
                                         NULL);
 
     for (child_xml = pcmk__xe_first_child(xml_obj, NULL, NULL, NULL);
          child_xml != NULL; child_xml = pcmk__xe_next(child_xml)) {
 
         xmlNode *new_child = pcmk__xml_copy(new_xml, child_xml);
 
         if (pcmk__xe_is(new_child, PCMK_XE_OPERATIONS)) {
             rsc_ops = new_child;
         }
     }
 
     if (template_ops && rsc_ops) {
         xmlNode *op = NULL;
         GHashTable *rsc_ops_hash = pcmk__strkey_table(free, NULL);
 
         for (op = pcmk__xe_first_child(rsc_ops, NULL, NULL, NULL); op != NULL;
              op = pcmk__xe_next(op)) {
 
             char *key = template_op_key(op);
 
             g_hash_table_insert(rsc_ops_hash, key, op);
         }
 
         for (op = pcmk__xe_first_child(template_ops, NULL, NULL, NULL);
              op != NULL; op = pcmk__xe_next(op)) {
 
             char *key = template_op_key(op);
 
             if (g_hash_table_lookup(rsc_ops_hash, key) == NULL) {
                 pcmk__xml_copy(rsc_ops, op);
             }
 
             free(key);
         }
 
         g_hash_table_destroy(rsc_ops_hash);
 
         pcmk__xml_free(template_ops);
     }
 
     /*pcmk__xml_free(*expanded_xml); */
     *expanded_xml = new_xml;
 
 #if 0 /* Disable multi-level templates for now */
     if (!unpack_template(new_xml, expanded_xml, scheduler)) {
        pcmk__xml_free(*expanded_xml);
        *expanded_xml = NULL;
        return FALSE;
     }
 #endif
 
     return TRUE;
 }
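 /* As an illustration (a sketch, not from a real configuration), given
  *
  *     <template id="web-template" class="ocf" provider="heartbeat"
  *               type="apache">
  *       <operations>
  *         <op id="web-template-monitor" name="monitor" interval="10s"/>
  *       </operations>
  *     </template>
  *
  *     <primitive id="web1" template="web-template">
  *       <operations>
  *         <op id="web1-monitor" name="monitor" interval="30s"/>
  *       </operations>
  *     </primitive>
  *
  * the expanded XML is a <primitive> with id "web1" that inherits the
  * template's class, provider, and type, and the 30s monitor wins because
  * operations defined on the resource take precedence over the template's.
  */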
 
 static gboolean
 add_template_rsc(xmlNode *xml_obj, pcmk_scheduler_t *scheduler)
 {
     const char *template_ref = NULL;
     const char *id = NULL;
 
     if (xml_obj == NULL) {
         pcmk__config_err("No resource object for processing resource list "
                          "of template");
         return FALSE;
     }
 
     template_ref = crm_element_value(xml_obj, PCMK_XA_TEMPLATE);
     if (template_ref == NULL) {
         return TRUE;
     }
 
     id = pcmk__xe_id(xml_obj);
     if (id == NULL) {
         pcmk__config_err("'%s' object must have a id", xml_obj->name);
         return FALSE;
     }
 
     if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
         pcmk__config_err("The resource object '%s' should not reference itself",
                          id);
         return FALSE;
     }
 
     pcmk__add_idref(scheduler->template_rsc_sets, template_ref, id);
     return TRUE;
 }
 
 static bool
 detect_promotable(pcmk_resource_t *rsc)
 {
     const char *promotable = g_hash_table_lookup(rsc->private->meta,
                                                  PCMK_META_PROMOTABLE);
 
     if (crm_is_true(promotable)) {
         return true;
     }
 
     // @COMPAT deprecated since 2.0.0
     if (pcmk__xe_is(rsc->private->xml, PCMK__XE_PROMOTABLE_LEGACY)) {
         pcmk__warn_once(pcmk__wo_master_element,
                         "Support for <" PCMK__XE_PROMOTABLE_LEGACY "> (such "
                         "as in %s) is deprecated and will be removed in a "
                         "future release. Use <" PCMK_XE_CLONE "> with a "
                         PCMK_META_PROMOTABLE " meta-attribute instead.",
                         rsc->id);
         pcmk__insert_dup(rsc->private->meta, PCMK_META_PROMOTABLE,
                          PCMK_VALUE_TRUE);
         return true;
     }
     return false;
 }
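 /* That is, both of the following forms (sketched) are detected as
  * promotable, the second with a deprecation warning:
  *
  *     <clone id="stateful-clone">
  *       <meta_attributes id="stateful-clone-meta">
  *         <nvpair id="stateful-clone-promotable"
  *                 name="promotable" value="true"/>
  *       </meta_attributes>
  *       ...
  *     </clone>
  *
  *     <master id="stateful-clone"> ... </master>
  */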
 
 static void
 free_params_table(gpointer data)
 {
     g_hash_table_destroy((GHashTable *) data);
 }
 
 /*!
  * \brief Get a table of resource parameters
  *
  * \param[in,out] rsc        Resource to query
  * \param[in]     node       Node for evaluating rules (NULL for defaults)
  * \param[in,out] scheduler  Scheduler data
  *
  * \return Hash table containing resource parameter names and values
  *         (or NULL if \p rsc or \p scheduler is NULL)
  * \note The returned table will be destroyed when the resource is freed, so
  *       callers should not destroy it.
  */
 GHashTable *
 pe_rsc_params(pcmk_resource_t *rsc, const pcmk_node_t *node,
               pcmk_scheduler_t *scheduler)
 {
     GHashTable *params_on_node = NULL;
 
     /* A NULL node is used to request the resource's default parameters
      * (not evaluated for node), but we always want something non-NULL
      * as a hash table key.
      */
     const char *node_name = "";
 
     // Sanity check
     if ((rsc == NULL) || (scheduler == NULL)) {
         return NULL;
     }
     if ((node != NULL) && (node->private->name != NULL)) {
         node_name = node->private->name;
     }
 
     // Find the parameter table for given node
     if (rsc->private->parameter_cache == NULL) {
         rsc->private->parameter_cache = pcmk__strikey_table(free,
                                                             free_params_table);
     } else {
         params_on_node = g_hash_table_lookup(rsc->private->parameter_cache,
                                              node_name);
     }
 
     // If none exists yet, create one with parameters evaluated for node
     if (params_on_node == NULL) {
         params_on_node = pcmk__strkey_table(free, free);
         get_rsc_attributes(params_on_node, rsc, node, scheduler);
         g_hash_table_insert(rsc->private->parameter_cache, strdup(node_name),
                             params_on_node);
     }
     return params_on_node;
 }
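 /* A typical lookup (a sketch; rsc, node, and scheduler are assumed to come
  * from an already unpacked working set, and "ip" is just an illustrative
  * parameter name):
  *
  *     GHashTable *params = pe_rsc_params(rsc, node, scheduler);
  *     const char *ip = g_hash_table_lookup(params, "ip");
  *
  * The table is cached per node name and owned by the resource, so repeated
  * calls for the same node are cheap and the caller must not destroy the
  * result.
  */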
 
 /*!
  * \internal
  * \brief Unpack a resource's \c PCMK_META_REQUIRES meta-attribute
  *
  * \param[in,out] rsc         Resource being unpacked
  * \param[in]     value       Value of \c PCMK_META_REQUIRES meta-attribute
  * \param[in]     is_default  Whether \p value was selected by default
  */
 static void
 unpack_requires(pcmk_resource_t *rsc, const char *value, bool is_default)
 {
     const pcmk_scheduler_t *scheduler = rsc->private->scheduler;
 
     if (pcmk__str_eq(value, PCMK_VALUE_NOTHING, pcmk__str_casei)) {
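         // Explicitly requires nothing, so leave the needs-* flags unset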
 
     } else if (pcmk__str_eq(value, PCMK_VALUE_QUORUM, pcmk__str_casei)) {
         pcmk__set_rsc_flags(rsc, pcmk__rsc_needs_quorum);
 
     } else if (pcmk__str_eq(value, PCMK_VALUE_FENCING, pcmk__str_casei)) {
         pcmk__set_rsc_flags(rsc, pcmk__rsc_needs_fencing);
         if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
             pcmk__config_warn("%s requires fencing but fencing is disabled",
                               rsc->id);
         }
 
     } else if (pcmk__str_eq(value, PCMK_VALUE_UNFENCING, pcmk__str_casei)) {
         if (pcmk_is_set(rsc->flags, pcmk__rsc_fence_device)) {
             pcmk__config_warn("Resetting \"" PCMK_META_REQUIRES "\" for %s "
                               "to \"" PCMK_VALUE_QUORUM "\" because fencing "
                               "devices cannot require unfencing", rsc->id);
             unpack_requires(rsc, PCMK_VALUE_QUORUM, true);
             return;
 
         } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
             pcmk__config_warn("Resetting \"" PCMK_META_REQUIRES "\" for %s "
                               "to \"" PCMK_VALUE_QUORUM "\" because fencing is "
                               "disabled", rsc->id);
             unpack_requires(rsc, PCMK_VALUE_QUORUM, true);
             return;
 
         } else {
             pcmk__set_rsc_flags(rsc, pcmk__rsc_needs_fencing
                                      |pcmk__rsc_needs_unfencing);
         }
 
     } else {
         const char *orig_value = value;
 
         if (pcmk_is_set(rsc->flags, pcmk__rsc_fence_device)) {
             value = PCMK_VALUE_QUORUM;
 
         } else if (pcmk__is_primitive(rsc)
                    && xml_contains_remote_node(rsc->private->xml)) {
             value = PCMK_VALUE_QUORUM;
 
         } else if (pcmk_is_set(scheduler->flags, pcmk_sched_enable_unfencing)) {
             value = PCMK_VALUE_UNFENCING;
 
         } else if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
             value = PCMK_VALUE_FENCING;
 
         } else if (scheduler->no_quorum_policy == pcmk_no_quorum_ignore) {
             value = PCMK_VALUE_NOTHING;
 
         } else {
             value = PCMK_VALUE_QUORUM;
         }
 
         if (orig_value != NULL) {
             pcmk__config_err("Resetting '" PCMK_META_REQUIRES "' for %s "
                              "to '%s' because '%s' is not valid",
                               rsc->id, value, orig_value);
         }
         unpack_requires(rsc, value, true);
         return;
     }
 
     pcmk__rsc_trace(rsc, "\tRequired to start: %s%s", value,
                     (is_default? " (default)" : ""));
 }
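 /* When the configured value is missing or invalid, the fallback chain above
  * effectively chooses the default as follows:
  *
  *     fencing device or remote connection    -> quorum
  *     cluster-wide unfencing enabled         -> unfencing
  *     fencing enabled                        -> fencing
  *     no-quorum-policy is ignore             -> nothing
  *     otherwise                              -> quorum
  */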
 
 static void
 warn_about_deprecated_classes(pcmk_resource_t *rsc)
 {
     const char *std = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
 
     if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_UPSTART, pcmk__str_none)) {
         pcmk__warn_once(pcmk__wo_upstart,
                         "Support for Upstart resources (such as %s) is "
                         "deprecated and will be removed in a future release",
                         rsc->id);
 
     } else if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_none)) {
         pcmk__warn_once(pcmk__wo_nagios,
                         "Support for Nagios resources (such as %s) is "
                         "deprecated and will be removed in a future release",
                         rsc->id);
     }
 }
 
 /*!
  * \internal
  * \brief Unpack configuration XML for a given resource
  *
  * Unpack the XML object containing a resource's configuration into a new
  * \c pcmk_resource_t object.
  *
  * \param[in]     xml_obj    XML node containing the resource's configuration
  * \param[out]    rsc        Where to store the unpacked resource information
  * \param[in]     parent     Resource's parent, if any
  * \param[in,out] scheduler  Scheduler data
  *
  * \return Standard Pacemaker return code
  * \note If pcmk_rc_ok is returned, \p *rsc is guaranteed to be non-NULL, and
  *       the caller is responsible for freeing it using its variant-specific
  *       free() method. Otherwise, \p *rsc is guaranteed to be NULL.
  */
 int
 pe__unpack_resource(xmlNode *xml_obj, pcmk_resource_t **rsc,
                     pcmk_resource_t *parent, pcmk_scheduler_t *scheduler)
 {
     xmlNode *expanded_xml = NULL;
     xmlNode *ops = NULL;
     const char *value = NULL;
     const char *id = NULL;
     bool guest_node = false;
     bool remote_node = false;
     pcmk__resource_private_t *rsc_private = NULL;
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .now = NULL,
         .match_data = NULL,
         .rsc_data = NULL,
         .op_data = NULL
     };
 
     CRM_CHECK(rsc != NULL, return EINVAL);
     CRM_CHECK((xml_obj != NULL) && (scheduler != NULL),
               *rsc = NULL;
               return EINVAL);
 
     rule_data.now = scheduler->now;
 
     crm_log_xml_trace(xml_obj, "[raw XML]");
 
     id = crm_element_value(xml_obj, PCMK_XA_ID);
     if (id == NULL) {
         pcmk__config_err("Ignoring <%s> configuration without " PCMK_XA_ID,
                          xml_obj->name);
         return pcmk_rc_unpack_error;
     }
 
     if (unpack_template(xml_obj, &expanded_xml, scheduler) == FALSE) {
         return pcmk_rc_unpack_error;
     }
 
     *rsc = calloc(1, sizeof(pcmk_resource_t));
     if (*rsc == NULL) {
-        pcmk__sched_err("Unable to allocate memory for resource '%s'", id);
+        pcmk__sched_err(scheduler,
+                        "Unable to allocate memory for resource '%s'", id);
         return ENOMEM;
     }
 
     (*rsc)->private = calloc(1, sizeof(pcmk__resource_private_t));
     if ((*rsc)->private == NULL) {
-        pcmk__sched_err("Unable to allocate memory for resource '%s'", id);
+        pcmk__sched_err(scheduler,
+                        "Unable to allocate memory for resource '%s'", id);
         free(*rsc);
         return ENOMEM;
     }
     rsc_private = (*rsc)->private;
 
     rsc_private->scheduler = scheduler;
 
     if (expanded_xml) {
         crm_log_xml_trace(expanded_xml, "[expanded XML]");
         rsc_private->xml = expanded_xml;
         rsc_private->orig_xml = xml_obj;
 
     } else {
         rsc_private->xml = xml_obj;
         rsc_private->orig_xml = NULL;
     }
 
     /* Do not use xml_obj from here on; use rsc_private->xml instead, in case
      * templates are involved
      */
 
     rsc_private->parent = parent;
 
     ops = pcmk__xe_first_child(rsc_private->xml, PCMK_XE_OPERATIONS, NULL,
                                NULL);
     rsc_private->ops_xml = pcmk__xe_resolve_idref(ops, scheduler->input);
 
     rsc_private->variant = get_resource_type((const char *)
                                              rsc_private->xml->name);
     if (rsc_private->variant == pcmk__rsc_variant_unknown) {
         pcmk__config_err("Ignoring resource '%s' of unknown type '%s'",
                          id, rsc_private->xml->name);
         common_free(*rsc);
         *rsc = NULL;
         return pcmk_rc_unpack_error;
     }
 
     rsc_private->meta = pcmk__strkey_table(free, free);
     rsc_private->utilization = pcmk__strkey_table(free, free);
     rsc_private->probed_nodes = pcmk__strkey_table(NULL, free);
     rsc_private->allowed_nodes = pcmk__strkey_table(NULL, free);
 
     value = crm_element_value(rsc_private->xml, PCMK__META_CLONE);
     if (value) {
         (*rsc)->id = crm_strdup_printf("%s:%s", id, value);
         pcmk__insert_meta(rsc_private, PCMK__META_CLONE, value);
 
     } else {
         (*rsc)->id = strdup(id);
     }
 
     warn_about_deprecated_classes(*rsc);
 
     rsc_private->fns = &resource_class_functions[rsc_private->variant];
 
     get_meta_attributes(rsc_private->meta, *rsc, NULL, scheduler);
 
     (*rsc)->flags = 0;
     pcmk__set_rsc_flags(*rsc, pcmk__rsc_unassigned);
 
     if (!pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
         pcmk__set_rsc_flags(*rsc, pcmk__rsc_managed);
     }
 
     rsc_private->orig_role = pcmk_role_stopped;
     rsc_private->next_role = pcmk_role_unknown;
 
     rsc_private->ban_after_failures = PCMK_SCORE_INFINITY;
 
     value = g_hash_table_lookup(rsc_private->meta, PCMK_META_PRIORITY);
     rsc_private->priority = char2score(value);
 
     value = g_hash_table_lookup(rsc_private->meta, PCMK_META_CRITICAL);
     if ((value == NULL) || crm_is_true(value)) {
         pcmk__set_rsc_flags(*rsc, pcmk__rsc_critical);
     }
 
     value = g_hash_table_lookup(rsc_private->meta, PCMK_META_NOTIFY);
     if (crm_is_true(value)) {
         pcmk__set_rsc_flags(*rsc, pcmk__rsc_notify);
     }
 
     if (xml_contains_remote_node(rsc_private->xml)) {
         pcmk__set_rsc_flags(*rsc, pcmk__rsc_is_remote_connection);
         if (g_hash_table_lookup(rsc_private->meta, PCMK__META_CONTAINER)) {
             guest_node = true;
         } else {
             remote_node = true;
         }
     }
 
     value = g_hash_table_lookup(rsc_private->meta, PCMK_META_ALLOW_MIGRATE);
     if (crm_is_true(value)) {
         pcmk__set_rsc_flags(*rsc, pcmk__rsc_migratable);
     } else if ((value == NULL) && remote_node) {
         /* By default, we want remote nodes to be able to float around the
          * cluster without having to stop all the resources within the remote
          * node before moving. Enabling migration support makes that possible.
          * If this ever causes problems, migration support can be explicitly
          * turned off with PCMK_META_ALLOW_MIGRATE=false.
          */
         pcmk__set_rsc_flags(*rsc, pcmk__rsc_migratable);
     }
 
     value = g_hash_table_lookup(rsc_private->meta, PCMK_META_IS_MANAGED);
     if (value != NULL) {
         if (pcmk__str_eq(PCMK_VALUE_DEFAULT, value, pcmk__str_casei)) {
             // @COMPAT Deprecated since 2.1.8
             pcmk__config_warn("Support for setting " PCMK_META_IS_MANAGED
                               " to the explicit value '" PCMK_VALUE_DEFAULT
                               "' is deprecated and will be removed in a "
                               "future release (just leave it unset)");
         } else if (crm_is_true(value)) {
             pcmk__set_rsc_flags(*rsc, pcmk__rsc_managed);
         } else {
             pcmk__clear_rsc_flags(*rsc, pcmk__rsc_managed);
         }
     }
 
     value = g_hash_table_lookup(rsc_private->meta, PCMK_META_MAINTENANCE);
     if (crm_is_true(value)) {
         pcmk__clear_rsc_flags(*rsc, pcmk__rsc_managed);
         pcmk__set_rsc_flags(*rsc, pcmk__rsc_maintenance);
     }
     if (pcmk_is_set(scheduler->flags, pcmk_sched_in_maintenance)) {
         pcmk__clear_rsc_flags(*rsc, pcmk__rsc_managed);
         pcmk__set_rsc_flags(*rsc, pcmk__rsc_maintenance);
     }
 
     if (pcmk__is_clone(pe__const_top_resource(*rsc, false))) {
         value = g_hash_table_lookup(rsc_private->meta,
                                     PCMK_META_GLOBALLY_UNIQUE);
         if (crm_is_true(value)) {
             pcmk__set_rsc_flags(*rsc, pcmk__rsc_unique);
         }
         if (detect_promotable(*rsc)) {
             pcmk__set_rsc_flags(*rsc, pcmk__rsc_promotable);
         }
     } else {
         pcmk__set_rsc_flags(*rsc, pcmk__rsc_unique);
     }
 
     // @COMPAT Deprecated meta-attribute
     value = g_hash_table_lookup(rsc_private->meta, PCMK__META_RESTART_TYPE);
     if (pcmk__str_eq(value, PCMK_VALUE_RESTART, pcmk__str_casei)) {
         rsc_private->restart_type = pcmk__restart_restart;
         pcmk__rsc_trace(*rsc, "%s dependency restart handling: restart",
                         (*rsc)->id);
         pcmk__warn_once(pcmk__wo_restart_type,
                         "Support for " PCMK__META_RESTART_TYPE " is deprecated "
                         "and will be removed in a future release");
 
     } else {
         rsc_private->restart_type = pcmk__restart_ignore;
         pcmk__rsc_trace(*rsc, "%s dependency restart handling: ignore",
                         (*rsc)->id);
     }
 
     value = g_hash_table_lookup(rsc_private->meta, PCMK_META_MULTIPLE_ACTIVE);
     if (pcmk__str_eq(value, PCMK_VALUE_STOP_ONLY, pcmk__str_casei)) {
         rsc_private->multiply_active_policy = pcmk__multiply_active_stop;
         pcmk__rsc_trace(*rsc, "%s multiple running resource recovery: stop only",
                         (*rsc)->id);
 
     } else if (pcmk__str_eq(value, PCMK_VALUE_BLOCK, pcmk__str_casei)) {
         rsc_private->multiply_active_policy = pcmk__multiply_active_block;
         pcmk__rsc_trace(*rsc, "%s multiple running resource recovery: block",
                         (*rsc)->id);
 
     } else if (pcmk__str_eq(value, PCMK_VALUE_STOP_UNEXPECTED,
                             pcmk__str_casei)) {
         rsc_private->multiply_active_policy = pcmk__multiply_active_unexpected;
         pcmk__rsc_trace(*rsc,
                         "%s multiple running resource recovery: "
                         "stop unexpected instances",
                         (*rsc)->id);
 
     } else { // PCMK_VALUE_STOP_START
         if (!pcmk__str_eq(value, PCMK_VALUE_STOP_START,
                           pcmk__str_casei|pcmk__str_null_matches)) {
             pcmk__config_warn("%s is not a valid value for "
                               PCMK_META_MULTIPLE_ACTIVE
                               ", using default of "
                               "\"" PCMK_VALUE_STOP_START "\"",
                               value);
         }
         rsc_private->multiply_active_policy = pcmk__multiply_active_restart;
         pcmk__rsc_trace(*rsc,
                         "%s multiple running resource recovery: stop/start",
                         (*rsc)->id);
     }
 
     value = g_hash_table_lookup(rsc_private->meta,
                                 PCMK_META_RESOURCE_STICKINESS);
     if (value != NULL) {
         if (pcmk__str_eq(PCMK_VALUE_DEFAULT, value, pcmk__str_casei)) {
             // @COMPAT Deprecated since 2.1.8
             pcmk__config_warn("Support for setting "
                               PCMK_META_RESOURCE_STICKINESS
                               " to the explicit value '" PCMK_VALUE_DEFAULT
                               "' is deprecated and will be removed in a "
                               "future release (just leave it unset)");
         } else {
             rsc_private->stickiness = char2score(value);
         }
     }
 
     value = g_hash_table_lookup(rsc_private->meta,
                                 PCMK_META_MIGRATION_THRESHOLD);
     if (value != NULL) {
         if (pcmk__str_eq(PCMK_VALUE_DEFAULT, value, pcmk__str_casei)) {
             // @COMPAT Deprecated since 2.1.8
             pcmk__config_warn("Support for setting "
                               PCMK_META_MIGRATION_THRESHOLD
                               " to the explicit value '" PCMK_VALUE_DEFAULT
                               "' is deprecated and will be removed in a "
                               "future release (just leave it unset)");
         } else {
             rsc_private->ban_after_failures = char2score(value);
             if (rsc_private->ban_after_failures < 0) {
                 /* @COMPAT We use 1 here to preserve previous behavior, but this
                  * should probably use the default (INFINITY) or 0 (to disable)
                  * instead.
                  */
                 pcmk__warn_once(pcmk__wo_neg_threshold,
                                 PCMK_META_MIGRATION_THRESHOLD
                                 " must be non-negative, using 1 instead");
                 rsc_private->ban_after_failures = 1;
             }
         }
     }
 
     if (pcmk__str_eq(crm_element_value(rsc_private->xml, PCMK_XA_CLASS),
                      PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
         pcmk__set_scheduler_flags(scheduler, pcmk_sched_have_fencing);
         pcmk__set_rsc_flags(*rsc, pcmk__rsc_fence_device);
     }
 
     value = g_hash_table_lookup(rsc_private->meta, PCMK_META_REQUIRES);
     unpack_requires(*rsc, value, false);
 
     value = g_hash_table_lookup(rsc_private->meta, PCMK_META_FAILURE_TIMEOUT);
     if (value != NULL) {
         pcmk_parse_interval_spec(value, &(rsc_private->failure_expiration_ms));
     }
 
     if (remote_node) {
         GHashTable *params = pe_rsc_params(*rsc, NULL, scheduler);
 
         /* Grabbing the value now means that any rules based on node attributes
          * will evaluate to false, so such rules should not be used with
          * PCMK_REMOTE_RA_RECONNECT_INTERVAL.
          *
          * @TODO Evaluate per node before using
          */
         value = g_hash_table_lookup(params, PCMK_REMOTE_RA_RECONNECT_INTERVAL);
         if (value) {
             /* Reconnect delay works by setting failure_timeout and preventing
              * the connection from starting until the failure is cleared.
              */
             pcmk_parse_interval_spec(value,
                                      &(rsc_private->remote_reconnect_ms));
 
             /* We want to override any default failure_timeout when
              * PCMK_REMOTE_RA_RECONNECT_INTERVAL is in use.
              */
             rsc_private->failure_expiration_ms =
                 rsc_private->remote_reconnect_ms;
         }
     }
 
     get_target_role(*rsc, &(rsc_private->next_role));
     pcmk__rsc_trace(*rsc, "%s desired next state: %s", (*rsc)->id,
                     (rsc_private->next_role == pcmk_role_unknown)?
                         "default" : pcmk_role_text(rsc_private->next_role));
 
     if (rsc_private->fns->unpack(*rsc, scheduler) == FALSE) {
         rsc_private->fns->free(*rsc);
         *rsc = NULL;
         return pcmk_rc_unpack_error;
     }
 
     if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
         // This tag must stay exactly the same because it is tested elsewhere
         resource_location(*rsc, NULL, 0, "symmetric_default", scheduler);
     } else if (guest_node) {
         /* Remote connection resources tied to a container must always be
          * allowed to opt in to the cluster. Whether the connection is
          * actually allowed to be placed on a node depends on the container
          * resource.
          */
         resource_location(*rsc, NULL, 0, "remote_connection_default",
                           scheduler);
     }
 
     pcmk__rsc_trace(*rsc, "%s action notification: %s", (*rsc)->id,
                     pcmk_is_set((*rsc)->flags, pcmk__rsc_notify)? "required" : "not required");
 
     pe__unpack_dataset_nvpairs(rsc_private->xml, PCMK_XE_UTILIZATION,
                                &rule_data, rsc_private->utilization, NULL,
                                FALSE, scheduler);
 
     if (expanded_xml) {
         if (add_template_rsc(xml_obj, scheduler) == FALSE) {
             rsc_private->fns->free(*rsc);
             *rsc = NULL;
             return pcmk_rc_unpack_error;
         }
     }
     return pcmk_rc_ok;
 }
 
 gboolean
 is_parent(pcmk_resource_t *child, pcmk_resource_t *rsc)
 {
     pcmk_resource_t *parent = child;
 
     if (parent == NULL || rsc == NULL) {
         return FALSE;
     }
     while (parent->private->parent != NULL) {
         if (parent->private->parent == rsc) {
             return TRUE;
         }
         parent = parent->private->parent;
     }
     return FALSE;
 }
 
 pcmk_resource_t *
 uber_parent(pcmk_resource_t *rsc)
 {
     pcmk_resource_t *parent = rsc;
 
     if (parent == NULL) {
         return NULL;
     }
     while ((parent->private->parent != NULL)
            && !pcmk__is_bundle(parent->private->parent)) {
         parent = parent->private->parent;
     }
     return parent;
 }
 
 /*!
  * \internal
  * \brief Get the topmost parent of a resource as a const pointer
  *
  * \param[in] rsc             Resource to check
  * \param[in] include_bundle  If true, go all the way to bundle
  *
  * \return \p NULL if \p rsc is NULL, \p rsc if \p rsc has no parent,
  *         the bundle if \p rsc is bundled and \p include_bundle is true,
  *         otherwise the topmost parent of \p rsc up to a clone
  */
 const pcmk_resource_t *
 pe__const_top_resource(const pcmk_resource_t *rsc, bool include_bundle)
 {
     const pcmk_resource_t *parent = rsc;
 
     if (parent == NULL) {
         return NULL;
     }
     while (parent->private->parent != NULL) {
         if (!include_bundle && pcmk__is_bundle(parent->private->parent)) {
             break;
         }
         parent = parent->private->parent;
     }
     return parent;
 }
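 /* For a bundled primitive the two traversals differ (a sketch): with a
  * bundle "b" whose implicit clone "b-clone" wraps primitive "b-rsc",
  * uber_parent() and pe__const_top_resource(rsc, false) both stop at
  * "b-clone", while pe__const_top_resource(rsc, true) continues up to the
  * bundle "b" itself.
  */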
 
 void
 common_free(pcmk_resource_t * rsc)
 {
     if (rsc == NULL) {
         return;
     }
 
     pcmk__rsc_trace(rsc, "Freeing %s", rsc->id);
 
     if (rsc->private->parameter_cache != NULL) {
         g_hash_table_destroy(rsc->private->parameter_cache);
     }
 
     if ((rsc->private->parent == NULL)
         && pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
 
         pcmk__xml_free(rsc->private->xml);
         rsc->private->xml = NULL;
         pcmk__xml_free(rsc->private->orig_xml);
         rsc->private->orig_xml = NULL;
 
     } else if (rsc->private->orig_xml != NULL) {
         // rsc->private->xml was expanded from a template
         pcmk__xml_free(rsc->private->xml);
         rsc->private->xml = NULL;
     }
     free(rsc->id);
 
     free(rsc->private->variant_opaque);
     free(rsc->private->history_id);
     free(rsc->private->pending_action);
     free(rsc->private->assigned_node);
 
     g_list_free(rsc->private->actions);
     g_list_free(rsc->private->active_nodes);
     g_list_free(rsc->private->launched);
     g_list_free(rsc->private->dangling_migration_sources);
     g_list_free(rsc->private->with_this_colocations);
     g_list_free(rsc->private->this_with_colocations);
     g_list_free(rsc->private->location_constraints);
     g_list_free(rsc->private->ticket_constraints);
 
     if (rsc->private->meta != NULL) {
         g_hash_table_destroy(rsc->private->meta);
     }
     if (rsc->private->utilization != NULL) {
         g_hash_table_destroy(rsc->private->utilization);
     }
     if (rsc->private->probed_nodes != NULL) {
         g_hash_table_destroy(rsc->private->probed_nodes);
     }
     if (rsc->private->allowed_nodes != NULL) {
         g_hash_table_destroy(rsc->private->allowed_nodes);
     }
 
     free(rsc->private);
 
     free(rsc);
 }
 
 /*!
  * \internal
  * \brief Count a node and update most preferred to it as appropriate
  *
  * \param[in]     rsc          An active resource
  * \param[in]     node         A node that \p rsc is active on
  * \param[in,out] active       This will be set to \p node if \p node is more
  *                             preferred than the current value
  * \param[in,out] count_all    If not NULL, this will be incremented
  * \param[in,out] count_clean  If not NULL, this will be incremented if \p node
  *                             is online and clean
  *
  * \return true if the count should continue, or false if the result is
  *         already sufficiently known
  */
 bool
 pe__count_active_node(const pcmk_resource_t *rsc, pcmk_node_t *node,
                       pcmk_node_t **active, unsigned int *count_all,
                       unsigned int *count_clean)
 {
     bool keep_looking = false;
     bool is_happy = false;
 
     CRM_CHECK((rsc != NULL) && (node != NULL) && (active != NULL),
               return false);
 
     is_happy = node->details->online && !node->details->unclean;
 
     if (count_all != NULL) {
         ++*count_all;
     }
     if ((count_clean != NULL) && is_happy) {
         ++*count_clean;
     }
     if ((count_all != NULL) || (count_clean != NULL)) {
         keep_looking = true; // We're counting, so go through entire list
     }
 
     if (rsc->private->partial_migration_source != NULL) {
         if (pcmk__same_node(node, rsc->private->partial_migration_source)) {
             *active = node; // This is the migration source
         } else {
             keep_looking = true;
         }
     } else if (!pcmk_is_set(rsc->flags, pcmk__rsc_needs_fencing)) {
         if (is_happy && ((*active == NULL) || !(*active)->details->online
                          || (*active)->details->unclean)) {
             *active = node; // This is the first clean node
         } else {
             keep_looking = true;
         }
     }
     if (*active == NULL) {
         *active = node; // This is the first node checked
     }
     return keep_looking;
 }
 
 // Shared implementation of pcmk__rsc_methods_t:active_node()
 static pcmk_node_t *
 active_node(const pcmk_resource_t *rsc, unsigned int *count_all,
             unsigned int *count_clean)
 {
     pcmk_node_t *active = NULL;
 
     if (count_all != NULL) {
         *count_all = 0;
     }
     if (count_clean != NULL) {
         *count_clean = 0;
     }
     if (rsc == NULL) {
         return NULL;
     }
     for (GList *iter = rsc->private->active_nodes;
          iter != NULL; iter = iter->next) {
 
         if (!pe__count_active_node(rsc, (pcmk_node_t *) iter->data, &active,
                                    count_all, count_clean)) {
             break; // Don't waste time iterating if we don't have to
         }
     }
     return active;
 }
 
 /*!
  * \internal
  * \brief Find and count active nodes according to \c PCMK_META_REQUIRES
  *
  * \param[in]  rsc    Resource to check
  * \param[out] count  If not NULL, will be set to count of active nodes
  *
  * \return An active node (or NULL if resource is not active anywhere)
  *
  * \note This is a convenience wrapper for active_node() that counts either
  *       all active nodes or only clean active nodes, as appropriate for the
  *       resource's \c PCMK_META_REQUIRES meta-attribute.
  */
 pcmk_node_t *
 pe__find_active_requires(const pcmk_resource_t *rsc, unsigned int *count)
 {
     if (rsc == NULL) {
         if (count != NULL) {
             *count = 0;
         }
         return NULL;
     }
 
     if (pcmk_is_set(rsc->flags, pcmk__rsc_needs_fencing)) {
         return rsc->private->fns->active_node(rsc, count, NULL);
     } else {
         return rsc->private->fns->active_node(rsc, NULL, count);
     }
 }
 
 void
 pe__count_common(pcmk_resource_t *rsc)
 {
     if (rsc->private->children != NULL) {
         for (GList *item = rsc->private->children;
              item != NULL; item = item->next) {
             pcmk_resource_t *child = item->data;
 
             child->private->fns->count(item->data);
         }
 
     } else if (!pcmk_is_set(rsc->flags, pcmk__rsc_removed)
                || (rsc->private->orig_role > pcmk_role_stopped)) {
         rsc->private->scheduler->ninstances++;
         if (pe__resource_is_disabled(rsc)) {
             rsc->private->scheduler->disabled_resources++;
         }
         if (pcmk_is_set(rsc->flags, pcmk__rsc_blocked)) {
             rsc->private->scheduler->blocked_resources++;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Update a resource's next role
  *
  * \param[in,out] rsc   Resource to be updated
  * \param[in]     role  Resource's new next role
  * \param[in]     why   Human-friendly reason why role is changing (for logs)
  */
 void
 pe__set_next_role(pcmk_resource_t *rsc, enum rsc_role_e role, const char *why)
 {
     CRM_ASSERT((rsc != NULL) && (why != NULL));
     if (rsc->private->next_role != role) {
         pcmk__rsc_trace(rsc, "Resetting next role for %s from %s to %s (%s)",
                         rsc->id, pcmk_role_text(rsc->private->next_role),
                         pcmk_role_text(role), why);
         rsc->private->next_role = role;
     }
 }
diff --git a/lib/pengine/pe_digest.c b/lib/pengine/pe_digest.c
index 6d9842111f..b427b2d0de 100644
--- a/lib/pengine/pe_digest.c
+++ b/lib/pengine/pe_digest.c
@@ -1,611 +1,612 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <glib.h>
 #include <stdbool.h>
 
 #include <crm/crm.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 #include <crm/pengine/internal.h>
 #include "pe_status_private.h"
 
 extern bool pcmk__is_daemon;
 
 /*!
  * \internal
  * \brief Free an operation digest cache entry
  *
  * \param[in,out] ptr  Pointer to cache entry to free
  *
  * \note The argument is a gpointer so this can be used as a hash table
  *       free function.
  */
 void
 pe__free_digests(gpointer ptr)
 {
     pcmk__op_digest_t *data = ptr;
 
     if (data != NULL) {
         pcmk__xml_free(data->params_all);
         pcmk__xml_free(data->params_secure);
         pcmk__xml_free(data->params_restart);
 
         free(data->digest_all_calc);
         free(data->digest_restart_calc);
         free(data->digest_secure_calc);
 
         free(data);
     }
 }
 
 // Return true if XML attribute name is not a substring of a given string
 static bool
 attr_not_in_string(xmlAttrPtr a, void *user_data)
 {
     bool filter = false;
     char *name = crm_strdup_printf(" %s ", (const char *) a->name);
 
     if (strstr((const char *) user_data, name) == NULL) {
         crm_trace("Filtering %s (not found in '%s')",
                   (const char *) a->name, (const char *) user_data);
         filter = true;
     }
     free(name);
     return filter;
 }
 
 // Return true if XML attribute name is a substring of a given string
 static bool
 attr_in_string(xmlAttrPtr a, void *user_data)
 {
     bool filter = false;
     char *name = crm_strdup_printf(" %s ", (const char *) a->name);
 
     if (strstr((const char *) user_data, name) != NULL) {
         crm_trace("Filtering %s (found in '%s')",
                   (const char *) a->name, (const char *) user_data);
         filter = true;
     }
     free(name);
     return filter;
 }
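 /* Both helpers expect the string to be a space-delimited list with leading
  * and trailing spaces, which is how the secure and force-restart parameter
  * lists are recorded in operation history. For example (a sketch):
  *
  *     user_data = " passwd password user "
  *
  * matches the attribute name "passwd" but not "passwd2", because each name
  * is wrapped in spaces before the substring search.
  */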
 
 /*!
  * \internal
  * \brief Add digest of all parameters to a digest cache entry
  *
  * \param[out]    data         Digest cache entry to modify
  * \param[in,out] rsc          Resource that action was for
  * \param[in]     node         Node action was performed on
  * \param[in]     params       Resource parameters evaluated for node
  * \param[in]     task         Name of action performed
  * \param[in,out] interval_ms  Action's interval (will be reset if in overrides)
  * \param[in]     xml_op       Unused
  * \param[in]     op_version   CRM feature set to use for digest calculation
  * \param[in]     overrides    Key/value table to override resource parameters
  * \param[in,out] scheduler    Scheduler data
  */
 static void
 calculate_main_digest(pcmk__op_digest_t *data, pcmk_resource_t *rsc,
                       const pcmk_node_t *node, GHashTable *params,
                       const char *task, guint *interval_ms,
                       const xmlNode *xml_op, const char *op_version,
                       GHashTable *overrides, pcmk_scheduler_t *scheduler)
 {
     xmlNode *action_config = NULL;
 
     data->params_all = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS);
 
     /* REMOTE_CONTAINER_HACK: Allow Pacemaker Remote nodes to run containers
      * that themselves are Pacemaker Remote nodes
      */
     (void) pe__add_bundle_remote_name(rsc, data->params_all,
                                       PCMK_REMOTE_RA_ADDR);
 
     if (overrides != NULL) {
         // If interval was overridden, reset it
         const char *meta_name = CRM_META "_" PCMK_META_INTERVAL;
         const char *interval_s = g_hash_table_lookup(overrides, meta_name);
 
         if (interval_s != NULL) {
             long long value_ll;
 
             if ((pcmk__scan_ll(interval_s, &value_ll, 0LL) == pcmk_rc_ok)
                 && (value_ll >= 0) && (value_ll <= G_MAXUINT)) {
                 *interval_ms = (guint) value_ll;
             }
         }
 
         // Add overrides to list of all parameters
         g_hash_table_foreach(overrides, hash2field, data->params_all);
     }
 
     // Add provided instance parameters
     g_hash_table_foreach(params, hash2field, data->params_all);
 
     // Find action configuration XML in CIB
     action_config = pcmk__find_action_config(rsc, task, *interval_ms, true);
 
     /* Add action-specific resource instance attributes to the digest list.
      *
      * If this is a one-time action with action-specific instance attributes,
      * enforce a restart instead of a reload-agent in case the main digest
      * doesn't match, even if the restart digest does. This ensures that any
      * changes to the action-specific parameters get applied for this specific
      * action, and that digests calculated for the resulting history will be
      * correct. Default the result to pcmk__digest_restart for the case where
      * the main digest doesn't match.
      */
     params = pcmk__unpack_action_rsc_params(action_config, node->private->attrs,
                                             scheduler);
     if ((*interval_ms == 0) && (g_hash_table_size(params) > 0)) {
         data->rc = pcmk__digest_restart;
     }
     g_hash_table_foreach(params, hash2field, data->params_all);
     g_hash_table_destroy(params);
 
     // Add action meta-attributes
     params = pcmk__unpack_action_meta(rsc, node, task, *interval_ms,
                                       action_config);
     g_hash_table_foreach(params, hash2metafield, data->params_all);
     g_hash_table_destroy(params);
 
     pcmk__filter_op_for_digest(data->params_all);
 
     data->digest_all_calc = pcmk__digest_operation(data->params_all);
 }
 
 // Return true if XML attribute name is a Pacemaker-defined fencing parameter
 static bool
 is_fence_param(xmlAttrPtr attr, void *user_data)
 {
     return pcmk_stonith_param((const char *) attr->name);
 }
 
 /*!
  * \internal
  * \brief Add secure digest to a digest cache entry
  *
  * \param[out] data        Digest cache entry to modify
  * \param[in]  rsc         Resource that action was for
  * \param[in]  params      Resource parameters evaluated for node
  * \param[in]  xml_op      XML of operation in CIB status (if available)
  * \param[in]  op_version  CRM feature set to use for digest calculation
  * \param[in]  overrides   Key/value hash table to override resource parameters
  */
 static void
 calculate_secure_digest(pcmk__op_digest_t *data, const pcmk_resource_t *rsc,
                         GHashTable *params, const xmlNode *xml_op,
                         const char *op_version, GHashTable *overrides)
 {
     const char *class = crm_element_value(rsc->private->xml, PCMK_XA_CLASS);
     const char *secure_list = NULL;
     bool old_version = (compare_version(op_version, "3.16.0") < 0);
 
     if (xml_op == NULL) {
         secure_list = " passwd password user ";
     } else {
         secure_list = crm_element_value(xml_op, PCMK__XA_OP_SECURE_PARAMS);
     }
 
     if (old_version) {
         data->params_secure = pcmk__xe_create(NULL, PCMK_XE_PARAMETERS);
         if (overrides != NULL) {
             g_hash_table_foreach(overrides, hash2field, data->params_secure);
         }
 
         g_hash_table_foreach(params, hash2field, data->params_secure);
 
     } else {
         // Start with a copy of all parameters
         data->params_secure = pcmk__xml_copy(NULL, data->params_all);
     }
 
     if (secure_list != NULL) {
         pcmk__xe_remove_matching_attrs(data->params_secure, attr_in_string,
                                        (void *) secure_list);
     }
     if (old_version
         && pcmk_is_set(pcmk_get_ra_caps(class),
                        pcmk_ra_cap_fence_params)) {
         /* For stonith resources, Pacemaker adds special parameters, but these
          * are not listed in fence agent meta-data, so an older DC will not
          * hash them. That means we have to filter them out before calculating
          * our hash for comparison.
          */
         pcmk__xe_remove_matching_attrs(data->params_secure, is_fence_param,
                                        NULL);
     }
     pcmk__filter_op_for_digest(data->params_secure);
 
     /* CRM_meta_timeout *should* be part of a digest for recurring operations.
      * However, an older DC does not add the timeout to secure digests,
      * because it only includes parameters declared by the resource agent.
      * Remove any timeout that made it this far, to match.
      */
     if (old_version) {
         pcmk__xe_remove_attr(data->params_secure,
                              CRM_META "_" PCMK_META_TIMEOUT);
     }
 
     data->digest_secure_calc = pcmk__digest_operation(data->params_secure);
 }
 
 /*!
  * \internal
  * \brief Add restart digest to a digest cache entry
  *
  * \param[out] data        Digest cache entry to modify
  * \param[in]  xml_op      XML of operation in CIB status (if available)
  * \param[in]  op_version  CRM feature set to use for digest calculation
  *
  * \note This function doesn't need to handle overrides because it starts with
  *       data->params_all, which already has overrides applied.
  */
 static void
 calculate_restart_digest(pcmk__op_digest_t *data, const xmlNode *xml_op,
                          const char *op_version)
 {
     const char *value = NULL;
 
     // We must have XML of resource operation history
     if (xml_op == NULL) {
         return;
     }
 
     // And the history must have a restart digest to compare against
     if (crm_element_value(xml_op, PCMK__XA_OP_RESTART_DIGEST) == NULL) {
         return;
     }
 
     // Start with a copy of all parameters
     data->params_restart = pcmk__xml_copy(NULL, data->params_all);
 
     // Then filter out reloadable parameters, if any
     value = crm_element_value(xml_op, PCMK__XA_OP_FORCE_RESTART);
     if (value != NULL) {
         pcmk__xe_remove_matching_attrs(data->params_restart, attr_not_in_string,
                                        (void *) value);
     }
 
     data->digest_restart_calc = pcmk__digest_operation(data->params_restart);
 }
 
 /*!
  * \internal
  * \brief Create a new digest cache entry with calculated digests
  *
  * \param[in,out] rsc          Resource that action was for
  * \param[in]     task         Name of action performed
  * \param[in,out] interval_ms  Action's interval (will be reset if in overrides)
  * \param[in]     node         Node action was performed on
  * \param[in]     xml_op       XML of operation in CIB status (if available)
  * \param[in]     overrides    Key/value table to override resource parameters
  * \param[in]     calc_secure  Whether to calculate secure digest
  * \param[in,out] scheduler    Scheduler data
  *
  * \return Pointer to new digest cache entry (or NULL on memory error)
  * \note It is the caller's responsibility to free the result using
  *       pe__free_digests().
  */
 pcmk__op_digest_t *
 pe__calculate_digests(pcmk_resource_t *rsc, const char *task,
                       guint *interval_ms, const pcmk_node_t *node,
                       const xmlNode *xml_op, GHashTable *overrides,
                       bool calc_secure, pcmk_scheduler_t *scheduler)
 {
     pcmk__op_digest_t *data = NULL;
     const char *op_version = NULL;
     GHashTable *params = NULL;
 
     CRM_CHECK(scheduler != NULL, return NULL);
 
     data = calloc(1, sizeof(pcmk__op_digest_t));
     if (data == NULL) {
-        pcmk__sched_err("Could not allocate memory for operation digest");
+        pcmk__sched_err(scheduler,
+                        "Could not allocate memory for operation digest");
         return NULL;
     }
 
     data->rc = pcmk__digest_match;
 
     if (xml_op != NULL) {
         op_version = crm_element_value(xml_op, PCMK_XA_CRM_FEATURE_SET);
     }
 
     if ((op_version == NULL) && (scheduler->input != NULL)) {
         op_version = crm_element_value(scheduler->input,
                                        PCMK_XA_CRM_FEATURE_SET);
     }
 
     if (op_version == NULL) {
         op_version = CRM_FEATURE_SET;
     }
 
     params = pe_rsc_params(rsc, node, scheduler);
     calculate_main_digest(data, rsc, node, params, task, interval_ms, xml_op,
                           op_version, overrides, scheduler);
     if (calc_secure) {
         calculate_secure_digest(data, rsc, params, xml_op, op_version,
                                 overrides);
     }
     calculate_restart_digest(data, xml_op, op_version);
     return data;
 }
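 
 /* A minimal usage sketch (hypothetical caller; assumes rsc, node, and
  * scheduler were already populated, for example via cluster_status()):
  *
  *     guint interval_ms = 0;
  *     pcmk__op_digest_t *digests =
  *         pe__calculate_digests(rsc, PCMK_ACTION_START, &interval_ms, node,
  *                               NULL, NULL, false, scheduler);
  *
  *     if (digests != NULL) {
  *         crm_debug("Calculated digest %s", digests->digest_all_calc);
  *         pe__free_digests(digests);
  *     }
  */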
 
 /*!
  * \internal
  * \brief Calculate action digests and store in node's digest cache
  *
  * \param[in,out] rsc          Resource that action was for
  * \param[in]     task         Name of action performed
  * \param[in]     interval_ms  Action's interval
  * \param[in,out] node         Node action was performed on
  * \param[in]     xml_op       XML of operation in CIB status (if available)
  * \param[in]     calc_secure  Whether to calculate secure digest
  * \param[in,out] scheduler    Scheduler data
  *
  * \return Pointer to node's digest cache entry
  */
 static pcmk__op_digest_t *
 rsc_action_digest(pcmk_resource_t *rsc, const char *task, guint interval_ms,
                   pcmk_node_t *node, const xmlNode *xml_op,
                   bool calc_secure, pcmk_scheduler_t *scheduler)
 {
     pcmk__op_digest_t *data = NULL;
     char *key = pcmk__op_key(rsc->id, task, interval_ms);
 
     data = g_hash_table_lookup(node->private->digest_cache, key);
     if (data == NULL) {
         data = pe__calculate_digests(rsc, task, &interval_ms, node, xml_op,
                                      NULL, calc_secure, scheduler);
         CRM_ASSERT(data != NULL);
         g_hash_table_insert(node->private->digest_cache, strdup(key), data);
     }
     free(key);
     return data;
 }
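 
 /* Digest cache entries are keyed by operation key as built by pcmk__op_key()
  * above, e.g. "myrsc_monitor_10000" (hypothetical resource) for a
  * 10000ms-interval monitor of myrsc.
  */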
 
 /*!
  * \internal
  * \brief Calculate operation digests and compare against an XML history entry
  *
  * \param[in,out] rsc        Resource to check
  * \param[in]     xml_op     Resource history XML
  * \param[in,out] node       Node to use for digest calculation
  * \param[in,out] scheduler  Scheduler data
  *
  * \return Pointer to node's digest cache entry, with comparison result set
  */
 pcmk__op_digest_t *
 rsc_action_digest_cmp(pcmk_resource_t *rsc, const xmlNode *xml_op,
                       pcmk_node_t *node, pcmk_scheduler_t *scheduler)
 {
     pcmk__op_digest_t *data = NULL;
     guint interval_ms = 0;
 
     const char *op_version;
     const char *task = crm_element_value(xml_op, PCMK_XA_OPERATION);
     const char *digest_all;
     const char *digest_restart;
 
     CRM_ASSERT(node != NULL);
 
     op_version = crm_element_value(xml_op, PCMK_XA_CRM_FEATURE_SET);
     digest_all = crm_element_value(xml_op, PCMK__XA_OP_DIGEST);
     digest_restart = crm_element_value(xml_op, PCMK__XA_OP_RESTART_DIGEST);
 
     crm_element_value_ms(xml_op, PCMK_META_INTERVAL, &interval_ms);
     data = rsc_action_digest(rsc, task, interval_ms, node, xml_op,
                              pcmk_is_set(scheduler->flags,
                                          pcmk_sched_sanitized),
                              scheduler);
 
     if (digest_restart && data->digest_restart_calc
         && strcmp(data->digest_restart_calc, digest_restart) != 0) {
         pcmk__rsc_info(rsc,
                        "Parameters to %ums-interval %s action for %s on %s "
                        "changed: hash was %s vs. now %s (restart:%s) %s",
                        interval_ms, task, rsc->id, pcmk__node_name(node),
                        pcmk__s(digest_restart, "missing"),
                        data->digest_restart_calc, op_version,
                        crm_element_value(xml_op, PCMK__XA_TRANSITION_MAGIC));
         data->rc = pcmk__digest_restart;
 
     } else if (digest_all == NULL) {
         /* it is unknown what the previous op digest was */
         data->rc = pcmk__digest_unknown;
 
     } else if (strcmp(digest_all, data->digest_all_calc) != 0) {
         /* For a non-recurring operation with extra parameters configured, if
          * the main digest doesn't match, enforce a restart rather than a
          * reload-agent even when the restart digest matches. This ensures that
          * any changes to the extra parameters are applied for this specific
          * operation, and that the digests calculated for the resulting
          * PCMK__XE_LRM_RSC_OP will be correct. Preserve the implied rc of
          * pcmk__digest_restart for the case where the main digest doesn't
          * match.
          */
         if ((interval_ms == 0) && (data->rc == pcmk__digest_restart)) {
             pcmk__rsc_info(rsc,
                            "Parameters containing extra ones to %ums-interval"
                            " %s action for %s on %s "
                            "changed: hash was %s vs. now %s (restart:%s) %s",
                            interval_ms, task, rsc->id, pcmk__node_name(node),
                            pcmk__s(digest_all, "missing"),
                            data->digest_all_calc, op_version,
                            crm_element_value(xml_op,
                                              PCMK__XA_TRANSITION_MAGIC));
 
         } else {
             pcmk__rsc_info(rsc,
                            "Parameters to %ums-interval %s action for %s on %s "
                            "changed: hash was %s vs. now %s (%s:%s) %s",
                            interval_ms, task, rsc->id, pcmk__node_name(node),
                            pcmk__s(digest_all, "missing"),
                            data->digest_all_calc,
                            (interval_ms > 0)? "reschedule" : "reload",
                            op_version,
                            crm_element_value(xml_op,
                                              PCMK__XA_TRANSITION_MAGIC));
             data->rc = pcmk__digest_mismatch;
         }
 
     } else {
         data->rc = pcmk__digest_match;
     }
     return data;
 }
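 
 /* A sketch of how a caller might act on the comparison result (hypothetical;
  * the actual scheduler logic lives in pcmk__check_action_config()):
  *
  *     pcmk__op_digest_t *digests = rsc_action_digest_cmp(rsc, xml_op, node,
  *                                                        scheduler);
  *
  *     switch (digests->rc) {
  *         case pcmk__digest_restart:  // Restart digest changed
  *             // Force a full restart
  *             break;
  *         case pcmk__digest_mismatch: // Parameters changed
  *             // Reload or reschedule as appropriate
  *             break;
  *         default:                    // Match, or nothing to compare against
  *             break;
  *     }
  */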
 
 /*!
  * \internal
  * \brief Create an unfencing summary for use in special node attribute
  *
  * Create a string combining a fence device's resource ID, agent type, and
  * parameter digest (whether for all parameters or just non-private parameters).
  * This can be stored in a special node attribute, allowing us to detect changes
  * in either the agent type or parameters, to know whether unfencing must be
  * redone or can be safely skipped when the device's history is cleaned.
  *
  * \param[in] rsc_id        Fence device resource ID
  * \param[in] agent_type    Fence device agent
  * \param[in] param_digest  Fence device parameter digest
  *
  * \return Newly allocated string with unfencing digest
  * \note The caller is responsible for freeing the result.
  */
 static inline char *
 create_unfencing_summary(const char *rsc_id, const char *agent_type,
                          const char *param_digest)
 {
     return crm_strdup_printf("%s:%s:%s", rsc_id, agent_type, param_digest);
 }
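 
 /* For example, create_unfencing_summary("st1", "fence_ipmilan", "a1b2")
  * (hypothetical values) yields "st1:fence_ipmilan:a1b2".
  */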
 
 /*!
  * \internal
  * \brief Check whether a node can skip unfencing
  *
  * Check whether a fence device's current definition matches a node's
  * stored summary of when it was last unfenced by the device.
  *
  * \param[in] rsc_id        Fence device's resource ID
  * \param[in] agent         Fence device's agent type
  * \param[in] digest_calc   Fence device's current parameter digest
  * \param[in] node_summary  Value of node's special unfencing node attribute
  *                          (a comma-separated list of unfencing summaries for
  *                          all devices that have unfenced this node)
  *
  * \return TRUE if digest matches, FALSE otherwise
  */
 static bool
 unfencing_digest_matches(const char *rsc_id, const char *agent,
                          const char *digest_calc, const char *node_summary)
 {
     bool matches = FALSE;
 
     if (rsc_id && agent && digest_calc && node_summary) {
         char *search_secure = create_unfencing_summary(rsc_id, agent,
                                                        digest_calc);
 
         /* The digest was calculated including the device ID and agent,
          * so there is no risk of collision using strstr().
          */
         matches = (strstr(node_summary, search_secure) != NULL);
         crm_trace("Calculated unfencing digest '%s' %sfound in '%s'",
                   search_secure, matches? "" : "not ", node_summary);
         free(search_secure);
     }
     return matches;
 }
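 
 /* Continuing the hypothetical example above, with a node summary of
  * "st1:fence_ipmilan:a1b2,st2:fence_apc:c3d4", a search for
  * "st1:fence_ipmilan:a1b2" succeeds, while any change to the digest or agent
  * makes the strstr() search fail, so the function returns FALSE.
  */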
 
 /* Magic string to use as action name for digest cache entries used for
  * unfencing checks. This is not a real action name (the real one is "on"), so
  * pcmk__check_action_config() won't confuse these entries with real actions.
  */
 #define STONITH_DIGEST_TASK "stonith-on"
 
 /*!
  * \internal
  * \brief Calculate fence device digests and digest comparison result
  *
  * \param[in,out] rsc        Fence device resource
  * \param[in]     agent      Fence device's agent type
  * \param[in,out] node       Node with digest cache to use
  * \param[in,out] scheduler  Scheduler data
  *
  * \return Node's digest cache entry
  */
 pcmk__op_digest_t *
 pe__compare_fencing_digest(pcmk_resource_t *rsc, const char *agent,
                            pcmk_node_t *node, pcmk_scheduler_t *scheduler)
 {
     const char *node_summary = NULL;
 
     // Calculate device's current parameter digests
     pcmk__op_digest_t *data = rsc_action_digest(rsc, STONITH_DIGEST_TASK, 0U,
                                                 node, NULL, TRUE, scheduler);
 
     // Check whether node has special unfencing summary node attribute
     node_summary = pcmk__node_attr(node, CRM_ATTR_DIGESTS_ALL, NULL,
                                    pcmk__rsc_node_current);
     if (node_summary == NULL) {
         data->rc = pcmk__digest_unknown;
         return data;
     }
 
     // Check whether full parameter digest matches
     if (unfencing_digest_matches(rsc->id, agent, data->digest_all_calc,
                                  node_summary)) {
         data->rc = pcmk__digest_match;
         return data;
     }
 
     // Check whether secure parameter digest matches
     node_summary = pcmk__node_attr(node, CRM_ATTR_DIGESTS_SECURE, NULL,
                                    pcmk__rsc_node_current);
     if (unfencing_digest_matches(rsc->id, agent, data->digest_secure_calc,
                                  node_summary)) {
         data->rc = pcmk__digest_match;
         if (!pcmk__is_daemon && scheduler->priv != NULL) {
             pcmk__output_t *out = scheduler->priv;
             out->info(out, "Only 'private' parameters to %s "
                       "for unfencing %s changed", rsc->id,
                       pcmk__node_name(node));
         }
         return data;
     }
 
     // Parameters don't match
     data->rc = pcmk__digest_mismatch;
     if (pcmk_is_set(scheduler->flags, pcmk_sched_sanitized)
         && (data->digest_secure_calc != NULL)) {
 
         if (scheduler->priv != NULL) {
             pcmk__output_t *out = scheduler->priv;
             char *digest = create_unfencing_summary(rsc->id, agent,
                                                     data->digest_secure_calc);
 
             out->info(out, "Parameters to %s for unfencing "
                       "%s changed, try '%s'", rsc->id,
                       pcmk__node_name(node), digest);
             free(digest);
         } else if (!pcmk__is_daemon) {
             char *digest = create_unfencing_summary(rsc->id, agent,
                                                     data->digest_secure_calc);
 
             printf("Parameters to %s for unfencing %s changed, try '%s'\n",
                    rsc->id, pcmk__node_name(node), digest);
             free(digest);
         }
     }
     return data;
 }
diff --git a/lib/pengine/pe_notif.c b/lib/pengine/pe_notif.c
index 3902024bde..bf2e806d0d 100644
--- a/lib/pengine/pe_notif.c
+++ b/lib/pengine/pe_notif.c
@@ -1,1014 +1,1016 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/common/xml.h>
 
 #include <crm/pengine/internal.h>
 #include <pacemaker-internal.h>
 
 #include "pe_status_private.h"
 
 typedef struct notify_entry_s {
     const pcmk_resource_t *rsc;
     const pcmk_node_t *node;
 } notify_entry_t;
 
 /*!
  * \internal
  * \brief Compare two notification entries
  *
  * Compare two notification entries, where the one with the alphabetically first
  * resource name (or if equal, node ID) sorts as first, with NULL sorting as
  * less than non-NULL.
  *
  * \param[in] a  First notification entry to compare
  * \param[in] b  Second notification entry to compare
  *
  * \return -1 if \p a sorts before \p b, 0 if they are equal, otherwise 1
  */
 static gint
 compare_notify_entries(gconstpointer a, gconstpointer b)
 {
     int tmp;
     const notify_entry_t *entry_a = a;
     const notify_entry_t *entry_b = b;
 
     // NULL a or b is not actually possible
     if ((entry_a == NULL) && (entry_b == NULL)) {
         return 0;
     }
     if (entry_a == NULL) {
         return 1;
     }
     if (entry_b == NULL) {
         return -1;
     }
 
     // NULL resources sort first
     if ((entry_a->rsc == NULL) && (entry_b->rsc == NULL)) {
         return 0;
     }
     if (entry_a->rsc == NULL) {
         return 1;
     }
     if (entry_b->rsc == NULL) {
         return -1;
     }
 
     // Compare resource names
     tmp = strcmp(entry_a->rsc->id, entry_b->rsc->id);
     if (tmp != 0) {
         return tmp;
     }
 
     // Otherwise NULL nodes sort first
     if ((entry_a->node == NULL) && (entry_b->node == NULL)) {
         return 0;
     }
     if (entry_a->node == NULL) {
         return 1;
     }
     if (entry_b->node == NULL) {
         return -1;
     }
 
     // Finally, compare node IDs
     return strcmp(entry_a->node->private->id, entry_b->node->private->id);
 }
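 
 /* Because compare_notify_entries() is a GCompareFunc, it can be passed
  * directly to g_list_sort(), as notify_entries_to_strings() does:
  *
  *     list = g_list_sort(list, compare_notify_entries);
  */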
 
 /*!
  * \internal
  * \brief Duplicate a notification entry
  *
  * \param[in] entry  Entry to duplicate
  *
  * \return Newly allocated duplicate of \p entry
  * \note It is the caller's responsibility to free the return value.
  */
 static notify_entry_t *
 dup_notify_entry(const notify_entry_t *entry)
 {
     notify_entry_t *dup = pcmk__assert_alloc(1, sizeof(notify_entry_t));
 
     dup->rsc = entry->rsc;
     dup->node = entry->node;
     return dup;
 }
 
 /*!
  * \internal
  * \brief Given a list of nodes, create strings with node names
  *
  * \param[in]  list             List of nodes (as pcmk_node_t *)
  * \param[out] all_node_names   If not NULL, will be set to space-separated list
  *                              of the names of all nodes in \p list
  * \param[out] host_node_names  Same as \p all_node_names, except active
  *                              guest nodes will list the name of their host
  *
  * \note The caller is responsible for freeing the output argument values using
  *       \p g_string_free().
  */
 static void
 get_node_names(const GList *list, GString **all_node_names,
                GString **host_node_names)
 {
     if (all_node_names != NULL) {
         *all_node_names = NULL;
     }
     if (host_node_names != NULL) {
         *host_node_names = NULL;
     }
 
     for (const GList *iter = list; iter != NULL; iter = iter->next) {
         const pcmk_node_t *node = (const pcmk_node_t *) iter->data;
 
         if (node->private->name == NULL) {
             continue;
         }
 
         // Always add to list of all node names
         if (all_node_names != NULL) {
             pcmk__add_word(all_node_names, 1024, node->private->name);
         }
 
         // Add to host node name list if appropriate
         if (host_node_names != NULL) {
             if (pcmk__is_guest_or_bundle_node(node)) {
                 const pcmk_resource_t *launcher = NULL;
 
                 launcher = node->private->remote->private->launcher;
                 if (launcher->private->active_nodes != NULL) {
                     node = pcmk__current_node(launcher);
                     if (node->private->name == NULL) {
                         continue;
                     }
                 }
             }
             pcmk__add_word(host_node_names, 1024, node->private->name);
         }
     }
 
     if ((all_node_names != NULL) && (*all_node_names == NULL)) {
         *all_node_names = g_string_new(" ");
     }
     if ((host_node_names != NULL) && (*host_node_names == NULL)) {
         *host_node_names = g_string_new(" ");
     }
 }
 
 /*!
  * \internal
  * \brief Create strings of instance and node names from notification entries
  *
  * \param[in,out] list        List of notification entries (will be sorted here)
  * \param[out]    rsc_names   If not NULL, will be set to space-separated list
  *                            of clone instances from \p list
  * \param[out]    node_names  If not NULL, will be set to space-separated list
  *                            of node names from \p list
  *
  * \return (Possibly new) head of sorted \p list
  * \note The caller is responsible for freeing the output argument values using
  *       \p g_list_free_full() and \p g_string_free().
  */
 static GList *
 notify_entries_to_strings(GList *list, GString **rsc_names,
                           GString **node_names)
 {
     const char *last_rsc_id = NULL;
 
     // Initialize output lists to NULL
     if (rsc_names != NULL) {
         *rsc_names = NULL;
     }
     if (node_names != NULL) {
         *node_names = NULL;
     }
 
     // Sort input list for user-friendliness (and ease of filtering duplicates)
     list = g_list_sort(list, compare_notify_entries);
 
     for (GList *gIter = list; gIter != NULL; gIter = gIter->next) {
         notify_entry_t *entry = (notify_entry_t *) gIter->data;
 
         // Entry must have a resource (with ID)
         CRM_LOG_ASSERT((entry != NULL) && (entry->rsc != NULL)
                        && (entry->rsc->id != NULL));
         if ((entry == NULL) || (entry->rsc == NULL)
             || (entry->rsc->id == NULL)) {
             continue;
         }
 
         // Entry must have a node unless listing inactive resources
         CRM_LOG_ASSERT((node_names == NULL) || (entry->node != NULL));
         if ((node_names != NULL) && (entry->node == NULL)) {
             continue;
         }
 
         // Don't add duplicates of a particular clone instance
         if (pcmk__str_eq(entry->rsc->id, last_rsc_id, pcmk__str_none)) {
             continue;
         }
         last_rsc_id = entry->rsc->id;
 
         if (rsc_names != NULL) {
             pcmk__add_word(rsc_names, 1024, entry->rsc->id);
         }
         if ((node_names != NULL) && (entry->node->private->name != NULL)) {
             pcmk__add_word(node_names, 1024, entry->node->private->name);
         }
     }
 
     // If there are no entries, return "empty" lists
     if ((rsc_names != NULL) && (*rsc_names == NULL)) {
         *rsc_names = g_string_new(" ");
     }
     if ((node_names != NULL) && (*node_names == NULL)) {
         *node_names = g_string_new(" ");
     }
 
     return list;
 }
 
 /*!
  * \internal
  * \brief Copy a meta-attribute into a notify action
  *
  * \param[in]     key        Name of meta-attribute to copy
  * \param[in]     value      Value of meta-attribute to copy
  * \param[in,out] user_data  Notify action to copy into
  */
 static void
 copy_meta_to_notify(gpointer key, gpointer value, gpointer user_data)
 {
     pcmk_action_t *notify = (pcmk_action_t *) user_data;
 
     /* Any existing meta-attributes (for example, the action timeout) are for
      * the notify action itself, so don't override those.
      */
     if (g_hash_table_lookup(notify->meta, (const char *) key) != NULL) {
         return;
     }
 
     pcmk__insert_dup(notify->meta, (const char *) key, (const char *) value);
 }
 
 static void
 add_notify_data_to_action_meta(const notify_data_t *n_data,
                                pcmk_action_t *action)
 {
     for (const GSList *item = n_data->keys; item; item = item->next) {
         const pcmk_nvpair_t *nvpair = (const pcmk_nvpair_t *) item->data;
 
         pcmk__insert_meta(action, nvpair->name, nvpair->value);
     }
 }
 
 /*!
  * \internal
  * \brief Create a new notify pseudo-action for a clone resource
  *
  * \param[in,out] rsc           Clone resource that notification is for
  * \param[in]     action        Action to use in notify action key
  * \param[in]     notif_action  PCMK_ACTION_NOTIFY or PCMK_ACTION_NOTIFIED
  * \param[in]     notif_type    "pre", "post", "confirmed-pre", "confirmed-post"
  *
  * \return Newly created notify pseudo-action
  */
 static pcmk_action_t *
 new_notify_pseudo_action(pcmk_resource_t *rsc, const pcmk_action_t *action,
                          const char *notif_action, const char *notif_type)
 {
     pcmk_action_t *notify = NULL;
 
     notify = custom_action(rsc,
                            pcmk__notify_key(rsc->id, notif_type, action->task),
                            notif_action, NULL,
                            pcmk_is_set(action->flags, pcmk__action_optional),
                            rsc->private->scheduler);
     pcmk__set_action_flags(notify, pcmk__action_pseudo);
     pcmk__insert_meta(notify, "notify_key_type", notif_type);
     pcmk__insert_meta(notify, "notify_key_operation", action->task);
     return notify;
 }
 
 /*!
  * \internal
  * \brief Create a new notify action for a clone instance
  *
  * \param[in,out] rsc          Clone instance that notification is for
  * \param[in]     node         Node that notification is for
  * \param[in,out] op           Action that notification is for
  * \param[in,out] notify_done  Parent pseudo-action for notifications complete
  * \param[in]     n_data       Notification values to add to action meta-data
  *
  * \return Newly created notify action
  */
 static pcmk_action_t *
 new_notify_action(pcmk_resource_t *rsc, const pcmk_node_t *node,
                   pcmk_action_t *op, pcmk_action_t *notify_done,
                   const notify_data_t *n_data)
 {
     char *key = NULL;
     pcmk_action_t *notify_action = NULL;
     const char *value = NULL;
     const char *task = NULL;
     const char *skip_reason = NULL;
 
     CRM_CHECK((rsc != NULL) && (node != NULL), return NULL);
 
     // Ensure we have all the info we need
     if (op == NULL) {
         skip_reason = "no action";
     } else if (notify_done == NULL) {
         skip_reason = "no parent notification";
     } else if (!node->details->online) {
         skip_reason = "node offline";
     } else if (!pcmk_is_set(op->flags, pcmk__action_runnable)) {
         skip_reason = "original action not runnable";
     }
     if (skip_reason != NULL) {
         pcmk__rsc_trace(rsc, "Skipping notify action for %s on %s: %s",
                         rsc->id, pcmk__node_name(node), skip_reason);
         return NULL;
     }
 
     value = g_hash_table_lookup(op->meta, "notify_type");     // "pre" or "post"
     task = g_hash_table_lookup(op->meta, "notify_operation"); // original action
 
     pcmk__rsc_trace(rsc, "Creating notify action for %s on %s (%s-%s)",
                     rsc->id, pcmk__node_name(node), value, task);
 
     // Create the notify action
     key = pcmk__notify_key(rsc->id, value, task);
     notify_action = custom_action(rsc, key, op->task, node,
                                   pcmk_is_set(op->flags, pcmk__action_optional),
                                   rsc->private->scheduler);
 
     // Add meta-data to notify action
     g_hash_table_foreach(op->meta, copy_meta_to_notify, notify_action);
     add_notify_data_to_action_meta(n_data, notify_action);
 
     // Order notify after original action and before parent notification
     order_actions(op, notify_action, pcmk__ar_ordered);
     order_actions(notify_action, notify_done, pcmk__ar_ordered);
     return notify_action;
 }
 
 /*!
  * \internal
  * \brief Create a new "post-" notify action for a clone instance
  *
  * \param[in,out] rsc     Clone instance that notification is for
  * \param[in]     node    Node that notification is for
  * \param[in,out] n_data  Notification values to add to action meta-data
  */
 static void
 new_post_notify_action(pcmk_resource_t *rsc, const pcmk_node_t *node,
                        notify_data_t *n_data)
 {
     pcmk_action_t *notify = NULL;
 
     CRM_ASSERT(n_data != NULL);
 
     // Create the "post-" notify action for specified instance
     notify = new_notify_action(rsc, node, n_data->post, n_data->post_done,
                                n_data);
     if (notify != NULL) {
         notify->priority = PCMK_SCORE_INFINITY;
     }
 
     // Order recurring monitors after all "post-" notifications complete
     if (n_data->post_done == NULL) {
         return;
     }
     for (GList *iter = rsc->private->actions; iter != NULL; iter = iter->next) {
         pcmk_action_t *mon = (pcmk_action_t *) iter->data;
         const char *interval_ms_s = NULL;
 
         interval_ms_s = g_hash_table_lookup(mon->meta, PCMK_META_INTERVAL);
         if (pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)
             || pcmk__str_eq(mon->task, PCMK_ACTION_CANCEL, pcmk__str_none)) {
             continue; // Not a recurring monitor
         }
         order_actions(n_data->post_done, mon, pcmk__ar_ordered);
     }
 }
 
 /*!
  * \internal
  * \brief Create and order notification pseudo-actions for a clone action
  *
  * In addition to the actual notify actions needed for each clone instance,
  * clone notifications also require pseudo-actions to provide ordering points
  * in the notification process. This creates the notification data, along with
  * appropriate pseudo-actions and their orderings.
  *
  * For example, the ordering sequence for starting a clone is:
  *
  *     "pre-" notify pseudo-action for clone
  *     -> "pre-" notify actions for each clone instance
  *     -> "pre-" notifications complete pseudo-action for clone
  *     -> start actions for each clone instance
  *     -> "started" pseudo-action for clone
  *     -> "post-" notify pseudo-action for clone
  *     -> "post-" notify actions for each clone instance
  *     -> "post-" notifications complete pseudo-action for clone
  *
  * \param[in,out] rsc       Clone that notifications are for
  * \param[in]     task      Name of action that notifications are for
  * \param[in,out] action    If not NULL, create a "pre-" pseudo-action ordered
  *                          before a "pre-" complete pseudo-action, ordered
  *                          before this action
  * \param[in,out] complete  If not NULL, create a "post-" pseudo-action ordered
  *                          after this action, and a "post-" complete
  *                          pseudo-action ordered after that
  *
  * \return Newly created notification data
  */
 notify_data_t *
 pe__action_notif_pseudo_ops(pcmk_resource_t *rsc, const char *task,
                             pcmk_action_t *action, pcmk_action_t *complete)
 {
     notify_data_t *n_data = NULL;
 
     if (!pcmk_is_set(rsc->flags, pcmk__rsc_notify)) {
         return NULL;
     }
 
     n_data = pcmk__assert_alloc(1, sizeof(notify_data_t));
 
     n_data->action = task;
 
     if (action != NULL) { // Need "pre-" pseudo-actions
 
         // Create "pre-" notify pseudo-action for clone
         n_data->pre = new_notify_pseudo_action(rsc, action, PCMK_ACTION_NOTIFY,
                                                "pre");
         pcmk__set_action_flags(n_data->pre, pcmk__action_runnable);
         pcmk__insert_meta(n_data->pre, "notify_type", "pre");
         pcmk__insert_meta(n_data->pre, "notify_operation", n_data->action);
 
         // Create "pre-" notifications complete pseudo-action for clone
         n_data->pre_done = new_notify_pseudo_action(rsc, action,
                                                     PCMK_ACTION_NOTIFIED,
                                                     "confirmed-pre");
         pcmk__set_action_flags(n_data->pre_done, pcmk__action_runnable);
         pcmk__insert_meta(n_data->pre_done, "notify_type", "pre");
         pcmk__insert_meta(n_data->pre_done, "notify_operation", n_data->action);
 
         // Order "pre-" -> "pre-" complete -> original action
         order_actions(n_data->pre, n_data->pre_done, pcmk__ar_ordered);
         order_actions(n_data->pre_done, action, pcmk__ar_ordered);
     }
 
     if (complete != NULL) { // Need "post-" pseudo-actions
 
         // Create "post-" notify pseudo-action for clone
         n_data->post = new_notify_pseudo_action(rsc, complete,
                                                 PCMK_ACTION_NOTIFY, "post");
         n_data->post->priority = PCMK_SCORE_INFINITY;
         if (pcmk_is_set(complete->flags, pcmk__action_runnable)) {
             pcmk__set_action_flags(n_data->post, pcmk__action_runnable);
         } else {
             pcmk__clear_action_flags(n_data->post, pcmk__action_runnable);
         }
         pcmk__insert_meta(n_data->post, "notify_type", "post");
         pcmk__insert_meta(n_data->post, "notify_operation", n_data->action);
 
         // Create "post-" notifications complete pseudo-action for clone
         n_data->post_done = new_notify_pseudo_action(rsc, complete,
                                                      PCMK_ACTION_NOTIFIED,
                                                      "confirmed-post");
         n_data->post_done->priority = PCMK_SCORE_INFINITY;
         if (pcmk_is_set(complete->flags, pcmk__action_runnable)) {
             pcmk__set_action_flags(n_data->post_done, pcmk__action_runnable);
         } else {
             pcmk__clear_action_flags(n_data->post_done, pcmk__action_runnable);
         }
         pcmk__insert_meta(n_data->post_done, "notify_type", "post");
         pcmk__insert_meta(n_data->post_done,
                           "notify_operation", n_data->action);
 
         // Order original action complete -> "post-" -> "post-" complete
         order_actions(complete, n_data->post, pcmk__ar_first_implies_then);
         order_actions(n_data->post, n_data->post_done,
                       pcmk__ar_first_implies_then);
     }
 
     // If we created both, order "pre-" complete -> "post-"
     if ((action != NULL) && (complete != NULL)) {
         order_actions(n_data->pre_done, n_data->post, pcmk__ar_ordered);
     }
     return n_data;
 }
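 
 /* A minimal sketch of the expected calling sequence (hypothetical caller;
  * assumes the clone's start action and "started" pseudo-action exist):
  *
  *     notify_data_t *n_data =
  *         pe__action_notif_pseudo_ops(clone, PCMK_ACTION_START, start,
  *                                     started);
  *
  *     if (n_data != NULL) {
  *         pe__create_action_notifications(clone, n_data);
  *         pe__free_action_notification_data(n_data);
  *     }
  */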
 
 /*!
  * \internal
  * \brief Create a new notification entry
  *
  * \param[in] rsc   Resource for notification
  * \param[in] node  Node for notification
  *
  * \return Newly allocated notification entry
  * \note The caller is responsible for freeing the return value.
  */
 static notify_entry_t *
 new_notify_entry(const pcmk_resource_t *rsc, const pcmk_node_t *node)
 {
     notify_entry_t *entry = pcmk__assert_alloc(1, sizeof(notify_entry_t));
 
     entry->rsc = rsc;
     entry->node = node;
     return entry;
 }
 
 /*!
  * \internal
  * \brief Add notification data for resource state and optionally actions
  *
- * \param[in]     rsc       Clone or clone instance being notified
+ * \param[in,out] rsc       Clone or clone instance being notified
  * \param[in]     activity  Whether to add notification entries for actions
  * \param[in,out] n_data    Notification data for clone
  */
 static void
-collect_resource_data(const pcmk_resource_t *rsc, bool activity,
+collect_resource_data(pcmk_resource_t *rsc, bool activity,
                       notify_data_t *n_data)
 {
     const GList *iter = NULL;
     notify_entry_t *entry = NULL;
     const pcmk_node_t *node = NULL;
 
     if (n_data == NULL) {
         return;
     }
 
     if (n_data->allowed_nodes == NULL) {
         n_data->allowed_nodes = rsc->private->allowed_nodes;
     }
 
     // If this is a clone, call recursively for each instance
     if (rsc->private->children != NULL) {
         for (iter = rsc->private->children; iter != NULL; iter = iter->next) {
-            const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
+            pcmk_resource_t *child = iter->data;
 
             collect_resource_data(child, activity, n_data);
         }
         return;
     }
 
     // This is a notification for a single clone instance
 
     if (rsc->private->active_nodes != NULL) {
         node = rsc->private->active_nodes->data; // First is sufficient
     }
     entry = new_notify_entry(rsc, node);
 
     // Add notification indicating the resource state
     switch (rsc->private->orig_role) {
         case pcmk_role_stopped:
             n_data->inactive = g_list_prepend(n_data->inactive, entry);
             break;
 
         case pcmk_role_started:
             n_data->active = g_list_prepend(n_data->active, entry);
             break;
 
         case pcmk_role_unpromoted:
             n_data->unpromoted = g_list_prepend(n_data->unpromoted, entry);
             n_data->active = g_list_prepend(n_data->active,
                                             dup_notify_entry(entry));
             break;
 
         case pcmk_role_promoted:
             n_data->promoted = g_list_prepend(n_data->promoted, entry);
             n_data->active = g_list_prepend(n_data->active,
                                             dup_notify_entry(entry));
             break;
 
         default:
-            pcmk__sched_err("Resource %s role on %s (%s) is not supported for "
+            pcmk__sched_err(rsc->private->scheduler,
+                            "Resource %s role on %s (%s) is not supported for "
                             "notifications (bug?)",
                             rsc->id, pcmk__node_name(node),
                             pcmk_role_text(rsc->private->orig_role));
             free(entry);
             break;
     }
 
     if (!activity) {
         return;
     }
 
     // Add notification entries for each of the resource's actions
     for (iter = rsc->private->actions; iter != NULL; iter = iter->next) {
         const pcmk_action_t *op = (const pcmk_action_t *) iter->data;
 
         if (!pcmk_is_set(op->flags, pcmk__action_optional)
             && (op->node != NULL)) {
             enum pcmk__action_type task = pcmk__parse_action(op->task);
 
             if ((task == pcmk__action_stop) && op->node->details->unclean) {
                 // Create anyway (additional noise if node can't be fenced)
             } else if (!pcmk_is_set(op->flags, pcmk__action_runnable)) {
                 continue;
             }
 
             entry = new_notify_entry(rsc, op->node);
 
             switch (task) {
                 case pcmk__action_start:
                     n_data->start = g_list_prepend(n_data->start, entry);
                     break;
                 case pcmk__action_stop:
                     n_data->stop = g_list_prepend(n_data->stop, entry);
                     break;
                 case pcmk__action_promote:
                     n_data->promote = g_list_prepend(n_data->promote, entry);
                     break;
                 case pcmk__action_demote:
                     n_data->demote = g_list_prepend(n_data->demote, entry);
                     break;
                 default:
                     free(entry);
                     break;
             }
         }
     }
 }
 
 // For (char *) value
 #define add_notify_env(n_data, key, value) do {                         \
          n_data->keys = pcmk_prepend_nvpair(n_data->keys, key, value);  \
     } while (0)
 
 // For (GString *) value
 #define add_notify_env_gs(n_data, key, value) do {                      \
          n_data->keys = pcmk_prepend_nvpair(n_data->keys, key,          \
                                             (const char *) value->str); \
     } while (0)
 
 // For (GString *) value
 #define add_notify_env_free_gs(n_data, key, value) do {                 \
          n_data->keys = pcmk_prepend_nvpair(n_data->keys, key,          \
                                             (const char *) value->str); \
          g_string_free(value, TRUE); value = NULL;                      \
     } while (0)
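 
 /* The _gs variant keeps the GString alive so the same value can be reused
  * under another key; the _free_gs variant frees it afterward. For example,
  * add_notif_keys() below adds the unpromoted lists with add_notify_env_gs()
  * under the "notify_unpromoted_*" keys, then reuses and frees them via
  * add_notify_env_free_gs() under the deprecated "notify_slave_*" keys.
  */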
 
 /*!
  * \internal
  * \brief Create notification name/value pairs from structured data
  *
  * \param[in]     rsc       Resource that notification is for
  * \param[in,out] n_data    Notification data
  */
 static void
 add_notif_keys(const pcmk_resource_t *rsc, notify_data_t *n_data)
 {
     bool required = false; // Whether to make notify actions required
     GString *rsc_list = NULL;
     GString *node_list = NULL;
     GString *metal_list = NULL;
     const char *source = NULL;
     GList *nodes = NULL;
 
     n_data->stop = notify_entries_to_strings(n_data->stop,
                                              &rsc_list, &node_list);
     if ((strcmp(" ", (const char *) rsc_list->str) != 0)
         && pcmk__str_eq(n_data->action, PCMK_ACTION_STOP, pcmk__str_none)) {
         required = true;
     }
     add_notify_env_free_gs(n_data, "notify_stop_resource", rsc_list);
     add_notify_env_free_gs(n_data, "notify_stop_uname", node_list);
 
     if ((n_data->start != NULL)
         && pcmk__str_eq(n_data->action, PCMK_ACTION_START, pcmk__str_none)) {
         required = true;
     }
     n_data->start = notify_entries_to_strings(n_data->start,
                                               &rsc_list, &node_list);
     add_notify_env_free_gs(n_data, "notify_start_resource", rsc_list);
     add_notify_env_free_gs(n_data, "notify_start_uname", node_list);
 
     if ((n_data->demote != NULL)
         && pcmk__str_eq(n_data->action, PCMK_ACTION_DEMOTE, pcmk__str_none)) {
         required = true;
     }
     n_data->demote = notify_entries_to_strings(n_data->demote,
                                                &rsc_list, &node_list);
     add_notify_env_free_gs(n_data, "notify_demote_resource", rsc_list);
     add_notify_env_free_gs(n_data, "notify_demote_uname", node_list);
 
     if ((n_data->promote != NULL)
         && pcmk__str_eq(n_data->action, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
         required = true;
     }
     n_data->promote = notify_entries_to_strings(n_data->promote,
                                                 &rsc_list, &node_list);
     add_notify_env_free_gs(n_data, "notify_promote_resource", rsc_list);
     add_notify_env_free_gs(n_data, "notify_promote_uname", node_list);
 
     n_data->active = notify_entries_to_strings(n_data->active,
                                                &rsc_list, &node_list);
     add_notify_env_free_gs(n_data, "notify_active_resource", rsc_list);
     add_notify_env_free_gs(n_data, "notify_active_uname", node_list);
 
     n_data->unpromoted = notify_entries_to_strings(n_data->unpromoted,
                                                    &rsc_list, &node_list);
     add_notify_env_gs(n_data, "notify_unpromoted_resource", rsc_list);
     add_notify_env_gs(n_data, "notify_unpromoted_uname", node_list);
 
     // Deprecated: kept for backward compatibility with older resource agents
     add_notify_env_free_gs(n_data, "notify_slave_resource", rsc_list);
     add_notify_env_free_gs(n_data, "notify_slave_uname", node_list);
 
     n_data->promoted = notify_entries_to_strings(n_data->promoted,
                                                  &rsc_list, &node_list);
     add_notify_env_gs(n_data, "notify_promoted_resource", rsc_list);
     add_notify_env_gs(n_data, "notify_promoted_uname", node_list);
 
     // Deprecated: kept for backward compatibility with older resource agents
     add_notify_env_free_gs(n_data, "notify_master_resource", rsc_list);
     add_notify_env_free_gs(n_data, "notify_master_uname", node_list);
 
     n_data->inactive = notify_entries_to_strings(n_data->inactive,
                                                  &rsc_list, NULL);
     add_notify_env_free_gs(n_data, "notify_inactive_resource", rsc_list);
 
     nodes = g_hash_table_get_values(n_data->allowed_nodes);
     if (!pcmk__is_daemon) {
         /* For display purposes, sort the node list, for consistent
          * regression test output (while avoiding the performance hit
          * for the live cluster).
          */
         nodes = g_list_sort(nodes, pe__cmp_node_name);
     }
     get_node_names(nodes, &node_list, NULL);
     add_notify_env_free_gs(n_data, "notify_available_uname", node_list);
     g_list_free(nodes);
 
     source = g_hash_table_lookup(rsc->private->meta,
                                  PCMK_META_CONTAINER_ATTRIBUTE_TARGET);
     if (pcmk__str_eq(PCMK_VALUE_HOST, source, pcmk__str_none)) {
         get_node_names(rsc->private->scheduler->nodes, &node_list, &metal_list);
         add_notify_env_free_gs(n_data, "notify_all_hosts", metal_list);
     } else {
         get_node_names(rsc->private->scheduler->nodes, &node_list, NULL);
     }
     add_notify_env_free_gs(n_data, "notify_all_uname", node_list);
 
     if (required && (n_data->pre != NULL)) {
         pcmk__clear_action_flags(n_data->pre, pcmk__action_optional);
         pcmk__clear_action_flags(n_data->pre_done, pcmk__action_optional);
     }
 
     if (required && (n_data->post != NULL)) {
         pcmk__clear_action_flags(n_data->post, pcmk__action_optional);
         pcmk__clear_action_flags(n_data->post_done, pcmk__action_optional);
     }
 }
 
 /*!
  * \internal
  * \brief Find any remote connection start relevant to an action
  *
  * \param[in] action  Action to check
  *
  * \return If action is behind a remote connection, connection's start
  */
 static pcmk_action_t *
 find_remote_start(pcmk_action_t *action)
 {
     if ((action != NULL) && (action->node != NULL)) {
         pcmk_resource_t *remote_rsc = action->node->private->remote;
 
         if (remote_rsc != NULL) {
             return find_first_action(remote_rsc->private->actions, NULL,
                                      PCMK_ACTION_START,
                                      NULL);
         }
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Create notify actions, and add notify data to original actions
  *
  * \param[in,out] rsc     Clone or clone instance that notification is for
  * \param[in,out] n_data  Clone notification data for some action
  */
 static void
 create_notify_actions(pcmk_resource_t *rsc, notify_data_t *n_data)
 {
     GList *iter = NULL;
     pcmk_action_t *stop = NULL;
     pcmk_action_t *start = NULL;
     enum pcmk__action_type task = pcmk__parse_action(n_data->action);
 
     // If this is a clone, call recursively for each instance
     if (rsc->private->children != NULL) {
         g_list_foreach(rsc->private->children, (GFunc) create_notify_actions,
                        n_data);
         return;
     }
 
     // Add notification meta-attributes to original actions
     for (iter = rsc->private->actions; iter != NULL; iter = iter->next) {
         pcmk_action_t *op = (pcmk_action_t *) iter->data;
 
         if (!pcmk_is_set(op->flags, pcmk__action_optional)
             && (op->node != NULL)) {
             switch (pcmk__parse_action(op->task)) {
                 case pcmk__action_start:
                 case pcmk__action_stop:
                 case pcmk__action_promote:
                 case pcmk__action_demote:
                     add_notify_data_to_action_meta(n_data, op);
                     break;
                 default:
                     break;
             }
         }
     }
 
     // Skip notify action itself if original action was not needed
     switch (task) {
         case pcmk__action_start:
             if (n_data->start == NULL) {
                 pcmk__rsc_trace(rsc, "No notify action needed for %s %s",
                                 rsc->id, n_data->action);
                 return;
             }
             break;
 
         case pcmk__action_promote:
             if (n_data->promote == NULL) {
                 pcmk__rsc_trace(rsc, "No notify action needed for %s %s",
                                 rsc->id, n_data->action);
                 return;
             }
             break;
 
         case pcmk__action_demote:
             if (n_data->demote == NULL) {
                 pcmk__rsc_trace(rsc, "No notify action needed for %s %s",
                                 rsc->id, n_data->action);
                 return;
             }
             break;
 
         default:
             // Can't do the same for stop, because fencing may imply it
             break;
     }
 
     pcmk__rsc_trace(rsc, "Creating notify actions for %s %s",
                     rsc->id, n_data->action);
 
     // Create notify actions for stop or demote
     if ((rsc->private->orig_role != pcmk_role_stopped)
         && ((task == pcmk__action_stop) || (task == pcmk__action_demote))) {
 
         stop = find_first_action(rsc->private->actions, NULL, PCMK_ACTION_STOP,
                                  NULL);
 
         for (iter = rsc->private->active_nodes;
              iter != NULL; iter = iter->next) {
 
             pcmk_node_t *current_node = (pcmk_node_t *) iter->data;
 
             /* If a stop is a pseudo-action implied by fencing, don't try to
              * notify the node getting fenced.
              */
             if ((stop != NULL)
                 && pcmk_is_set(stop->flags, pcmk__action_pseudo)
                 && (current_node->details->unclean
                     || pcmk_is_set(current_node->private->flags,
                                    pcmk__node_remote_reset))) {
                 continue;
             }
 
             new_notify_action(rsc, current_node, n_data->pre,
                               n_data->pre_done, n_data);
 
             if ((task == pcmk__action_demote) || (stop == NULL)
                 || pcmk_is_set(stop->flags, pcmk__action_optional)) {
                 new_post_notify_action(rsc, current_node, n_data);
             }
         }
     }
 
     // Create notify actions for start or promote
     if ((rsc->private->next_role != pcmk_role_stopped)
         && ((task == pcmk__action_start) || (task == pcmk__action_promote))) {
 
         start = find_first_action(rsc->private->actions, NULL,
                                   PCMK_ACTION_START, NULL);
         if (start != NULL) {
             pcmk_action_t *remote_start = find_remote_start(start);
 
             if ((remote_start != NULL)
                 && !pcmk_is_set(remote_start->flags, pcmk__action_runnable)) {
                 /* Start and promote actions for a clone instance behind
                  * a Pacemaker Remote connection happen after the
                  * connection starts. If the connection start is blocked, do
                  * not schedule notifications for these actions.
                  */
                 return;
             }
         }
         if (rsc->private->assigned_node == NULL) {
-            pcmk__sched_err("Next role '%s' but %s is not allocated",
+            pcmk__sched_err(rsc->private->scheduler,
+                            "Next role '%s' but %s is not allocated",
                             pcmk_role_text(rsc->private->next_role), rsc->id);
             return;
         }
         if ((task != pcmk__action_start) || (start == NULL)
             || pcmk_is_set(start->flags, pcmk__action_optional)) {
 
             new_notify_action(rsc, rsc->private->assigned_node, n_data->pre,
                               n_data->pre_done, n_data);
         }
         new_post_notify_action(rsc, rsc->private->assigned_node, n_data);
     }
 }
 
 /*!
  * \internal
  * \brief Create notification data and actions for one clone action
  *
  * \param[in,out] rsc     Clone resource that notification is for
  * \param[in,out] n_data  Clone notification data for some action
  */
 void
 pe__create_action_notifications(pcmk_resource_t *rsc, notify_data_t *n_data)
 {
     if ((rsc == NULL) || (n_data == NULL)) {
         return;
     }
     collect_resource_data(rsc, true, n_data);
     add_notif_keys(rsc, n_data);
     create_notify_actions(rsc, n_data);
 }
 
 /*!
  * \internal
  * \brief Free notification data for one action
  *
  * \param[in,out] n_data  Notification data to free
  */
 void
 pe__free_action_notification_data(notify_data_t *n_data)
 {
     if (n_data == NULL) {
         return;
     }
     g_list_free_full(n_data->stop, free);
     g_list_free_full(n_data->start, free);
     g_list_free_full(n_data->demote, free);
     g_list_free_full(n_data->promote, free);
     g_list_free_full(n_data->promoted, free);
     g_list_free_full(n_data->unpromoted, free);
     g_list_free_full(n_data->active, free);
     g_list_free_full(n_data->inactive, free);
     pcmk_free_nvpairs(n_data->keys);
     free(n_data);
 }
 
 /*!
  * \internal
  * \brief Order clone "notifications complete" pseudo-action after fencing
  *
  * If a stop action is implied by fencing, the usual notification pseudo-actions
  * will not be sufficient to order things properly, or even create all needed
  * notifications if the clone is also stopping on another node, and another
  * clone is ordered after it. This function creates new notification
  * pseudo-actions relative to the fencing to ensure everything works properly.
  *
  * \param[in]     stop        Stop action implied by fencing
  * \param[in,out] rsc         Clone resource that notification is for
  * \param[in,out] stonith_op  Fencing action that implies \p stop
  */
 void
 pe__order_notifs_after_fencing(const pcmk_action_t *stop, pcmk_resource_t *rsc,
                                pcmk_action_t *stonith_op)
 {
     notify_data_t *n_data;
 
     crm_info("Ordering notifications for implied %s after fencing", stop->uuid);
     n_data = pe__action_notif_pseudo_ops(rsc, PCMK_ACTION_STOP, NULL,
                                          stonith_op);
 
     if (n_data != NULL) {
         collect_resource_data(rsc, false, n_data);
         add_notify_env(n_data, "notify_stop_resource", rsc->id);
         add_notify_env(n_data, "notify_stop_uname", stop->node->private->name);
         create_notify_actions(uber_parent(rsc), n_data);
         pe__free_action_notification_data(n_data);
     }
 }
diff --git a/lib/pengine/status.c b/lib/pengine/status.c
index 83e6365baf..4d6dbdcab5 100644
--- a/lib/pengine/status.c
+++ b/lib/pengine/status.c
@@ -1,523 +1,524 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 
 #include <crm/crm.h>
 #include <crm/common/xml.h>
 #include <crm/common/cib_internal.h>
 
 #include <glib.h>
 
 #include <crm/pengine/internal.h>
 #include <pe_status_private.h>
 
 /*!
  * \brief Create a new object to hold scheduler data
  *
  * \return New, initialized scheduler data on success, else NULL (and set errno)
  * \note Only pcmk_scheduler_t objects created with this function (as opposed
  *       to statically declared or directly allocated) should be used with the
  *       functions in this library, to allow for future extensions to the
  *       data type. The caller is responsible for freeing the memory with
  *       pe_free_working_set() when the instance is no longer needed.
  */
 pcmk_scheduler_t *
 pe_new_working_set(void)
 {
     pcmk_scheduler_t *scheduler = calloc(1, sizeof(pcmk_scheduler_t));
 
     if (scheduler != NULL) {
         set_working_set_defaults(scheduler);
     }
     return scheduler;
 }
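 
 /* Typical lifecycle (a sketch; assumes cib_xml holds a parsed CIB document):
  *
  *     pcmk_scheduler_t *scheduler = pe_new_working_set();
  *
  *     if (scheduler != NULL) {
  *         scheduler->input = cib_xml;
  *         if (cluster_status(scheduler)) {
  *             // Node, resource, and status lists are now populated
  *         }
  *         pe_free_working_set(scheduler);
  *     }
  */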
 
 /*!
  * \brief Free scheduler data
  *
  * \param[in,out] scheduler  Scheduler data to free
  */
 void
 pe_free_working_set(pcmk_scheduler_t *scheduler)
 {
     if (scheduler != NULL) {
         pe_reset_working_set(scheduler);
         scheduler->priv = NULL;
         free(scheduler);
     }
 }
 
 #define XPATH_DEPRECATED_RULES                          \
     "//" PCMK_XE_OP_DEFAULTS "//" PCMK_XE_EXPRESSION    \
     "|//" PCMK_XE_OP "//" PCMK_XE_EXPRESSION
 
 /*!
  * \internal
  * \brief Log a warning for deprecated rule syntax in operations
  *
  * \param[in] scheduler  Scheduler data
  */
 static void
 check_for_deprecated_rules(pcmk_scheduler_t *scheduler)
 {
     // @COMPAT Drop this function when support for the syntax is dropped
     xmlNode *deprecated = get_xpath_object(XPATH_DEPRECATED_RULES,
                                            scheduler->input, LOG_NEVER);
 
     if (deprecated != NULL) {
         pcmk__warn_once(pcmk__wo_op_attr_expr,
                         "Support for rules with node attribute expressions in "
                         PCMK_XE_OP " or " PCMK_XE_OP_DEFAULTS " is deprecated "
                         "and will be dropped in a future release");
     }
 }
 
 /*
  * Unpack everything
  * At the end you'll have:
  *  - A list of nodes
  *  - A list of resources (each with any dependencies on other resources)
  *  - A list of constraints between resources and nodes
  *  - A list of constraints between start/stop actions
  *  - A list of nodes that need to be stonith'd
  *  - A list of nodes that need to be shut down
  *  - A list of the possible stop/start actions (without dependencies)
  */
 gboolean
 cluster_status(pcmk_scheduler_t * scheduler)
 {
     const char *new_version = NULL;
     xmlNode *section = NULL;
 
     if ((scheduler == NULL) || (scheduler->input == NULL)) {
         return FALSE;
     }
 
     new_version = crm_element_value(scheduler->input, PCMK_XA_CRM_FEATURE_SET);
 
     if (pcmk__check_feature_set(new_version) != pcmk_rc_ok) {
         pcmk__config_err("Can't process CIB with feature set '%s' greater than our own '%s'",
                          new_version, CRM_FEATURE_SET);
         return FALSE;
     }
 
     crm_trace("Beginning unpack");
 
     if (scheduler->failed != NULL) {
         pcmk__xml_free(scheduler->failed);
     }
     scheduler->failed = pcmk__xe_create(NULL, "failed-ops");
 
     if (scheduler->now == NULL) {
         scheduler->now = crm_time_new(NULL);
     }
 
     if (scheduler->dc_uuid == NULL) {
         scheduler->dc_uuid = crm_element_value_copy(scheduler->input,
                                                     PCMK_XA_DC_UUID);
     }
 
     if (pcmk__xe_attr_is_true(scheduler->input, PCMK_XA_HAVE_QUORUM)) {
         pcmk__set_scheduler_flags(scheduler, pcmk_sched_quorate);
     } else {
         pcmk__clear_scheduler_flags(scheduler, pcmk_sched_quorate);
     }
 
     scheduler->op_defaults = get_xpath_object("//" PCMK_XE_OP_DEFAULTS,
                                               scheduler->input, LOG_NEVER);
     check_for_deprecated_rules(scheduler);
 
     scheduler->rsc_defaults = get_xpath_object("//" PCMK_XE_RSC_DEFAULTS,
                                                scheduler->input, LOG_NEVER);
 
     section = get_xpath_object("//" PCMK_XE_CRM_CONFIG, scheduler->input,
                                LOG_TRACE);
     unpack_config(section, scheduler);
 
     if (!pcmk_any_flags_set(scheduler->flags,
                             pcmk_sched_location_only|pcmk_sched_quorate)
         && (scheduler->no_quorum_policy != pcmk_no_quorum_ignore)) {
-        pcmk__sched_warn("Fencing and resource management disabled "
+        pcmk__sched_warn(scheduler,
+                         "Fencing and resource management disabled "
                          "due to lack of quorum");
     }
 
     section = get_xpath_object("//" PCMK_XE_NODES, scheduler->input, LOG_TRACE);
     unpack_nodes(section, scheduler);
 
     section = get_xpath_object("//" PCMK_XE_RESOURCES, scheduler->input,
                                LOG_TRACE);
     if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
         unpack_remote_nodes(section, scheduler);
     }
     unpack_resources(section, scheduler);
 
     section = get_xpath_object("//" PCMK_XE_TAGS, scheduler->input, LOG_NEVER);
     unpack_tags(section, scheduler);
 
     if (!pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
         section = get_xpath_object("//" PCMK_XE_STATUS, scheduler->input,
                                    LOG_TRACE);
         unpack_status(section, scheduler);
     }
 
     if (!pcmk_is_set(scheduler->flags, pcmk_sched_no_counts)) {
         for (GList *item = scheduler->resources; item != NULL;
              item = item->next) {
             pcmk_resource_t *rsc = item->data;
 
             rsc->private->fns->count(rsc);
         }
         crm_trace("Cluster resource count: %d (%d disabled, %d blocked)",
                   scheduler->ninstances, scheduler->disabled_resources,
                   scheduler->blocked_resources);
     }
 
     pcmk__set_scheduler_flags(scheduler, pcmk_sched_have_status);
     return TRUE;
 }
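
 /* Minimal usage sketch (illustrative, not part of the original source),
  * assuming the caller obtained a CIB XML copy elsewhere. The allocation,
  * reset, and free counterparts are the public pe_new_working_set(),
  * pe_reset_working_set(), and pe_free_working_set():
  *
  *   pcmk_scheduler_t *scheduler = pe_new_working_set();
  *
  *   if (scheduler != NULL) {
  *       scheduler->input = cib_xml;   // hypothetical caller-provided copy
  *       if (cluster_status(scheduler)) {
  *           // scheduler->nodes and scheduler->resources are now populated
  *       }
  *       pe_free_working_set(scheduler);   // also frees scheduler->input
  *   }
  */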
 
 /*!
  * \internal
  * \brief Free a list of pcmk_resource_t
  *
  * \param[in,out] resources  List to free
  *
  * \note When the scheduler's resource list is freed, that includes the original
  *       storage for the uname and id of any Pacemaker Remote nodes in the
  *       scheduler's node list, so take care not to use those afterward.
  * \todo Refactor pcmk_node_t to strdup() the node name.
  */
 static void
 pe_free_resources(GList *resources)
 {
     pcmk_resource_t *rsc = NULL;
     GList *iterator = resources;
 
     while (iterator != NULL) {
         rsc = (pcmk_resource_t *) iterator->data;
         iterator = iterator->next;
         rsc->private->fns->free(rsc);
     }
     if (resources != NULL) {
         g_list_free(resources);
     }
 }
 
 static void
 pe_free_actions(GList *actions)
 {
     GList *iterator = actions;
 
     while (iterator != NULL) {
         pe_free_action(iterator->data);
         iterator = iterator->next;
     }
     if (actions != NULL) {
         g_list_free(actions);
     }
 }
 
 static void
 pe_free_nodes(GList *nodes)
 {
     for (GList *iterator = nodes; iterator != NULL; iterator = iterator->next) {
         pcmk_node_t *node = (pcmk_node_t *) iterator->data;
 
         // Shouldn't be possible, but to be safe ...
         if (node == NULL) {
             continue;
         }
         if (node->details == NULL) {
             free(node);
             continue;
         }
 
         /* This is called after pe_free_resources(), which means that we can't
          * use node->private->name for Pacemaker Remote nodes.
          */
         crm_trace("Freeing node %s", (pcmk__is_pacemaker_remote_node(node)?
                   "(guest or remote)" : pcmk__node_name(node)));
 
         if (node->private->attrs != NULL) {
             g_hash_table_destroy(node->private->attrs);
         }
         if (node->private->utilization != NULL) {
             g_hash_table_destroy(node->private->utilization);
         }
         if (node->private->digest_cache != NULL) {
             g_hash_table_destroy(node->private->digest_cache);
         }
         g_list_free(node->details->running_rsc);
         g_list_free(node->private->assigned_resources);
         free(node->private);
         free(node->details);
         free(node->assign);
         free(node);
     }
     if (nodes != NULL) {
         g_list_free(nodes);
     }
 }
 
 static void
 pe__free_ordering(GList *constraints)
 {
     GList *iterator = constraints;
 
     while (iterator != NULL) {
         pcmk__action_relation_t *order = iterator->data;
 
         iterator = iterator->next;
 
         free(order->task1);
         free(order->task2);
         free(order);
     }
     if (constraints != NULL) {
         g_list_free(constraints);
     }
 }
 
 static void
 pe__free_location(GList *constraints)
 {
     GList *iterator = constraints;
 
     while (iterator != NULL) {
         pcmk__location_t *cons = iterator->data;
 
         iterator = iterator->next;
 
         g_list_free_full(cons->nodes, free);
         free(cons->id);
         free(cons);
     }
     if (constraints != NULL) {
         g_list_free(constraints);
     }
 }
 
 /*!
  * \brief Reset scheduler data to defaults without freeing it or constraints
  *
  * \param[in,out] scheduler  Scheduler data to reset
  *
  * \deprecated This function is deprecated as part of the API;
  *             pe_reset_working_set() should be used instead.
  */
 void
 cleanup_calculations(pcmk_scheduler_t *scheduler)
 {
     if (scheduler == NULL) {
         return;
     }
 
     pcmk__clear_scheduler_flags(scheduler, pcmk_sched_have_status);
     if (scheduler->config_hash != NULL) {
         g_hash_table_destroy(scheduler->config_hash);
     }
 
     if (scheduler->singletons != NULL) {
         g_hash_table_destroy(scheduler->singletons);
     }
 
     if (scheduler->tickets) {
         g_hash_table_destroy(scheduler->tickets);
     }
 
     if (scheduler->template_rsc_sets) {
         g_hash_table_destroy(scheduler->template_rsc_sets);
     }
 
     if (scheduler->tags) {
         g_hash_table_destroy(scheduler->tags);
     }
 
     free(scheduler->dc_uuid);
 
     crm_trace("deleting resources");
     pe_free_resources(scheduler->resources);
 
     crm_trace("deleting actions");
     pe_free_actions(scheduler->actions);
 
     crm_trace("deleting nodes");
     pe_free_nodes(scheduler->nodes);
 
     pe__free_param_checks(scheduler);
     g_list_free(scheduler->stop_needed);
     pcmk__xml_free(scheduler->graph);
     crm_time_free(scheduler->now);
     pcmk__xml_free(scheduler->input);
     pcmk__xml_free(scheduler->failed);
 
     set_working_set_defaults(scheduler);
 
     CRM_CHECK(scheduler->ordering_constraints == NULL, ;);
     CRM_CHECK(scheduler->placement_constraints == NULL, ;);
 }
 
 /*!
  * \brief Reset scheduler data to default state without freeing it
  *
  * \param[in,out] scheduler  Scheduler data to reset
  */
 void
 pe_reset_working_set(pcmk_scheduler_t *scheduler)
 {
     if (scheduler == NULL) {
         return;
     }
 
     crm_trace("Deleting %d ordering constraints",
               g_list_length(scheduler->ordering_constraints));
     pe__free_ordering(scheduler->ordering_constraints);
     scheduler->ordering_constraints = NULL;
 
     crm_trace("Deleting %d location constraints",
               g_list_length(scheduler->placement_constraints));
     pe__free_location(scheduler->placement_constraints);
     scheduler->placement_constraints = NULL;
 
     crm_trace("Deleting %d colocation constraints",
               g_list_length(scheduler->colocation_constraints));
     g_list_free_full(scheduler->colocation_constraints, free);
     scheduler->colocation_constraints = NULL;
 
     crm_trace("Deleting %d ticket constraints",
               g_list_length(scheduler->ticket_constraints));
     g_list_free_full(scheduler->ticket_constraints, free);
     scheduler->ticket_constraints = NULL;
 
     cleanup_calculations(scheduler);
 }
 
 void
 set_working_set_defaults(pcmk_scheduler_t *scheduler)
 {
     void *priv = scheduler->priv;
 
     memset(scheduler, 0, sizeof(pcmk_scheduler_t));
 
     scheduler->priv = priv;
     scheduler->order_id = 1;
     scheduler->action_id = 1;
     scheduler->no_quorum_policy = pcmk_no_quorum_stop;
 
     scheduler->flags = 0x0ULL;
 
     pcmk__set_scheduler_flags(scheduler,
                               pcmk_sched_symmetric_cluster
                               |pcmk_sched_stop_removed_resources
                               |pcmk_sched_cancel_removed_actions);
     if (!strcmp(PCMK__CONCURRENT_FENCING_DEFAULT, PCMK_VALUE_TRUE)) {
         pcmk__set_scheduler_flags(scheduler, pcmk_sched_concurrent_fencing);
     }
 }
 
 pcmk_resource_t *
 pe_find_resource(GList *rsc_list, const char *id)
 {
     return pe_find_resource_with_flags(rsc_list, id, pcmk_rsc_match_history);
 }
 
 pcmk_resource_t *
 pe_find_resource_with_flags(GList *rsc_list, const char *id, enum pe_find flags)
 {
     GList *rIter = NULL;
 
     for (rIter = rsc_list; id && rIter; rIter = rIter->next) {
         pcmk_resource_t *parent = rIter->data;
         pcmk_resource_t *match = parent->private->fns->find_rsc(parent, id,
                                                                 NULL, flags);
 
         if (match != NULL) {
             return match;
         }
     }
     crm_trace("No match for %s", id);
     return NULL;
 }
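
 /* Illustrative only: looking up a resource by ID, matching against history
  * IDs as well (pe_find_resource() uses pcmk_rsc_match_history). "myrsc" is
  * a hypothetical resource ID:
  *
  *   pcmk_resource_t *rsc = pe_find_resource(scheduler->resources, "myrsc");
  *
  *   if (rsc == NULL) {
  *       // no configured resource (or clone/group member) matched "myrsc"
  *   }
  */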
 
 /*!
  * \brief Find a node by name or ID in a list of nodes
  *
  * \param[in] nodes      List of nodes (as pcmk_node_t*)
  * \param[in] id         If not NULL, ID of node to find
  * \param[in] uname      If not NULL, name of node to find
  *
  * \return Node from \p nodes that matches \p id if any,
  *         otherwise node from \p nodes that matches \p uname if any,
  *         otherwise NULL
  */
 pcmk_node_t *
 pe_find_node_any(const GList *nodes, const char *id, const char *uname)
 {
     pcmk_node_t *match = NULL;
 
     if (id != NULL) {
         match = pe_find_node_id(nodes, id);
     }
     if ((match == NULL) && (uname != NULL)) {
         match = pcmk__find_node_in_list(nodes, uname);
     }
     return match;
 }
 
 /*!
  * \brief Find a node by ID in a list of nodes
  *
  * \param[in] nodes  List of nodes (as pcmk_node_t*)
  * \param[in] id     ID of node to find
  *
  * \return Node from \p nodes that matches \p id if any, otherwise NULL
  */
 pcmk_node_t *
 pe_find_node_id(const GList *nodes, const char *id)
 {
     for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
         pcmk_node_t *node = (pcmk_node_t *) iter->data;
 
         /* @TODO Whether node IDs should be considered case-sensitive should
          * probably depend on the node type, so factoring the comparison into
          * a function would be worthwhile
          */
         if (pcmk__str_eq(node->private->id, id, pcmk__str_casei)) {
             return node;
         }
     }
     return NULL;
 }
 
 // Deprecated functions kept only for backward API compatibility
 // LCOV_EXCL_START
 
 #include <crm/pengine/status_compat.h>
 
 /*!
  * \brief Find a node by name in a list of nodes
  *
  * \param[in] nodes      List of nodes (as pcmk_node_t*)
  * \param[in] node_name  Name of node to find
  *
  * \return Node from \p nodes that matches \p node_name if any, otherwise NULL
  */
 pcmk_node_t *
 pe_find_node(const GList *nodes, const char *node_name)
 {
     return pcmk__find_node_in_list(nodes, node_name);
 }
 
 // LCOV_EXCL_STOP
 // End deprecated API
diff --git a/lib/pengine/unpack.c b/lib/pengine/unpack.c
index 254c905328..daa88449cb 100644
--- a/lib/pengine/unpack.c
+++ b/lib/pengine/unpack.c
@@ -1,5080 +1,5087 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdio.h>
 #include <string.h>
 #include <glib.h>
 #include <time.h>
 
 #include <crm/crm.h>
 #include <crm/services.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 
 #include <crm/common/util.h>
 #include <crm/pengine/rules.h>
 #include <crm/pengine/internal.h>
 #include <pe_status_private.h>
 
 CRM_TRACE_INIT_DATA(pe_status);
 
 // A (parsed) resource action history entry
 struct action_history {
     pcmk_resource_t *rsc;     // Resource that history is for
     pcmk_node_t *node;        // Node that history is for
     xmlNode *xml;             // History entry XML
 
     // Parsed from entry XML
     const char *id;           // XML ID of history entry
     const char *key;          // Operation key of action
     const char *task;         // Action name
     const char *exit_reason;  // Exit reason given for result
     guint interval_ms;        // Action interval
     int call_id;              // Call ID of action
     int expected_exit_status; // Expected exit status of action
     int exit_status;          // Actual exit status of action
     int execution_status;     // Execution status of action
 };
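
 /* For orientation (illustrative, not from the original source): these fields
  * are parsed from CIB resource history entries shaped roughly like the
  * following, with all values here made up:
  *
  *   <lrm_rsc_op id="myrsc_last_0" operation_key="myrsc_start_0"
  *               operation="start" interval="0" call-id="12"
  *               rc-code="0" op-status="0" exit-reason=""/>
  */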
 
 /* This uses pcmk__set_flags_as()/pcmk__clear_flags_as() directly rather than
  * use pcmk__set_scheduler_flags()/pcmk__clear_scheduler_flags() so that the
  * flag is stringified more readably in log messages.
  */
 #define set_config_flag(scheduler, option, flag) do {                         \
         GHashTable *config_hash = (scheduler)->config_hash;                   \
         const char *scf_value = pcmk__cluster_option(config_hash, (option));  \
                                                                               \
         if (scf_value != NULL) {                                              \
             if (crm_is_true(scf_value)) {                                     \
                 (scheduler)->flags = pcmk__set_flags_as(__func__, __LINE__,   \
                                     LOG_TRACE, "Scheduler",                   \
                                     crm_system_name, (scheduler)->flags,      \
                                     (flag), #flag);                           \
             } else {                                                          \
                 (scheduler)->flags = pcmk__clear_flags_as(__func__, __LINE__, \
                                     LOG_TRACE, "Scheduler",                   \
                                     crm_system_name, (scheduler)->flags,      \
                                     (flag), #flag);                           \
             }                                                                 \
         }                                                                     \
     } while(0)
 
 static void unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node,
                           xmlNode *xml_op, xmlNode **last_failure,
                           enum pcmk__on_fail *failed);
 static void determine_remote_online_status(pcmk_scheduler_t *scheduler,
                                            pcmk_node_t *this_node);
 static void add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node,
                            bool overwrite, pcmk_scheduler_t *scheduler);
 static void determine_online_status(const xmlNode *node_state,
                                     pcmk_node_t *this_node,
                                     pcmk_scheduler_t *scheduler);
 
 static void unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml,
                             pcmk_scheduler_t *scheduler);
 
 
 /*!
  * \internal
  * \brief Check whether a node is a dangling guest node
  *
  * \param[in] node  Node to check
  *
  * \return true if \p node had a Pacemaker Remote connection resource with a
  *         launcher that was removed from the CIB, otherwise false.
  */
 static bool
 is_dangling_guest_node(pcmk_node_t *node)
 {
     return pcmk__is_pacemaker_remote_node(node)
            && (node->private->remote != NULL)
            && (node->private->remote->private->launcher == NULL)
            && pcmk_is_set(node->private->remote->flags,
                           pcmk__rsc_removed_launched);
 }
 
 /*!
  * \brief Schedule a fence action for a node
  *
  * \param[in,out] scheduler       Scheduler data
  * \param[in,out] node            Node to fence
  * \param[in]     reason          Text description of why fencing is needed
  * \param[in]     priority_delay  Whether to consider
  *                                \c PCMK_OPT_PRIORITY_FENCING_DELAY
  */
 void
 pe_fence_node(pcmk_scheduler_t *scheduler, pcmk_node_t *node,
               const char *reason, bool priority_delay)
 {
     CRM_CHECK(node, return);
 
     if (pcmk__is_guest_or_bundle_node(node)) {
         // Fence a guest or bundle node by marking its launcher as failed
         pcmk_resource_t *rsc = node->private->remote->private->launcher;
 
         if (!pcmk_is_set(rsc->flags, pcmk__rsc_failed)) {
             if (!pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
                 crm_notice("Not fencing guest node %s "
                            "(otherwise would because %s): "
                            "its guest resource %s is unmanaged",
                            pcmk__node_name(node), reason, rsc->id);
             } else {
-                pcmk__sched_warn("Guest node %s will be fenced "
+                pcmk__sched_warn(scheduler,
+                                 "Guest node %s will be fenced "
                                  "(by recovering its guest resource %s): %s",
                                  pcmk__node_name(node), rsc->id, reason);
 
                 /* We don't mark the node as unclean because that would prevent the
                  * node from running resources. We want to allow it to run resources
                  * in this transition if the recovery succeeds.
                  */
                 pcmk__set_node_flags(node, pcmk__node_remote_reset);
                 pcmk__set_rsc_flags(rsc,
                                     pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
             }
         }
 
     } else if (is_dangling_guest_node(node)) {
         crm_info("Cleaning up dangling connection for guest node %s: "
                  "fencing was already done because %s, "
                  "and guest resource no longer exists",
                  pcmk__node_name(node), reason);
         pcmk__set_rsc_flags(node->private->remote,
                             pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
 
     } else if (pcmk__is_remote_node(node)) {
         pcmk_resource_t *rsc = node->private->remote;
 
         if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
             crm_notice("Not fencing remote node %s "
                        "(otherwise would because %s): connection is unmanaged",
                        pcmk__node_name(node), reason);
         } else if (!pcmk_is_set(node->private->flags, pcmk__node_remote_reset)) {
             pcmk__set_node_flags(node, pcmk__node_remote_reset);
-            pcmk__sched_warn("Remote node %s %s: %s",
+            pcmk__sched_warn(scheduler, "Remote node %s %s: %s",
                              pcmk__node_name(node),
                              pe_can_fence(scheduler, node)? "will be fenced" : "is unclean",
                              reason);
         }
         node->details->unclean = TRUE;
         // No need to apply PCMK_OPT_PRIORITY_FENCING_DELAY for remote nodes
         pe_fence_op(node, NULL, TRUE, reason, FALSE, scheduler);
 
     } else if (node->details->unclean) {
         crm_trace("Cluster node %s %s because %s",
                   pcmk__node_name(node),
                   pe_can_fence(scheduler, node)? "would also be fenced" : "also is unclean",
                   reason);
 
     } else {
-        pcmk__sched_warn("Cluster node %s %s: %s",
+        pcmk__sched_warn(scheduler, "Cluster node %s %s: %s",
                          pcmk__node_name(node),
                          pe_can_fence(scheduler, node)? "will be fenced" : "is unclean",
                          reason);
         node->details->unclean = TRUE;
         pe_fence_op(node, NULL, TRUE, reason, priority_delay, scheduler);
     }
 }
 
 // @TODO xpaths can't handle templates, rules, or id-refs
 
 // nvpair with provides or requires set to unfencing
 #define XPATH_UNFENCING_NVPAIR PCMK_XE_NVPAIR           \
     "[(@" PCMK_XA_NAME "='" PCMK_STONITH_PROVIDES "'"   \
     "or @" PCMK_XA_NAME "='" PCMK_META_REQUIRES "') "   \
     "and @" PCMK_XA_VALUE "='" PCMK_VALUE_UNFENCING "']"
 
 // unfencing in rsc_defaults or any resource
 #define XPATH_ENABLE_UNFENCING \
     "/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RESOURCES     \
     "//" PCMK_XE_META_ATTRIBUTES "/" XPATH_UNFENCING_NVPAIR             \
     "|/" PCMK_XE_CIB "/" PCMK_XE_CONFIGURATION "/" PCMK_XE_RSC_DEFAULTS \
     "/" PCMK_XE_META_ATTRIBUTES "/" XPATH_UNFENCING_NVPAIR
 
 static void
 set_if_xpath(uint64_t flag, const char *xpath, pcmk_scheduler_t *scheduler)
 {
     xmlXPathObjectPtr result = NULL;
 
     if (!pcmk_is_set(scheduler->flags, flag)) {
         result = xpath_search(scheduler->input, xpath);
         if (result && (numXpathResults(result) > 0)) {
             pcmk__set_scheduler_flags(scheduler, flag);
         }
         freeXpathObject(result);
     }
 }
 
 gboolean
 unpack_config(xmlNode *config, pcmk_scheduler_t *scheduler)
 {
     const char *value = NULL;
     guint interval_ms = 0U;
     GHashTable *config_hash = pcmk__strkey_table(free, free);
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .now = scheduler->now,
         .match_data = NULL,
         .rsc_data = NULL,
         .op_data = NULL
     };
 
     scheduler->config_hash = config_hash;
 
     pe__unpack_dataset_nvpairs(config, PCMK_XE_CLUSTER_PROPERTY_SET, &rule_data,
                                config_hash, PCMK_VALUE_CIB_BOOTSTRAP_OPTIONS,
                                FALSE, scheduler);
 
     pcmk__validate_cluster_options(config_hash);
 
     set_config_flag(scheduler, PCMK_OPT_ENABLE_STARTUP_PROBES,
                     pcmk_sched_probe_resources);
     if (!pcmk_is_set(scheduler->flags, pcmk_sched_probe_resources)) {
         crm_info("Startup probes: disabled (dangerous)");
     }
 
     value = pcmk__cluster_option(config_hash, PCMK_OPT_HAVE_WATCHDOG);
     if (value && crm_is_true(value)) {
         crm_info("Watchdog-based self-fencing will be performed via SBD if "
                  "fencing is required and " PCMK_OPT_STONITH_WATCHDOG_TIMEOUT
                  " is nonzero");
         pcmk__set_scheduler_flags(scheduler, pcmk_sched_have_fencing);
     }
 
     /* Set certain flags via xpath here, so they can be used before the relevant
      * configuration sections are unpacked.
      */
     set_if_xpath(pcmk_sched_enable_unfencing, XPATH_ENABLE_UNFENCING,
                  scheduler);
 
     value = pcmk__cluster_option(config_hash, PCMK_OPT_STONITH_TIMEOUT);
     pcmk_parse_interval_spec(value, &interval_ms);
 
     if (interval_ms >= INT_MAX) {
         scheduler->stonith_timeout = INT_MAX;
     } else {
         scheduler->stonith_timeout = (int) interval_ms;
     }
     crm_debug("STONITH timeout: %d", scheduler->stonith_timeout);
 
     set_config_flag(scheduler, PCMK_OPT_STONITH_ENABLED,
                     pcmk_sched_fencing_enabled);
     if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
         crm_debug("STONITH of failed nodes is enabled");
     } else {
         crm_debug("STONITH of failed nodes is disabled");
     }
 
     scheduler->stonith_action = pcmk__cluster_option(config_hash,
                                                      PCMK_OPT_STONITH_ACTION);
     if (!strcmp(scheduler->stonith_action, PCMK__ACTION_POWEROFF)) {
         pcmk__warn_once(pcmk__wo_poweroff,
                         "Support for " PCMK_OPT_STONITH_ACTION " of "
                         "'" PCMK__ACTION_POWEROFF "' is deprecated and will be "
                         "removed in a future release "
                         "(use '" PCMK_ACTION_OFF "' instead)");
         scheduler->stonith_action = PCMK_ACTION_OFF;
     }
     crm_trace("STONITH will %s nodes", scheduler->stonith_action);
 
     set_config_flag(scheduler, PCMK_OPT_CONCURRENT_FENCING,
                     pcmk_sched_concurrent_fencing);
     if (pcmk_is_set(scheduler->flags, pcmk_sched_concurrent_fencing)) {
         crm_debug("Concurrent fencing is enabled");
     } else {
         crm_debug("Concurrent fencing is disabled");
     }
 
     value = pcmk__cluster_option(config_hash, PCMK_OPT_PRIORITY_FENCING_DELAY);
     if (value) {
         pcmk_parse_interval_spec(value, &interval_ms);
         scheduler->priority_fencing_delay = (int) (interval_ms / 1000);
         crm_trace("Priority fencing delay is %ds",
                   scheduler->priority_fencing_delay);
     }
 
     set_config_flag(scheduler, PCMK_OPT_STOP_ALL_RESOURCES,
                     pcmk_sched_stop_all);
     crm_debug("Stop all active resources: %s",
               pcmk__flag_text(scheduler->flags, pcmk_sched_stop_all));
 
     set_config_flag(scheduler, PCMK_OPT_SYMMETRIC_CLUSTER,
                     pcmk_sched_symmetric_cluster);
     if (pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)) {
         crm_debug("Cluster is symmetric" " - resources can run anywhere by default");
     }
 
     value = pcmk__cluster_option(config_hash, PCMK_OPT_NO_QUORUM_POLICY);
 
     if (pcmk__str_eq(value, PCMK_VALUE_IGNORE, pcmk__str_casei)) {
         scheduler->no_quorum_policy = pcmk_no_quorum_ignore;
 
     } else if (pcmk__str_eq(value, PCMK_VALUE_FREEZE, pcmk__str_casei)) {
         scheduler->no_quorum_policy = pcmk_no_quorum_freeze;
 
     } else if (pcmk__str_eq(value, PCMK_VALUE_DEMOTE, pcmk__str_casei)) {
         scheduler->no_quorum_policy = pcmk_no_quorum_demote;
 
     } else if (pcmk__str_eq(value, PCMK_VALUE_FENCE_LEGACY, pcmk__str_casei)) {
         if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
             int do_panic = 0;
 
             crm_element_value_int(scheduler->input, PCMK_XA_NO_QUORUM_PANIC,
                                   &do_panic);
             if (do_panic || pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
                 scheduler->no_quorum_policy = pcmk_no_quorum_fence;
             } else {
                 crm_notice("Resetting " PCMK_OPT_NO_QUORUM_POLICY
                            " to 'stop': cluster has never had quorum");
                 scheduler->no_quorum_policy = pcmk_no_quorum_stop;
             }
         } else {
             pcmk__config_err("Resetting " PCMK_OPT_NO_QUORUM_POLICY
                              " to 'stop' because fencing is disabled");
             scheduler->no_quorum_policy = pcmk_no_quorum_stop;
         }
 
     } else {
         scheduler->no_quorum_policy = pcmk_no_quorum_stop;
     }
 
     switch (scheduler->no_quorum_policy) {
         case pcmk_no_quorum_freeze:
             crm_debug("On loss of quorum: Freeze resources");
             break;
         case pcmk_no_quorum_stop:
             crm_debug("On loss of quorum: Stop ALL resources");
             break;
         case pcmk_no_quorum_demote:
             crm_debug("On loss of quorum: "
                       "Demote promotable resources and stop other resources");
             break;
         case pcmk_no_quorum_fence:
             crm_notice("On loss of quorum: Fence all remaining nodes");
             break;
         case pcmk_no_quorum_ignore:
             crm_notice("On loss of quorum: Ignore");
             break;
     }
 
     set_config_flag(scheduler, PCMK_OPT_STOP_ORPHAN_RESOURCES,
                     pcmk_sched_stop_removed_resources);
     if (pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
         crm_trace("Orphan resources are stopped");
     } else {
         crm_trace("Orphan resources are ignored");
     }
 
     set_config_flag(scheduler, PCMK_OPT_STOP_ORPHAN_ACTIONS,
                     pcmk_sched_cancel_removed_actions);
     if (pcmk_is_set(scheduler->flags, pcmk_sched_cancel_removed_actions)) {
         crm_trace("Orphan resource actions are stopped");
     } else {
         crm_trace("Orphan resource actions are ignored");
     }
 
     value = pcmk__cluster_option(config_hash, PCMK__OPT_REMOVE_AFTER_STOP);
     if (value != NULL) {
         if (crm_is_true(value)) {
             pcmk__set_scheduler_flags(scheduler, pcmk_sched_remove_after_stop);
             pcmk__warn_once(pcmk__wo_remove_after,
                             "Support for the " PCMK__OPT_REMOVE_AFTER_STOP
                             " cluster property is deprecated and will be "
                             "removed in a future release");
         } else {
             pcmk__clear_scheduler_flags(scheduler,
                                         pcmk_sched_remove_after_stop);
         }
     }
 
     set_config_flag(scheduler, PCMK_OPT_MAINTENANCE_MODE,
                     pcmk_sched_in_maintenance);
     crm_trace("Maintenance mode: %s",
               pcmk__flag_text(scheduler->flags, pcmk_sched_in_maintenance));
 
     set_config_flag(scheduler, PCMK_OPT_START_FAILURE_IS_FATAL,
                     pcmk_sched_start_failure_fatal);
     if (pcmk_is_set(scheduler->flags, pcmk_sched_start_failure_fatal)) {
         crm_trace("Start failures are always fatal");
     } else {
         crm_trace("Start failures are handled by failcount");
     }
 
     if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
         set_config_flag(scheduler, PCMK_OPT_STARTUP_FENCING,
                         pcmk_sched_startup_fencing);
     }
     if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) {
         crm_trace("Unseen nodes will be fenced");
     } else {
         pcmk__warn_once(pcmk__wo_blind,
                         "Blind faith: not fencing unseen nodes");
     }
 
     pe__unpack_node_health_scores(scheduler);
 
     scheduler->placement_strategy =
         pcmk__cluster_option(config_hash, PCMK_OPT_PLACEMENT_STRATEGY);
     crm_trace("Placement strategy: %s", scheduler->placement_strategy);
 
     set_config_flag(scheduler, PCMK_OPT_SHUTDOWN_LOCK,
                     pcmk_sched_shutdown_lock);
     if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
         value = pcmk__cluster_option(config_hash, PCMK_OPT_SHUTDOWN_LOCK_LIMIT);
         pcmk_parse_interval_spec(value, &(scheduler->shutdown_lock));
         scheduler->shutdown_lock /= 1000;
         crm_trace("Resources will be locked to nodes that were cleanly "
                   "shut down (locks expire after %s)",
                   pcmk__readable_interval(scheduler->shutdown_lock));
     } else {
         crm_trace("Resources will not be locked to nodes that were cleanly "
                   "shut down");
     }
 
     value = pcmk__cluster_option(config_hash, PCMK_OPT_NODE_PENDING_TIMEOUT);
     pcmk_parse_interval_spec(value, &(scheduler->node_pending_timeout));
     scheduler->node_pending_timeout /= 1000;
     if (scheduler->node_pending_timeout == 0) {
         crm_trace("Do not fence pending nodes");
     } else {
         crm_trace("Fence pending nodes after %s",
                   pcmk__readable_interval(scheduler->node_pending_timeout
                                           * 1000));
     }
 
     return TRUE;
 }
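
 /* Illustrative only: unpack_config() reads cluster options from XML of this
  * shape (option values here are examples, not defaults):
  *
  *   <crm_config>
  *     <cluster_property_set id="cib-bootstrap-options">
  *       <nvpair id="opt-stonith" name="stonith-enabled" value="true"/>
  *       <nvpair id="opt-quorum" name="no-quorum-policy" value="stop"/>
  *     </cluster_property_set>
  *   </crm_config>
  */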
 
 pcmk_node_t *
 pe_create_node(const char *id, const char *uname, const char *type,
                const char *score, pcmk_scheduler_t *scheduler)
 {
     pcmk_node_t *new_node = NULL;
 
     if (pcmk_find_node(scheduler, uname) != NULL) {
         pcmk__config_warn("More than one node entry has name '%s'", uname);
     }
 
     new_node = calloc(1, sizeof(pcmk_node_t));
     if (new_node == NULL) {
-        pcmk__sched_err("Could not allocate memory for node %s", uname);
+        pcmk__sched_err(scheduler, "Could not allocate memory for node %s",
+                        uname);
         return NULL;
     }
 
     new_node->assign = calloc(1, sizeof(struct pcmk__node_assignment));
     new_node->details = calloc(1, sizeof(struct pcmk__node_details));
     new_node->private = calloc(1, sizeof(pcmk__node_private_t));
     if ((new_node->assign == NULL) || (new_node->details == NULL)
         || (new_node->private == NULL)) {
         free(new_node->assign);
         free(new_node->details);
         free(new_node->private);
         free(new_node);
-        pcmk__sched_err("Could not allocate memory for node %s", uname);
+        pcmk__sched_err(scheduler, "Could not allocate memory for node %s",
+                        uname);
         return NULL;
     }
 
     crm_trace("Creating node for entry %s/%s", uname, id);
     new_node->assign->score = char2score(score);
     new_node->private->id = id;
     new_node->private->name = uname;
     new_node->private->flags = pcmk__node_probes_allowed;
     new_node->details->online = FALSE;
     new_node->details->shutdown = FALSE;
     new_node->details->running_rsc = NULL;
     new_node->private->scheduler = scheduler;
 
     if (pcmk__str_eq(type, PCMK_VALUE_MEMBER,
                      pcmk__str_null_matches|pcmk__str_casei)) {
         new_node->private->variant = pcmk__node_variant_cluster;
 
     } else if (pcmk__str_eq(type, PCMK_VALUE_REMOTE, pcmk__str_casei)) {
         new_node->private->variant = pcmk__node_variant_remote;
         pcmk__set_scheduler_flags(scheduler, pcmk_sched_have_remote_nodes);
 
     } else {
         /* @COMPAT 'ping' is the default for backward compatibility, but it
          * should be changed to 'member' at a compatibility break
          */
         if (!pcmk__str_eq(type, PCMK__VALUE_PING, pcmk__str_casei)) {
             pcmk__config_warn("Node %s has unrecognized type '%s', "
                               "assuming '" PCMK__VALUE_PING "'",
                               pcmk__s(uname, "without name"), type);
         }
         pcmk__warn_once(pcmk__wo_ping_node,
                         "Support for nodes of type '" PCMK__VALUE_PING "' "
                         "(such as %s) is deprecated and will be removed in a "
                         "future release",
                         pcmk__s(uname, "unnamed node"));
         new_node->private->variant = pcmk__node_variant_ping;
     }
 
     new_node->private->attrs = pcmk__strkey_table(free, free);
 
     if (pcmk__is_pacemaker_remote_node(new_node)) {
         pcmk__insert_dup(new_node->private->attrs, CRM_ATTR_KIND, "remote");
     } else {
         pcmk__insert_dup(new_node->private->attrs, CRM_ATTR_KIND, "cluster");
     }
 
     new_node->private->utilization = pcmk__strkey_table(free, free);
     new_node->private->digest_cache = pcmk__strkey_table(free,
                                                          pe__free_digests);
 
     scheduler->nodes = g_list_insert_sorted(scheduler->nodes, new_node,
                                             pe__cmp_node_name);
     return new_node;
 }
 
 static const char *
 expand_remote_rsc_meta(xmlNode *xml_obj, xmlNode *parent, pcmk_scheduler_t *data)
 {
     xmlNode *attr_set = NULL;
     xmlNode *attr = NULL;
 
     const char *container_id = pcmk__xe_id(xml_obj);
     const char *remote_name = NULL;
     const char *remote_server = NULL;
     const char *remote_port = NULL;
     const char *connect_timeout = "60s";
     const char *remote_allow_migrate = NULL;
     const char *is_managed = NULL;
 
     for (attr_set = pcmk__xe_first_child(xml_obj, NULL, NULL, NULL);
          attr_set != NULL; attr_set = pcmk__xe_next(attr_set)) {
 
         if (!pcmk__xe_is(attr_set, PCMK_XE_META_ATTRIBUTES)) {
             continue;
         }
 
         for (attr = pcmk__xe_first_child(attr_set, NULL, NULL, NULL);
              attr != NULL; attr = pcmk__xe_next(attr)) {
 
             const char *value = crm_element_value(attr, PCMK_XA_VALUE);
             const char *name = crm_element_value(attr, PCMK_XA_NAME);
 
             if (name == NULL) { // Sanity
                 continue;
             }
 
             if (strcmp(name, PCMK_META_REMOTE_NODE) == 0) {
                 remote_name = value;
 
             } else if (strcmp(name, PCMK_META_REMOTE_ADDR) == 0) {
                 remote_server = value;
 
             } else if (strcmp(name, PCMK_META_REMOTE_PORT) == 0) {
                 remote_port = value;
 
             } else if (strcmp(name, PCMK_META_REMOTE_CONNECT_TIMEOUT) == 0) {
                 connect_timeout = value;
 
             } else if (strcmp(name, PCMK_META_REMOTE_ALLOW_MIGRATE) == 0) {
                 remote_allow_migrate = value;
 
             } else if (strcmp(name, PCMK_META_IS_MANAGED) == 0) {
                 is_managed = value;
             }
         }
     }
 
     if (remote_name == NULL) {
         return NULL;
     }
 
     if (pe_find_resource(data->resources, remote_name) != NULL) {
         return NULL;
     }
 
     pe_create_remote_xml(parent, remote_name, container_id,
                          remote_allow_migrate, is_managed,
                          connect_timeout, remote_server, remote_port);
     return remote_name;
 }
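
 /* Illustrative only: a primitive that defines a guest node via the
  * meta-attributes handled above (IDs and values are made up):
  *
  *   <primitive id="vm1" class="ocf" provider="heartbeat" type="VirtualDomain">
  *     <meta_attributes id="vm1-meta">
  *       <nvpair id="vm1-remote" name="remote-node" value="guest1"/>
  *       <nvpair id="vm1-addr" name="remote-addr" value="192.168.122.10"/>
  *     </meta_attributes>
  *   </primitive>
  *
  * This would implicitly add an ocf:pacemaker:remote connection resource
  * named "guest1" to the configuration, to be unpacked later.
  */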
 
 static void
 handle_startup_fencing(pcmk_scheduler_t *scheduler, pcmk_node_t *new_node)
 {
     if ((new_node->private->variant == pcmk__node_variant_remote)
         && (new_node->private->remote == NULL)) {
         /* Ignore fencing for remote nodes that don't have a connection resource
          * associated with them. This happens when remote node entries get left
          * in the nodes section after the connection resource is removed.
          */
         return;
     }
 
     if (pcmk_is_set(scheduler->flags, pcmk_sched_startup_fencing)) {
         // All nodes are unclean until we've seen their status entry
         new_node->details->unclean = TRUE;
 
     } else {
         // Blind faith ...
         new_node->details->unclean = FALSE;
     }
 }
 
 gboolean
 unpack_nodes(xmlNode *xml_nodes, pcmk_scheduler_t *scheduler)
 {
     xmlNode *xml_obj = NULL;
     pcmk_node_t *new_node = NULL;
     const char *id = NULL;
     const char *uname = NULL;
     const char *type = NULL;
     const char *score = NULL;
 
     for (xml_obj = pcmk__xe_first_child(xml_nodes, NULL, NULL, NULL);
          xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) {
 
         if (pcmk__xe_is(xml_obj, PCMK_XE_NODE)) {
             new_node = NULL;
 
             id = crm_element_value(xml_obj, PCMK_XA_ID);
             uname = crm_element_value(xml_obj, PCMK_XA_UNAME);
             type = crm_element_value(xml_obj, PCMK_XA_TYPE);
             score = crm_element_value(xml_obj, PCMK_XA_SCORE);
             crm_trace("Processing node %s/%s", uname, id);
 
             if (id == NULL) {
                 pcmk__config_err("Ignoring <" PCMK_XE_NODE
                                  "> entry in configuration without id");
                 continue;
             }
             new_node = pe_create_node(id, uname, type, score, scheduler);
 
             if (new_node == NULL) {
                 return FALSE;
             }
 
             handle_startup_fencing(scheduler, new_node);
 
             add_node_attrs(xml_obj, new_node, FALSE, scheduler);
 
             crm_trace("Done with node %s",
                       crm_element_value(xml_obj, PCMK_XA_UNAME));
         }
     }
 
     if (scheduler->localhost
         && (pcmk_find_node(scheduler, scheduler->localhost) == NULL)) {
         crm_info("Creating a fake local node");
         pe_create_node(scheduler->localhost, scheduler->localhost, NULL, NULL,
                        scheduler);
     }
 
     return TRUE;
 }
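
 /* Illustrative only: the <nodes> section consumed above (IDs are made up):
  *
  *   <nodes>
  *     <node id="1" uname="node1"/>
  *     <node id="2" uname="node2" type="member"/>
  *   </nodes>
  */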
 
 static void
 unpack_launcher(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
 {
     const char *launcher_id = NULL;
 
     if (rsc->private->children != NULL) {
         g_list_foreach(rsc->private->children, (GFunc) unpack_launcher,
                        scheduler);
         return;
     }
 
     launcher_id = g_hash_table_lookup(rsc->private->meta, PCMK__META_CONTAINER);
     if ((launcher_id != NULL)
         && !pcmk__str_eq(launcher_id, rsc->id, pcmk__str_none)) {
         pcmk_resource_t *launcher = pe_find_resource(scheduler->resources,
                                                      launcher_id);
 
         if (launcher != NULL) {
             rsc->private->launcher = launcher;
             launcher->private->launched =
                 g_list_append(launcher->private->launched, rsc);
             pcmk__rsc_trace(rsc, "Resource %s's launcher is %s",
                             rsc->id, launcher_id);
         } else {
             pcmk__config_err("Resource %s: Unknown " PCMK__META_CONTAINER " %s",
                              rsc->id, launcher_id);
         }
     }
 }
 
 gboolean
 unpack_remote_nodes(xmlNode *xml_resources, pcmk_scheduler_t *scheduler)
 {
     xmlNode *xml_obj = NULL;
 
     /* Create remote nodes and guest nodes from the resource configuration
      * before unpacking resources.
      */
     for (xml_obj = pcmk__xe_first_child(xml_resources, NULL, NULL, NULL);
          xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) {
 
         const char *new_node_id = NULL;
 
         /* Check for remote nodes, which are defined by ocf:pacemaker:remote
          * primitives.
          */
         if (xml_contains_remote_node(xml_obj)) {
             new_node_id = pcmk__xe_id(xml_obj);
             /* The pcmk_find_node() check ensures we don't iterate over an
              * expanded node that has already been added to the node list
              */
             if (new_node_id
                 && (pcmk_find_node(scheduler, new_node_id) == NULL)) {
                 crm_trace("Found remote node %s defined by resource %s",
                           new_node_id, pcmk__xe_id(xml_obj));
                 pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE,
                                NULL, scheduler);
             }
             continue;
         }
 
         /* Check for guest nodes, which are defined by special meta-attributes
          * of a primitive of any type (for example, VirtualDomain or Xen).
          */
         if (pcmk__xe_is(xml_obj, PCMK_XE_PRIMITIVE)) {
             /* This will add an ocf:pacemaker:remote primitive to the
              * configuration for the guest node's connection, to be unpacked
              * later.
              */
             new_node_id = expand_remote_rsc_meta(xml_obj, xml_resources,
                                                  scheduler);
             if (new_node_id
                 && (pcmk_find_node(scheduler, new_node_id) == NULL)) {
                 crm_trace("Found guest node %s in resource %s",
                           new_node_id, pcmk__xe_id(xml_obj));
                 pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE,
                                NULL, scheduler);
             }
             continue;
         }
 
         /* Check for guest nodes inside a group. Clones are currently not
          * supported as guest nodes.
          */
         if (pcmk__xe_is(xml_obj, PCMK_XE_GROUP)) {
             xmlNode *xml_obj2 = NULL;
             for (xml_obj2 = pcmk__xe_first_child(xml_obj, NULL, NULL, NULL);
                  xml_obj2 != NULL; xml_obj2 = pcmk__xe_next(xml_obj2)) {
 
                 new_node_id = expand_remote_rsc_meta(xml_obj2, xml_resources,
                                                      scheduler);
 
                 if (new_node_id
                     && (pcmk_find_node(scheduler, new_node_id) == NULL)) {
                     crm_trace("Found guest node %s in resource %s inside group %s",
                               new_node_id, pcmk__xe_id(xml_obj2),
                               pcmk__xe_id(xml_obj));
                     pe_create_node(new_node_id, new_node_id, PCMK_VALUE_REMOTE,
                                    NULL, scheduler);
                 }
             }
         }
     }
     return TRUE;
 }
 
 /* Call this after all the nodes and resources have been unpacked, but before
  * the status section is read.
  *
  * A remote node's online status is reflected by the state of its connection
  * resource. Link the remote node to that resource here so it is easily
  * accessible during scheduler calculations.
  */
 static void
 link_rsc2remotenode(pcmk_scheduler_t *scheduler, pcmk_resource_t *new_rsc)
 {
     pcmk_node_t *remote_node = NULL;
 
     if (!pcmk_is_set(new_rsc->flags, pcmk__rsc_is_remote_connection)) {
         return;
     }
 
     if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
         /* Remote nodes and their connection resources are not linked during
          * quick location-only calculations
          */
         return;
     }
 
     remote_node = pcmk_find_node(scheduler, new_rsc->id);
     CRM_CHECK(remote_node != NULL, return);
 
     pcmk__rsc_trace(new_rsc, "Linking remote connection resource %s to %s",
                     new_rsc->id, pcmk__node_name(remote_node));
     remote_node->private->remote = new_rsc;
 
     if (new_rsc->private->launcher == NULL) {
         /* Handle start-up fencing for remote nodes (as opposed to guest nodes)
          * the same as is done for cluster nodes.
          */
         handle_startup_fencing(scheduler, remote_node);
 
     } else {
         /* pe_create_node() marks the new node as "remote" or "cluster"; now
          * that we know the node is a guest node, update it correctly.
          */
         pcmk__insert_dup(remote_node->private->attrs,
                          CRM_ATTR_KIND, "container");
     }
 }
 
 /*!
  * \internal
  * \brief Parse configuration XML for resource information
  *
  * \param[in]     xml_resources  Top of resource configuration XML
  * \param[in,out] scheduler      Scheduler data
  *
  * \return TRUE
  *
  * \note unpack_remote_nodes() MUST be called before this, so that the nodes can
  *       be used when pe__unpack_resource() calls resource_location()
  */
 gboolean
 unpack_resources(const xmlNode *xml_resources, pcmk_scheduler_t *scheduler)
 {
     xmlNode *xml_obj = NULL;
     GList *gIter = NULL;
 
     scheduler->template_rsc_sets = pcmk__strkey_table(free, pcmk__free_idref);
 
     for (xml_obj = pcmk__xe_first_child(xml_resources, NULL, NULL, NULL);
          xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) {
 
         pcmk_resource_t *new_rsc = NULL;
         const char *id = pcmk__xe_id(xml_obj);
 
         if (pcmk__str_empty(id)) {
             pcmk__config_err("Ignoring <%s> resource without ID",
                              xml_obj->name);
             continue;
         }
 
         if (pcmk__xe_is(xml_obj, PCMK_XE_TEMPLATE)) {
             if (g_hash_table_lookup_extended(scheduler->template_rsc_sets, id,
                                              NULL, NULL) == FALSE) {
                 /* Record the template's ID so its existence is known, even
                  * if nothing references it
                  */
                 pcmk__insert_dup(scheduler->template_rsc_sets, id, NULL);
             }
             continue;
         }
 
         crm_trace("Unpacking <%s " PCMK_XA_ID "='%s'>", xml_obj->name, id);
         if (pe__unpack_resource(xml_obj, &new_rsc, NULL,
                                 scheduler) == pcmk_rc_ok) {
             scheduler->resources = g_list_append(scheduler->resources, new_rsc);
             pcmk__rsc_trace(new_rsc, "Added resource %s", new_rsc->id);
 
         } else {
             pcmk__config_err("Ignoring <%s> resource '%s' "
                              "because configuration is invalid",
                              xml_obj->name, id);
         }
     }
 
     for (gIter = scheduler->resources; gIter != NULL; gIter = gIter->next) {
         pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
 
         unpack_launcher(rsc, scheduler);
         link_rsc2remotenode(scheduler, rsc);
     }
 
     scheduler->resources = g_list_sort(scheduler->resources,
                                        pe__cmp_rsc_priority);
     if (pcmk_is_set(scheduler->flags, pcmk_sched_location_only)) {
         /* Ignore */
 
     } else if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)
                && !pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) {
 
         pcmk__config_err("Resource start-up disabled since no STONITH resources have been defined");
         pcmk__config_err("Either configure some or disable STONITH with the "
                          PCMK_OPT_STONITH_ENABLED " option");
         pcmk__config_err("NOTE: Clusters with shared data need STONITH to ensure data integrity");
     }
 
     return TRUE;
 }
 
 gboolean
 unpack_tags(xmlNode *xml_tags, pcmk_scheduler_t *scheduler)
 {
     xmlNode *xml_tag = NULL;
 
     scheduler->tags = pcmk__strkey_table(free, pcmk__free_idref);
 
     for (xml_tag = pcmk__xe_first_child(xml_tags, NULL, NULL, NULL);
          xml_tag != NULL; xml_tag = pcmk__xe_next(xml_tag)) {
 
         xmlNode *xml_obj_ref = NULL;
         const char *tag_id = pcmk__xe_id(xml_tag);
 
         if (!pcmk__xe_is(xml_tag, PCMK_XE_TAG)) {
             continue;
         }
 
         if (tag_id == NULL) {
             pcmk__config_err("Ignoring <%s> without " PCMK_XA_ID,
                              (const char *) xml_tag->name);
             continue;
         }
 
         for (xml_obj_ref = pcmk__xe_first_child(xml_tag, NULL, NULL, NULL);
              xml_obj_ref != NULL; xml_obj_ref = pcmk__xe_next(xml_obj_ref)) {
 
             const char *obj_ref = pcmk__xe_id(xml_obj_ref);
 
             if (!pcmk__xe_is(xml_obj_ref, PCMK_XE_OBJ_REF)) {
                 continue;
             }
 
             if (obj_ref == NULL) {
                 pcmk__config_err("Ignoring <%s> for tag '%s' without " PCMK_XA_ID,
                                  xml_obj_ref->name, tag_id);
                 continue;
             }
 
             pcmk__add_idref(scheduler->tags, tag_id, obj_ref);
         }
     }
 
     return TRUE;
 }
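
 /* Illustrative only: the <tags> section consumed above (IDs are made up):
  *
  *   <tags>
  *     <tag id="all-databases">
  *       <obj_ref id="db-rsc1"/>
  *       <obj_ref id="db-rsc2"/>
  *     </tag>
  *   </tags>
  */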
 
 /* The ticket state section:
  * "/cib/status/tickets/ticket_state" */
 static gboolean
 unpack_ticket_state(xmlNode *xml_ticket, pcmk_scheduler_t *scheduler)
 {
     const char *ticket_id = NULL;
     const char *granted = NULL;
     const char *last_granted = NULL;
     const char *standby = NULL;
     xmlAttrPtr xIter = NULL;
 
     pcmk__ticket_t *ticket = NULL;
 
     ticket_id = pcmk__xe_id(xml_ticket);
     if (pcmk__str_empty(ticket_id)) {
         return FALSE;
     }
 
     crm_trace("Processing ticket state for %s", ticket_id);
 
     ticket = g_hash_table_lookup(scheduler->tickets, ticket_id);
     if (ticket == NULL) {
         ticket = ticket_new(ticket_id, scheduler);
         if (ticket == NULL) {
             return FALSE;
         }
     }
 
     for (xIter = xml_ticket->properties; xIter; xIter = xIter->next) {
         const char *prop_name = (const char *)xIter->name;
         const char *prop_value = pcmk__xml_attr_value(xIter);
 
         if (pcmk__str_eq(prop_name, PCMK_XA_ID, pcmk__str_none)) {
             continue;
         }
         pcmk__insert_dup(ticket->state, prop_name, prop_value);
     }
 
     granted = g_hash_table_lookup(ticket->state, PCMK__XA_GRANTED);
     if (granted && crm_is_true(granted)) {
         pcmk__set_ticket_flags(ticket, pcmk__ticket_granted);
         crm_info("We have ticket '%s'", ticket->id);
     } else {
         pcmk__clear_ticket_flags(ticket, pcmk__ticket_granted);
         crm_info("We do not have ticket '%s'", ticket->id);
     }
 
     last_granted = g_hash_table_lookup(ticket->state, PCMK_XA_LAST_GRANTED);
     if (last_granted) {
         long long last_granted_ll;
 
         pcmk__scan_ll(last_granted, &last_granted_ll, 0LL);
         ticket->last_granted = (time_t) last_granted_ll;
     }
 
     standby = g_hash_table_lookup(ticket->state, PCMK_XA_STANDBY);
     if (standby && crm_is_true(standby)) {
         pcmk__set_ticket_flags(ticket, pcmk__ticket_standby);
         if (pcmk_is_set(ticket->flags, pcmk__ticket_granted)) {
             crm_info("Granted ticket '%s' is in standby-mode", ticket->id);
         }
     } else {
         pcmk__clear_ticket_flags(ticket, pcmk__ticket_standby);
     }
 
     crm_trace("Done with ticket state for %s", ticket_id);
 
     return TRUE;
 }
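
 /* Illustrative only: a ticket state entry as consumed above (values are made
  * up; last-granted is seconds since the epoch):
  *
  *   <ticket_state id="ticketA" granted="true" last-granted="1713200000"
  *                 standby="false"/>
  */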
 
 static gboolean
 unpack_tickets_state(xmlNode *xml_tickets, pcmk_scheduler_t *scheduler)
 {
     xmlNode *xml_obj = NULL;
 
     for (xml_obj = pcmk__xe_first_child(xml_tickets, NULL, NULL, NULL);
          xml_obj != NULL; xml_obj = pcmk__xe_next(xml_obj)) {
 
         if (!pcmk__xe_is(xml_obj, PCMK__XE_TICKET_STATE)) {
             continue;
         }
         unpack_ticket_state(xml_obj, scheduler);
     }
 
     return TRUE;
 }
 
 static void
 unpack_handle_remote_attrs(pcmk_node_t *this_node, const xmlNode *state,
                            pcmk_scheduler_t *scheduler)
 {
     const char *discovery = NULL;
     const xmlNode *attrs = NULL;
     pcmk_resource_t *rsc = NULL;
     int maint = 0;
 
     if (!pcmk__xe_is(state, PCMK__XE_NODE_STATE)) {
         return;
     }
 
     if ((this_node == NULL) || !pcmk__is_pacemaker_remote_node(this_node)) {
         return;
     }
     crm_trace("Processing Pacemaker Remote node %s",
               pcmk__node_name(this_node));
 
     pcmk__scan_min_int(crm_element_value(state, PCMK__XA_NODE_IN_MAINTENANCE),
                        &maint, 0);
     if (maint) {
         pcmk__set_node_flags(this_node, pcmk__node_remote_maint);
     } else {
         pcmk__clear_node_flags(this_node, pcmk__node_remote_maint);
     }
 
     rsc = this_node->private->remote;
     if (!pcmk_is_set(this_node->private->flags, pcmk__node_remote_reset)) {
         this_node->details->unclean = FALSE;
         pcmk__set_node_flags(this_node, pcmk__node_seen);
     }
     attrs = pcmk__xe_first_child(state, PCMK__XE_TRANSIENT_ATTRIBUTES, NULL,
                                  NULL);
     add_node_attrs(attrs, this_node, TRUE, scheduler);
 
     if (pe__shutdown_requested(this_node)) {
         crm_info("%s is shutting down", pcmk__node_name(this_node));
         this_node->details->shutdown = TRUE;
     }
 
     if (crm_is_true(pcmk__node_attr(this_node, PCMK_NODE_ATTR_STANDBY, NULL,
                                     pcmk__rsc_node_current))) {
         crm_info("%s is in standby mode", pcmk__node_name(this_node));
         pcmk__set_node_flags(this_node, pcmk__node_standby);
     }
 
     if (crm_is_true(pcmk__node_attr(this_node, PCMK_NODE_ATTR_MAINTENANCE, NULL,
                                     pcmk__rsc_node_current))
         || ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk__rsc_managed))) {
         crm_info("%s is in maintenance mode", pcmk__node_name(this_node));
         this_node->details->maintenance = TRUE;
     }
 
     discovery = pcmk__node_attr(this_node,
                                 PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED,
                                 NULL, pcmk__rsc_node_current);
     if ((discovery != NULL) && !crm_is_true(discovery)) {
         pcmk__warn_once(pcmk__wo_rdisc_enabled,
                         "Support for the "
                         PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED
                         " node attribute is deprecated and will be removed"
                         " (and behave as 'true') in a future release.");
 
         if (pcmk__is_remote_node(this_node)
             && !pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
             pcmk__config_warn("Ignoring "
                               PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED
                               " attribute on Pacemaker Remote node %s"
                               " because fencing is disabled",
                               pcmk__node_name(this_node));
         } else {
             /* This is either a remote node with fencing enabled, or a guest
              * node. We don't care whether fencing is enabled when fencing guest
              * nodes, because they are "fenced" by recovering their containing
              * resource.
              */
             crm_info("%s has resource discovery disabled",
                      pcmk__node_name(this_node));
             pcmk__clear_node_flags(this_node, pcmk__node_probes_allowed);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Unpack a cluster node's transient attributes
  *
  * \param[in]     state      CIB node state XML
  * \param[in,out] node       Cluster node whose attributes are being unpacked
  * \param[in,out] scheduler  Scheduler data
  */
 static void
 unpack_transient_attributes(const xmlNode *state, pcmk_node_t *node,
                             pcmk_scheduler_t *scheduler)
 {
     const char *discovery = NULL;
     const xmlNode *attrs = pcmk__xe_first_child(state,
                                                 PCMK__XE_TRANSIENT_ATTRIBUTES,
                                                 NULL, NULL);
 
     add_node_attrs(attrs, node, TRUE, scheduler);
 
     if (crm_is_true(pcmk__node_attr(node, PCMK_NODE_ATTR_STANDBY, NULL,
                                     pcmk__rsc_node_current))) {
         crm_info("%s is in standby mode", pcmk__node_name(node));
         pcmk__set_node_flags(node, pcmk__node_standby);
     }
 
     if (crm_is_true(pcmk__node_attr(node, PCMK_NODE_ATTR_MAINTENANCE, NULL,
                                     pcmk__rsc_node_current))) {
         crm_info("%s is in maintenance mode", pcmk__node_name(node));
         node->details->maintenance = TRUE;
     }
 
     discovery = pcmk__node_attr(node,
                                 PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED,
                                 NULL, pcmk__rsc_node_current);
     if ((discovery != NULL) && !crm_is_true(discovery)) {
         pcmk__config_warn("Ignoring "
                           PCMK__NODE_ATTR_RESOURCE_DISCOVERY_ENABLED
                           " attribute for %s because disabling resource"
                           " discovery is not allowed for cluster nodes",
                           pcmk__node_name(node));
     }
 }
 
 /*!
  * \internal
  * \brief Unpack a node state entry (first pass)
  *
  * Unpack one node state entry from status. This unpacks information from the
  * \c PCMK__XE_NODE_STATE element itself and node attributes inside it, but not
  * the resource history inside it. Multiple passes through the status are needed
  * to fully unpack everything.
  *
  * \param[in]     state      CIB node state XML
  * \param[in,out] scheduler  Scheduler data
  */
 static void
 unpack_node_state(const xmlNode *state, pcmk_scheduler_t *scheduler)
 {
     const char *id = NULL;
     const char *uname = NULL;
     pcmk_node_t *this_node = NULL;
 
     id = crm_element_value(state, PCMK_XA_ID);
     if (id == NULL) {
         pcmk__config_err("Ignoring invalid " PCMK__XE_NODE_STATE " entry without "
                          PCMK_XA_ID);
         crm_log_xml_info(state, "missing-id");
         return;
     }
 
     uname = crm_element_value(state, PCMK_XA_UNAME);
     if (uname == NULL) {
         /* If a joining peer causes the cluster to acquire quorum from
          * Corosync before it has joined the CPG membership of
          * pacemaker-controld, the PCMK__XE_NODE_STATE entry created for it
          * might not have a PCMK_XA_UNAME yet. Recognize such a node as
          * pending, and wait for it to join the CPG.
          */
         crm_trace("Handling " PCMK__XE_NODE_STATE " entry with id=\"%s\" "
                   "without " PCMK_XA_UNAME,
                   id);
     }
 
     this_node = pe_find_node_any(scheduler->nodes, id, uname);
     if (this_node == NULL) {
         crm_notice("Ignoring recorded state for removed node with name %s and "
                    PCMK_XA_ID " %s", pcmk__s(uname, "unknown"), id);
         return;
     }
 
     if (pcmk__is_pacemaker_remote_node(this_node)) {
         int remote_fenced = 0;
 
         /* We can't determine the online status of Pacemaker Remote nodes until
          * after all resource history has been unpacked. In this first pass, we
          * do need to mark whether the node has been fenced, since that
          * affects how cluster nodes' resource state is unpacked.
          */
         pcmk__scan_min_int(crm_element_value(state, PCMK__XA_NODE_FENCED),
                            &remote_fenced, 0);
         if (remote_fenced) {
             pcmk__set_node_flags(this_node, pcmk__node_remote_fenced);
         } else {
             pcmk__clear_node_flags(this_node, pcmk__node_remote_fenced);
         }
         return;
     }
 
     unpack_transient_attributes(state, this_node, scheduler);
 
     /* Provisionally mark this cluster node as clean. We have at least seen it
      * in the current cluster's lifetime.
      */
     this_node->details->unclean = FALSE;
     pcmk__set_node_flags(this_node, pcmk__node_seen);
 
     crm_trace("Determining online status of cluster node %s (id %s)",
               pcmk__node_name(this_node), id);
     determine_online_status(state, this_node, scheduler);
 
     if (!pcmk_is_set(scheduler->flags, pcmk_sched_quorate)
         && this_node->details->online
         && (scheduler->no_quorum_policy == pcmk_no_quorum_fence)) {
         /* Everything else should flow from this automatically
          * (at least until the scheduler becomes able to migrate off
          * healthy resources)
          */
         pe_fence_node(scheduler, this_node, "cluster does not have quorum",
                       FALSE);
     }
 }
 
 /*!
  * \internal
  * \brief Unpack nodes' resource history as much as possible
  *
  * Unpack as many nodes' resource history as possible in one pass through the
  * status. We need to process Pacemaker Remote nodes' connections/containers
  * before unpacking their history; the connection/container history will be
  * in another node's history, so it might take multiple passes to unpack
  * everything.
  *
  * \param[in]     status     CIB XML status section
  * \param[in]     fence      If true, treat any not-yet-unpacked nodes as unseen
  * \param[in,out] scheduler  Scheduler data
  *
  * \return Standard Pacemaker return code (specifically pcmk_rc_ok if done,
  *         or EAGAIN if more unpacking remains to be done)
  */
 static int
 unpack_node_history(const xmlNode *status, bool fence,
                     pcmk_scheduler_t *scheduler)
 {
     int rc = pcmk_rc_ok;
 
     // Loop through all PCMK__XE_NODE_STATE entries in CIB status
     for (const xmlNode *state = pcmk__xe_first_child(status,
                                                      PCMK__XE_NODE_STATE, NULL,
                                                      NULL);
          state != NULL; state = pcmk__xe_next_same(state)) {
 
         const char *id = pcmk__xe_id(state);
         const char *uname = crm_element_value(state, PCMK_XA_UNAME);
         pcmk_node_t *this_node = NULL;
 
         if ((id == NULL) || (uname == NULL)) {
             // Warning already logged in first pass through status section
             crm_trace("Not unpacking resource history from malformed "
                       PCMK__XE_NODE_STATE " without id and/or uname");
             continue;
         }
 
         this_node = pe_find_node_any(scheduler->nodes, id, uname);
         if (this_node == NULL) {
             // Warning already logged in first pass through status section
             crm_trace("Not unpacking resource history for node %s because "
                       "no longer in configuration", id);
             continue;
         }
 
         if (pcmk_is_set(this_node->private->flags, pcmk__node_unpacked)) {
             crm_trace("Not unpacking resource history for node %s because "
                       "already unpacked", id);
             continue;
         }
 
         if (fence) {
             // We're processing all remaining nodes
 
         } else if (pcmk__is_guest_or_bundle_node(this_node)) {
             /* We can unpack a guest node's history only after we've unpacked
              * other resource history to the point that we know that the node's
              * connection and containing resource are both up.
              */
             const pcmk_resource_t *remote = this_node->private->remote;
             const pcmk_resource_t *launcher = remote->private->launcher;
 
             if ((remote->private->orig_role != pcmk_role_started)
                 || (launcher->private->orig_role != pcmk_role_started)) {
                 crm_trace("Not unpacking resource history for guest node %s "
                           "because launcher and connection are not known to "
                           "be up", id);
                 continue;
             }
 
         } else if (pcmk__is_remote_node(this_node)) {
             /* We can unpack a remote node's history only after we've unpacked
              * other resource history to the point that we know that the node's
              * connection is up, with the exception of when shutdown locks are
              * in use.
              */
             pcmk_resource_t *rsc = this_node->private->remote;
 
             if ((rsc == NULL)
                 || (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)
                     && (rsc->private->orig_role != pcmk_role_started))) {
                 crm_trace("Not unpacking resource history for remote node %s "
                           "because connection is not known to be up", id);
                 continue;
             }
 
         /* If fencing and shutdown locks are disabled and we're not processing
          * unseen nodes, then we don't want to unpack offline nodes until online
          * nodes have been unpacked. This allows us to number active clone
          * instances first.
          */
         } else if (!pcmk_any_flags_set(scheduler->flags,
                                        pcmk_sched_fencing_enabled
                                        |pcmk_sched_shutdown_lock)
                    && !this_node->details->online) {
             crm_trace("Not unpacking resource history for offline "
                       "cluster node %s", id);
             continue;
         }
 
         if (pcmk__is_pacemaker_remote_node(this_node)) {
             determine_remote_online_status(scheduler, this_node);
             unpack_handle_remote_attrs(this_node, state, scheduler);
         }
 
         crm_trace("Unpacking resource history for %snode %s",
                   (fence? "unseen " : ""), id);
 
         pcmk__set_node_flags(this_node, pcmk__node_unpacked);
         unpack_node_lrm(this_node, state, scheduler);
 
         rc = EAGAIN; // Other node histories might depend on this one
     }
     return rc;
 }
 
 /*!
  * \internal
  * \brief Unpack a CIB status section into scheduler data
  *
  * \param[in,out] status     CIB XML status section to unpack
  * \param[in,out] scheduler  Scheduler data
  *
  * \return TRUE (this function currently always succeeds)
  */
 gboolean
 unpack_status(xmlNode *status, pcmk_scheduler_t *scheduler)
 {
     xmlNode *state = NULL;
 
     crm_trace("Beginning unpack");
 
     if (scheduler->tickets == NULL) {
         scheduler->tickets = pcmk__strkey_table(free, destroy_ticket);
     }
 
     for (state = pcmk__xe_first_child(status, NULL, NULL, NULL); state != NULL;
          state = pcmk__xe_next(state)) {
 
         if (pcmk__xe_is(state, PCMK_XE_TICKETS)) {
             unpack_tickets_state((xmlNode *) state, scheduler);
 
         } else if (pcmk__xe_is(state, PCMK__XE_NODE_STATE)) {
             unpack_node_state(state, scheduler);
         }
     }
 
     while (unpack_node_history(status, FALSE, scheduler) == EAGAIN) {
         crm_trace("Another pass through node resource histories is needed");
     }
 
     // Now catch any nodes we didn't see
     unpack_node_history(status,
                         pcmk_is_set(scheduler->flags,
                                     pcmk_sched_fencing_enabled),
                         scheduler);
 
     /* Now that we know where resources are, we can schedule stops of containers
      * with failed bundle connections
      */
     if (scheduler->stop_needed != NULL) {
         for (GList *item = scheduler->stop_needed; item; item = item->next) {
             pcmk_resource_t *container = item->data;
             pcmk_node_t *node = pcmk__current_node(container);
 
             if (node) {
                 stop_action(container, node, FALSE);
             }
         }
         g_list_free(scheduler->stop_needed);
         scheduler->stop_needed = NULL;
     }
 
     /* Now that we know status of all Pacemaker Remote connections and nodes,
      * we can stop connections for node shutdowns, and check the online status
      * of remote/guest nodes that didn't have any node history to unpack.
      */
     for (GList *gIter = scheduler->nodes; gIter != NULL; gIter = gIter->next) {
         pcmk_node_t *this_node = gIter->data;
 
         if (!pcmk__is_pacemaker_remote_node(this_node)) {
             continue;
         }
         if (this_node->details->shutdown
             && (this_node->private->remote != NULL)) {
             pe__set_next_role(this_node->private->remote, pcmk_role_stopped,
                               "remote shutdown");
         }
         if (!pcmk_is_set(this_node->private->flags, pcmk__node_unpacked)) {
             determine_remote_online_status(scheduler, this_node);
         }
     }
 
     return TRUE;
 }
 
 /*!
  * \internal
  * \brief Unpack node's time when it became a member at the cluster layer
  *
  * \param[in]     node_state  Node's \c PCMK__XE_NODE_STATE entry
  * \param[in,out] scheduler   Scheduler data
  *
  * \return Epoch time when node became a cluster member
  *         (or scheduler effective time for legacy entries) if a member,
  *         0 if not a member, or -1 if no valid information available
  */
 static long long
 unpack_node_member(const xmlNode *node_state, pcmk_scheduler_t *scheduler)
 {
     const char *member_time = crm_element_value(node_state, PCMK__XA_IN_CCM);
     int member = 0;
 
     if (member_time == NULL) {
         return -1LL;
 
     } else if (crm_str_to_boolean(member_time, &member) == 1) {
         /* If in_ccm=0, we'll return 0 here. If in_ccm=1, either the entry was
          * recorded as a boolean for a DC < 2.1.7, or the node is pending
          * shutdown and has left the CPG, in which case it was set to 1 to avoid
          * fencing for PCMK_OPT_NODE_PENDING_TIMEOUT.
          *
          * We return the effective time for in_ccm=1 because what's important to
          * avoid fencing is that effective time minus this value is less than
          * the pending node timeout.
          */
         return member? (long long) get_effective_time(scheduler) : 0LL;
 
     } else {
         long long when_member = 0LL;
 
         if ((pcmk__scan_ll(member_time, &when_member,
                            0LL) != pcmk_rc_ok) || (when_member < 0LL)) {
             crm_warn("Unrecognized value '%s' for " PCMK__XA_IN_CCM
                      " in " PCMK__XE_NODE_STATE " entry", member_time);
             return -1LL;
         }
         return when_member;
     }
 }
 
 /*!
  * \internal
  * \brief Unpack node's time when it became online in process group
  *
  * \param[in] node_state  Node's \c PCMK__XE_NODE_STATE entry
  *
  * \return Epoch time when node became online in process group (or 0 if not
  *         online, or 1 for legacy online entries)
  */
 static long long
 unpack_node_online(const xmlNode *node_state)
 {
     const char *peer_time = crm_element_value(node_state, PCMK_XA_CRMD);
 
     // @COMPAT Entries recorded for DCs < 2.1.7 have "online" or "offline"
     if (pcmk__str_eq(peer_time, PCMK_VALUE_OFFLINE,
                      pcmk__str_casei|pcmk__str_null_matches)) {
         return 0LL;
 
     } else if (pcmk__str_eq(peer_time, PCMK_VALUE_ONLINE, pcmk__str_casei)) {
         return 1LL;
 
     } else {
         long long when_online = 0LL;
 
         if ((pcmk__scan_ll(peer_time, &when_online, 0LL) != pcmk_rc_ok)
             || (when_online < 0)) {
             crm_warn("Unrecognized value '%s' for " PCMK_XA_CRMD " in "
                      PCMK__XE_NODE_STATE " entry, assuming offline", peer_time);
             return 0LL;
         }
         return when_online;
     }
 }
 
 /*!
  * \internal
  * \brief Unpack node attribute for user-requested fencing
  *
  * \param[in] node        Node to check
  * \param[in] node_state  Node's \c PCMK__XE_NODE_STATE entry in CIB status
  *
  * \return \c true if fencing has been requested for \p node, otherwise \c false
  */
 static bool
 unpack_node_terminate(const pcmk_node_t *node, const xmlNode *node_state)
 {
     long long value = 0LL;
     int value_i = 0;
     const char *value_s = pcmk__node_attr(node, PCMK_NODE_ATTR_TERMINATE,
                                           NULL, pcmk__rsc_node_current);
 
     // Value may be boolean or an epoch time
     if (crm_str_to_boolean(value_s, &value_i) == 1) {
         return (value_i != 0);
     }
     if (pcmk__scan_ll(value_s, &value, 0LL) == pcmk_rc_ok) {
         return (value > 0);
     }
     crm_warn("Ignoring unrecognized value '%s' for " PCMK_NODE_ATTR_TERMINATE
              "node attribute for %s", value_s, pcmk__node_name(node));
     return false;
 }
 
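 /*!
  * \internal
  * \brief Determine a cluster node's online status when fencing is disabled
  *
  * \param[in,out] scheduler   Scheduler data
  * \param[in]     node_state  Node's \c PCMK__XE_NODE_STATE entry
  * \param[in,out] this_node   Node to check
  *
  * \return TRUE if node should be considered online, otherwise FALSE
  */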
 static gboolean
 determine_online_status_no_fencing(pcmk_scheduler_t *scheduler,
                                    const xmlNode *node_state,
                                    pcmk_node_t *this_node)
 {
     gboolean online = FALSE;
     const char *join = crm_element_value(node_state, PCMK__XA_JOIN);
     const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED);
     long long when_member = unpack_node_member(node_state, scheduler);
     long long when_online = unpack_node_online(node_state);
 
     if (when_member <= 0) {
         crm_trace("Node %s is %sdown", pcmk__node_name(this_node),
                   ((when_member < 0)? "presumed " : ""));
 
     } else if (when_online > 0) {
         if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
             online = TRUE;
         } else {
             crm_debug("Node %s is not ready to run resources: %s",
                       pcmk__node_name(this_node), join);
         }
 
     } else if (!pcmk_is_set(this_node->private->flags,
                             pcmk__node_expected_up)) {
         crm_trace("Node %s controller is down: "
                   "member@%lld online@%lld join=%s expected=%s",
                   pcmk__node_name(this_node), when_member, when_online,
                   pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
 
     } else {
         /* mark it unclean */
         pe_fence_node(scheduler, this_node, "peer is unexpectedly down", FALSE);
         crm_info("Node %s member@%lld online@%lld join=%s expected=%s",
                  pcmk__node_name(this_node), when_member, when_online,
                  pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"));
     }
     return online;
 }
 
 /*!
  * \internal
  * \brief Check whether a node has taken too long to join controller group
  *
  * \param[in,out] scheduler    Scheduler data
  * \param[in]     node         Node to check
  * \param[in]     when_member  Epoch time when node became a cluster member
  * \param[in]     when_online  Epoch time when node joined controller group
  *
  * \return true if node has been pending (on the way up) longer than
  *         \c PCMK_OPT_NODE_PENDING_TIMEOUT, otherwise false
  * \note This will also update the cluster's recheck time if appropriate.
  */
 static inline bool
 pending_too_long(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
                  long long when_member, long long when_online)
 {
     if ((scheduler->node_pending_timeout > 0)
         && (when_member > 0) && (when_online <= 0)) {
         // There is a timeout on pending nodes, and node is pending
 
         time_t timeout = when_member + scheduler->node_pending_timeout;
 
         if (get_effective_time(node->private->scheduler) >= timeout) {
             return true; // Node has timed out
         }
 
         // Node is pending, but still has time
         pe__update_recheck_time(timeout, scheduler, "pending node timeout");
     }
     return false;
 }
 
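 /*!
  * \internal
  * \brief Determine a cluster node's online status when fencing is enabled
  *
  * \param[in,out] scheduler   Scheduler data
  * \param[in]     node_state  Node's \c PCMK__XE_NODE_STATE entry
  * \param[in,out] this_node   Node to check
  *
  * \return true if node should be considered online, otherwise false
  */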
 static bool
 determine_online_status_fencing(pcmk_scheduler_t *scheduler,
                                 const xmlNode *node_state,
                                 pcmk_node_t *this_node)
 {
     bool termination_requested = unpack_node_terminate(this_node, node_state);
     const char *join = crm_element_value(node_state, PCMK__XA_JOIN);
     const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED);
     long long when_member = unpack_node_member(node_state, scheduler);
     long long when_online = unpack_node_online(node_state);
 
 /*
   - PCMK__XA_JOIN          ::= member|down|pending|banned
   - PCMK_XA_EXPECTED       ::= member|down
 
   @COMPAT with entries recorded for DCs < 2.1.7
   - PCMK__XA_IN_CCM        ::= true|false
   - PCMK_XA_CRMD           ::= online|offline
 
   Since crm_feature_set 3.18.0 (pacemaker-2.1.7):
   - PCMK__XA_IN_CCM        ::= <timestamp>|0
   Time since when the node has been a cluster member. A value of 0 means the
   node is not a cluster member.
 
   - PCMK_XA_CRMD           ::= <timestamp>|0
   Time since when the peer has been online in the CPG. A value of 0 means the
   peer is offline in the CPG.
 */
 
     crm_trace("Node %s member@%lld online@%lld join=%s expected=%s%s",
               pcmk__node_name(this_node), when_member, when_online,
               pcmk__s(join, "<null>"), pcmk__s(exp_state, "<null>"),
               (termination_requested? " (termination requested)" : ""));
 
     if (this_node->details->shutdown) {
         crm_debug("%s is shutting down", pcmk__node_name(this_node));
 
         /* Slightly different criteria since we can't shut down a dead peer */
         return (when_online > 0);
     }
 
     if (when_member < 0) {
         pe_fence_node(scheduler, this_node,
                       "peer has not been seen by the cluster", FALSE);
         return false;
     }
 
     if (pcmk__str_eq(join, CRMD_JOINSTATE_NACK, pcmk__str_none)) {
         pe_fence_node(scheduler, this_node,
                       "peer failed Pacemaker membership criteria", FALSE);
 
     } else if (termination_requested) {
         if ((when_member <= 0) && (when_online <= 0)
             && pcmk__str_eq(join, CRMD_JOINSTATE_DOWN, pcmk__str_none)) {
             crm_info("%s was fenced as requested", pcmk__node_name(this_node));
             return false;
         }
         pe_fence_node(scheduler, this_node, "fencing was requested", false);
 
     } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_DOWN,
                             pcmk__str_null_matches)) {
 
         if (pending_too_long(scheduler, this_node, when_member, when_online)) {
             pe_fence_node(scheduler, this_node,
                           "peer pending timed out on joining the process group",
                           FALSE);
 
         } else if ((when_member > 0) || (when_online > 0)) {
             crm_info("- %s is not ready to run resources",
                      pcmk__node_name(this_node));
             pcmk__set_node_flags(this_node, pcmk__node_standby);
             this_node->details->pending = TRUE;
 
         } else {
             crm_trace("%s is down or still coming up",
                       pcmk__node_name(this_node));
         }
 
     } else if (when_member <= 0) {
         // Consider PCMK_OPT_PRIORITY_FENCING_DELAY for lost nodes
         pe_fence_node(scheduler, this_node,
                       "peer is no longer part of the cluster", TRUE);
 
     } else if (when_online <= 0) {
         pe_fence_node(scheduler, this_node,
                       "peer process is no longer available", FALSE);
 
         /* Everything is running at this point, now check join state */
 
     } else if (pcmk__str_eq(join, CRMD_JOINSTATE_MEMBER, pcmk__str_none)) {
         crm_info("%s is active", pcmk__node_name(this_node));
 
     } else if (pcmk__str_any_of(join, CRMD_JOINSTATE_PENDING,
                                 CRMD_JOINSTATE_DOWN, NULL)) {
         crm_info("%s is not ready to run resources",
                  pcmk__node_name(this_node));
         pcmk__set_node_flags(this_node, pcmk__node_standby);
         this_node->details->pending = TRUE;
 
     } else {
         pe_fence_node(scheduler, this_node, "peer was in an unknown state",
                       FALSE);
     }
 
     return (when_member > 0);
 }
 
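 /*!
  * \internal
  * \brief Determine a Pacemaker Remote node's online status
  *
  * A remote or guest node is considered online when its connection resource
  * (and for guest nodes, the launcher as well) is known to be started and has
  * not failed.
  *
  * \param[in,out] scheduler  Scheduler data
  * \param[in,out] this_node  Pacemaker Remote node to check
  */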
 static void
 determine_remote_online_status(pcmk_scheduler_t *scheduler,
                                pcmk_node_t *this_node)
 {
     pcmk_resource_t *rsc = this_node->private->remote;
     pcmk_resource_t *launcher = NULL;
     pcmk_node_t *host = NULL;
     const char *node_type = "Remote";
 
     if (rsc == NULL) {
         /* This is a leftover node state entry for a former Pacemaker Remote
          * node whose connection resource was removed. Consider it offline.
          */
         crm_trace("Pacemaker Remote node %s is considered OFFLINE because "
                   "its connection resource has been removed from the CIB",
                   this_node->private->id);
         this_node->details->online = FALSE;
         return;
     }
 
     launcher = rsc->private->launcher;
     if (launcher != NULL) {
         node_type = "Guest";
         if (pcmk__list_of_1(rsc->private->active_nodes)) {
             host = rsc->private->active_nodes->data;
         }
     }
 
     /* If the resource is currently started, mark it online. */
     if (rsc->private->orig_role == pcmk_role_started) {
         this_node->details->online = TRUE;
     }
 
     /* consider this node shutting down if transitioning start->stop */
     if ((rsc->private->orig_role == pcmk_role_started)
         && (rsc->private->next_role == pcmk_role_stopped)) {
 
         crm_trace("%s node %s shutting down because connection resource is stopping",
                   node_type, this_node->private->id);
         this_node->details->shutdown = TRUE;
     }
 
     /* Now check all the failure conditions. */
     if ((launcher != NULL) && pcmk_is_set(launcher->flags, pcmk__rsc_failed)) {
         crm_trace("Guest node %s UNCLEAN because guest resource failed",
                   this_node->private->id);
         this_node->details->online = FALSE;
         pcmk__set_node_flags(this_node, pcmk__node_remote_reset);
 
     } else if (pcmk_is_set(rsc->flags, pcmk__rsc_failed)) {
         crm_trace("%s node %s OFFLINE because connection resource failed",
                   node_type, this_node->private->id);
         this_node->details->online = FALSE;
 
     } else if ((rsc->private->orig_role == pcmk_role_stopped)
                || ((launcher != NULL)
                    && (launcher->private->orig_role == pcmk_role_stopped))) {
 
         crm_trace("%s node %s OFFLINE because its resource is stopped",
                   node_type, this_node->private->id);
         this_node->details->online = FALSE;
         pcmk__clear_node_flags(this_node, pcmk__node_remote_reset);
 
     } else if (host && (host->details->online == FALSE)
                && host->details->unclean) {
         crm_trace("Guest node %s UNCLEAN because host is unclean",
                   this_node->private->id);
         this_node->details->online = FALSE;
         pcmk__set_node_flags(this_node, pcmk__node_remote_reset);
 
     } else {
         crm_trace("%s node %s is %s",
                   node_type, this_node->private->id,
                   this_node->details->online? "ONLINE" : "OFFLINE");
     }
 }
 
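 /*!
  * \internal
  * \brief Determine a node's online status, logging and flagging the result
  *
  * \param[in]     node_state  Node's \c PCMK__XE_NODE_STATE entry
  * \param[in,out] this_node   Node to check
  * \param[in,out] scheduler   Scheduler data
  */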
 static void
 determine_online_status(const xmlNode *node_state, pcmk_node_t *this_node,
                         pcmk_scheduler_t *scheduler)
 {
     gboolean online = FALSE;
     const char *exp_state = crm_element_value(node_state, PCMK_XA_EXPECTED);
 
     CRM_CHECK(this_node != NULL, return);
 
     this_node->details->shutdown = FALSE;
 
     if (pe__shutdown_requested(this_node)) {
         this_node->details->shutdown = TRUE;
 
     } else if (pcmk__str_eq(exp_state, CRMD_JOINSTATE_MEMBER, pcmk__str_casei)) {
         pcmk__set_node_flags(this_node, pcmk__node_expected_up);
     }
 
     if (this_node->private->variant == pcmk__node_variant_ping) {
         this_node->details->unclean = FALSE;
         online = FALSE;         /* As far as resource management is concerned,
                                  * the node is safely offline.
                                  * Anyone caught abusing this logic will be shot
                                  */
 
     } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
         online = determine_online_status_no_fencing(scheduler, node_state,
                                                     this_node);
 
     } else {
         online = determine_online_status_fencing(scheduler, node_state,
                                                  this_node);
     }
 
     if (online) {
         this_node->details->online = TRUE;
 
     } else {
         /* remove node from contention */
         this_node->assign->score = -PCMK_SCORE_INFINITY;
     }
 
     if (online && this_node->details->shutdown) {
         /* don't run resources here */
         this_node->assign->score = -PCMK_SCORE_INFINITY;
     }
 
     if (this_node->private->variant == pcmk__node_variant_ping) {
         crm_info("%s is not a Pacemaker node", pcmk__node_name(this_node));
 
     } else if (this_node->details->unclean) {
-        pcmk__sched_warn("%s is unclean", pcmk__node_name(this_node));
+        pcmk__sched_warn(scheduler, "%s is unclean",
+                         pcmk__node_name(this_node));
 
     } else if (!this_node->details->online) {
         crm_trace("%s is offline", pcmk__node_name(this_node));
 
     } else if (this_node->details->shutdown) {
         crm_info("%s is shutting down", pcmk__node_name(this_node));
 
     } else if (this_node->details->pending) {
         crm_info("%s is pending", pcmk__node_name(this_node));
 
     } else if (pcmk_is_set(this_node->private->flags, pcmk__node_standby)) {
         crm_info("%s is in standby", pcmk__node_name(this_node));
 
     } else if (this_node->details->maintenance) {
         crm_info("%s is in maintenance", pcmk__node_name(this_node));
 
     } else {
         crm_info("%s is online", pcmk__node_name(this_node));
     }
 }
 
 /*!
  * \internal
  * \brief Find the end of a resource's name, excluding any clone suffix
  *
  * \param[in] id  Resource ID to check
  *
  * \return Pointer to last character of resource's base name
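  *
  * \note For example, given an \p id of "myrsc:32", this returns a pointer
  *       to the 'c' in "myrsc".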
  */
 const char *
 pe_base_name_end(const char *id)
 {
     if (!pcmk__str_empty(id)) {
         const char *end = id + strlen(id) - 1;
 
         for (const char *s = end; s > id; --s) {
             switch (*s) {
                 case '0':
                 case '1':
                 case '2':
                 case '3':
                 case '4':
                 case '5':
                 case '6':
                 case '7':
                 case '8':
                 case '9':
                     break;
                 case ':':
                     return (s == end)? s : (s - 1);
                 default:
                     return end;
             }
         }
         return end;
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Get a resource name excluding any clone suffix
  *
  * \param[in] last_rsc_id  Resource ID to check
  *
  * \return Pointer to newly allocated string with resource's base name
  * \note It is the caller's responsibility to free() the result.
  *       This asserts on error, so callers can assume result is not NULL.
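  * \note For example, an ID of "myrsc:32" yields "myrsc".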
  */
 char *
 clone_strip(const char *last_rsc_id)
 {
     const char *end = pe_base_name_end(last_rsc_id);
     char *basename = NULL;
 
     CRM_ASSERT(end);
     basename = strndup(last_rsc_id, end - last_rsc_id + 1);
     CRM_ASSERT(basename);
     return basename;
 }
 
 /*!
  * \internal
  * \brief Get the name of the first instance of a cloned resource
  *
  * \param[in] last_rsc_id  Resource ID to check
  *
  * \return Pointer to newly allocated string with resource's base name plus :0
  * \note It is the caller's responsibility to free() the result.
  *       This asserts on error, so callers can assume result is not NULL.
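  * \note For example, an ID of "myrsc:32" yields "myrsc:0".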
  */
 char *
 clone_zero(const char *last_rsc_id)
 {
     const char *end = pe_base_name_end(last_rsc_id);
     size_t base_name_len = end - last_rsc_id + 1;
     char *zero = NULL;
 
     CRM_ASSERT(end);
     zero = pcmk__assert_alloc(base_name_len + 3, sizeof(char));
     memcpy(zero, last_rsc_id, base_name_len);
     zero[base_name_len] = ':';
     zero[base_name_len + 1] = '0';
     return zero;
 }
 
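 /*!
  * \internal
  * \brief Create a resource object for a removed resource found only in the
  *        operation history
  *
  * \param[in]     rsc_id     ID of resource to create
  * \param[in]     rsc_entry  Resource history entry to copy attributes from
  * \param[in,out] scheduler  Scheduler data
  *
  * \return Newly created resource object, or NULL on error
  */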
 static pcmk_resource_t *
 create_fake_resource(const char *rsc_id, const xmlNode *rsc_entry,
                      pcmk_scheduler_t *scheduler)
 {
     pcmk_resource_t *rsc = NULL;
     xmlNode *xml_rsc = pcmk__xe_create(NULL, PCMK_XE_PRIMITIVE);
 
     pcmk__xe_copy_attrs(xml_rsc, rsc_entry, pcmk__xaf_none);
     crm_xml_add(xml_rsc, PCMK_XA_ID, rsc_id);
     crm_log_xml_debug(xml_rsc, "Orphan resource");
 
     if (pe__unpack_resource(xml_rsc, &rsc, NULL, scheduler) != pcmk_rc_ok) {
         return NULL;
     }
 
     if (xml_contains_remote_node(xml_rsc)) {
         pcmk_node_t *node;
 
         crm_debug("Detected orphaned remote node %s", rsc_id);
         node = pcmk_find_node(scheduler, rsc_id);
         if (node == NULL) {
             node = pe_create_node(rsc_id, rsc_id, PCMK_VALUE_REMOTE, NULL,
                                   scheduler);
         }
         link_rsc2remotenode(scheduler, rsc);
 
         if (node) {
             crm_trace("Setting node %s as shutting down due to orphaned connection resource", rsc_id);
             node->details->shutdown = TRUE;
         }
     }
 
     if (crm_element_value(rsc_entry, PCMK__META_CONTAINER)) {
         // This removed resource needs to be mapped to a launcher
         crm_trace("Launched resource %s was removed from the configuration",
                   rsc_id);
         pcmk__set_rsc_flags(rsc, pcmk__rsc_removed_launched);
     }
     pcmk__set_rsc_flags(rsc, pcmk__rsc_removed);
     scheduler->resources = g_list_append(scheduler->resources, rsc);
     return rsc;
 }
 
 /*!
  * \internal
  * \brief Create orphan instance for anonymous clone resource history
  *
  * \param[in,out] parent     Clone resource that orphan will be added to
  * \param[in]     rsc_id     Orphan's resource ID
  * \param[in]     node       Where orphan is active (for logging only)
  * \param[in,out] scheduler  Scheduler data
  *
  * \return Newly added orphaned instance of \p parent
  */
 static pcmk_resource_t *
 create_anonymous_orphan(pcmk_resource_t *parent, const char *rsc_id,
                         const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
 {
     pcmk_resource_t *top = pe__create_clone_child(parent, scheduler);
     pcmk_resource_t *orphan = NULL;
 
     // find_rsc() because we might be a cloned group
     orphan = top->private->fns->find_rsc(top, rsc_id, NULL,
                                          pcmk_rsc_match_clone_only);
 
     pcmk__rsc_debug(parent, "Created orphan %s for %s: %s on %s",
                     top->id, parent->id, rsc_id, pcmk__node_name(node));
     return orphan;
 }
 
 /*!
  * \internal
  * \brief Check a node for an instance of an anonymous clone
  *
  * Return a child instance of the specified anonymous clone, in order of
  * preference: (1) the instance running on the specified node, if any;
  * (2) an inactive instance (i.e. within the total of \c PCMK_META_CLONE_MAX
  * instances); (3) a newly created orphan (that is, \c PCMK_META_CLONE_MAX
  * instances are already active).
  *
  * \param[in,out] scheduler  Scheduler data
  * \param[in]     node       Node on which to check for instance
  * \param[in,out] parent     Clone to check
  * \param[in]     rsc_id     Name of cloned resource in history (no instance)
  */
 static pcmk_resource_t *
 find_anonymous_clone(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
                      pcmk_resource_t *parent, const char *rsc_id)
 {
     GList *rIter = NULL;
     pcmk_resource_t *rsc = NULL;
     pcmk_resource_t *inactive_instance = NULL;
     gboolean skip_inactive = FALSE;
 
     CRM_ASSERT(pcmk__is_anonymous_clone(parent));
 
     // Check for active (or partially active, for cloned groups) instance
     pcmk__rsc_trace(parent, "Looking for %s on %s in %s",
                     rsc_id, pcmk__node_name(node), parent->id);
 
     for (rIter = parent->private->children;
          (rIter != NULL) && (rsc == NULL); rIter = rIter->next) {
 
         GList *locations = NULL;
         pcmk_resource_t *child = rIter->data;
 
         /* Check whether this instance is already known to be active or pending
          * anywhere, at this stage of unpacking. Because this function is called
          * for a resource before the resource's individual operation history
          * entries are unpacked, locations will generally not contain the
          * desired node.
          *
          * However, there are three exceptions:
          * (1) when child is a cloned group and we have already unpacked the
          *     history of another member of the group on the same node;
          * (2) when we've already unpacked the history of another numbered
          *     instance on the same node (which can happen if
          *     PCMK_META_GLOBALLY_UNIQUE was flipped from true to false); and
          * (3) when we re-run calculations on the same scheduler data as part of
          *     a simulation.
          */
         child->private->fns->location(child, &locations, 2);
         if (locations) {
             /* We should never associate the same numbered anonymous clone
              * instance with multiple nodes, and clone instances can't migrate,
              * so there must be only one location, regardless of history.
              */
             CRM_LOG_ASSERT(locations->next == NULL);
 
             if (pcmk__same_node((pcmk_node_t *) locations->data, node)) {
                 /* This child instance is active on the requested node, so check
                  * for a corresponding configured resource. We use find_rsc()
                  * instead of child because child may be a cloned group, and we
                  * need the particular member corresponding to rsc_id.
                  *
                  * If the history entry is orphaned, rsc will be NULL.
                  */
                 rsc = parent->private->fns->find_rsc(child, rsc_id, NULL,
                                                      pcmk_rsc_match_clone_only);
                 if (rsc) {
                     /* If there are multiple instance history entries for an
                      * anonymous clone in a single node's history (which can
                      * happen if PCMK_META_GLOBALLY_UNIQUE is switched from true
                      * to false), we want to consider the instances beyond the
                      * first as orphans, even if there are inactive instance
                      * numbers available.
                      */
                     if (rsc->private->active_nodes != NULL) {
                         crm_notice("Active (now-)anonymous clone %s has "
                                    "multiple (orphan) instance histories on %s",
                                    parent->id, pcmk__node_name(node));
                         skip_inactive = TRUE;
                         rsc = NULL;
                     } else {
                         pcmk__rsc_trace(parent, "Resource %s, active", rsc->id);
                     }
                 }
             }
             g_list_free(locations);
 
         } else {
             pcmk__rsc_trace(parent, "Resource %s, skip inactive", child->id);
             if (!skip_inactive && !inactive_instance
                 && !pcmk_is_set(child->flags, pcmk__rsc_blocked)) {
                 // Remember one inactive instance in case we don't find active
                 inactive_instance =
                     parent->private->fns->find_rsc(child, rsc_id, NULL,
                                                    pcmk_rsc_match_clone_only);
 
                 /* ... but don't use it if it was already associated with a
                  * pending action on another node
                  */
                 if (inactive_instance != NULL) {
                     const pcmk_node_t *pending_node = NULL;
 
                     pending_node = inactive_instance->private->pending_node;
                     if ((pending_node != NULL)
                         && !pcmk__same_node(pending_node, node)) {
                         inactive_instance = NULL;
                     }
                 }
             }
         }
     }
 
     if ((rsc == NULL) && !skip_inactive && (inactive_instance != NULL)) {
         pcmk__rsc_trace(parent, "Resource %s, empty slot",
                         inactive_instance->id);
         rsc = inactive_instance;
     }
 
     /* If the resource has PCMK_META_REQUIRES set to PCMK_VALUE_QUORUM or
      * PCMK_VALUE_NOTHING, and we don't have a clone instance for every node, we
      * don't want to consume a valid instance number for unclean nodes. Such
      * instances may appear to be active according to the history, but should be
      * considered inactive, so we can start an instance elsewhere. Treat such
      * instances as orphans.
      *
      * An exception is instances running on guest nodes -- since guest node
      * "fencing" is actually just a resource stop, the PCMK_META_REQUIRES
      * setting shouldn't apply.
      *
      * @TODO Ideally, we'd use an inactive instance number if it is not needed
      * for any clean instances. However, we don't know that at this point.
      */
     if ((rsc != NULL) && !pcmk_is_set(rsc->flags, pcmk__rsc_needs_fencing)
         && (!node->details->online || node->details->unclean)
         && !pcmk__is_guest_or_bundle_node(node)
         && !pe__is_universal_clone(parent, scheduler)) {
 
         rsc = NULL;
     }
 
     if (rsc == NULL) {
         rsc = create_anonymous_orphan(parent, rsc_id, node, scheduler);
         pcmk__rsc_trace(parent, "Resource %s, orphan", rsc->id);
     }
     return rsc;
 }
 
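 /*!
  * \internal
  * \brief Find the resource object for a history entry's resource ID
  *
  * This checks for the ID as-is, then as an anonymous clone instance, since
  * anonymous clone history must be mapped to a particular instance for the
  * given node.
  *
  * \param[in,out] scheduler  Scheduler data
  * \param[in]     node       Node that resource history is for
  * \param[in]     rsc_id     Resource ID from history
  *
  * \return Resource object for \p rsc_id, or NULL if none was found
  */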
 static pcmk_resource_t *
 unpack_find_resource(pcmk_scheduler_t *scheduler, const pcmk_node_t *node,
                      const char *rsc_id)
 {
     pcmk_resource_t *rsc = NULL;
     pcmk_resource_t *parent = NULL;
 
     crm_trace("looking for %s", rsc_id);
     rsc = pe_find_resource(scheduler->resources, rsc_id);
 
     if (rsc == NULL) {
         /* If we didn't find the resource by its name in the operation history,
          * check it again as a clone instance. Even when PCMK_META_CLONE_MAX=0,
          * we create a single :0 orphan to match against here.
          */
         char *clone0_id = clone_zero(rsc_id);
         pcmk_resource_t *clone0 = pe_find_resource(scheduler->resources,
                                                    clone0_id);
 
         if (clone0 && !pcmk_is_set(clone0->flags, pcmk__rsc_unique)) {
             rsc = clone0;
             parent = uber_parent(clone0);
             crm_trace("%s found as %s (%s)", rsc_id, clone0_id, parent->id);
         } else {
             crm_trace("%s is not known as %s either (orphan)",
                       rsc_id, clone0_id);
         }
         free(clone0_id);
 
     } else if (rsc->private->variant > pcmk__rsc_variant_primitive) {
         crm_trace("Resource history for %s is orphaned "
                   "because it is no longer primitive", rsc_id);
         return NULL;
 
     } else {
         parent = uber_parent(rsc);
     }
 
     if (pcmk__is_anonymous_clone(parent)) {
 
         if (pcmk__is_bundled(parent)) {
             rsc = pe__find_bundle_replica(parent->private->parent, node);
         } else {
             char *base = clone_strip(rsc_id);
 
             rsc = find_anonymous_clone(scheduler, node, parent, base);
             free(base);
             CRM_ASSERT(rsc != NULL);
         }
     }
 
     if (rsc && !pcmk__str_eq(rsc_id, rsc->id, pcmk__str_none)
         && !pcmk__str_eq(rsc_id, rsc->private->history_id, pcmk__str_none)) {
 
         pcmk__str_update(&(rsc->private->history_id), rsc_id);
         pcmk__rsc_debug(rsc, "Internally renamed %s on %s to %s%s",
                         rsc_id, pcmk__node_name(node), rsc->id,
                         pcmk_is_set(rsc->flags, pcmk__rsc_removed)? " (ORPHAN)" : "");
     }
     return rsc;
 }
 
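 /*!
  * \internal
  * \brief Process a history entry for a resource removed from the
  *        configuration
  *
  * \param[in]     rsc_entry  Resource's history entry in CIB status
  * \param[in]     node       Node that the history entry is for
  * \param[in,out] scheduler  Scheduler data
  *
  * \return Newly created resource object for the removed resource, or NULL on
  *         error
  */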
 static pcmk_resource_t *
 process_orphan_resource(const xmlNode *rsc_entry, const pcmk_node_t *node,
                         pcmk_scheduler_t *scheduler)
 {
     pcmk_resource_t *rsc = NULL;
     const char *rsc_id = crm_element_value(rsc_entry, PCMK_XA_ID);
 
     crm_debug("Detected orphan resource %s on %s",
               rsc_id, pcmk__node_name(node));
     rsc = create_fake_resource(rsc_id, rsc_entry, scheduler);
     if (rsc == NULL) {
         return NULL;
     }
 
     if (!pcmk_is_set(scheduler->flags, pcmk_sched_stop_removed_resources)) {
         pcmk__clear_rsc_flags(rsc, pcmk__rsc_managed);
 
     } else {
         CRM_CHECK(rsc != NULL, return NULL);
         pcmk__rsc_trace(rsc, "Added orphan %s", rsc->id);
         resource_location(rsc, NULL, -PCMK_SCORE_INFINITY,
                           "__orphan_do_not_run__", scheduler);
     }
     return rsc;
 }
 
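 /*!
  * \internal
  * \brief Update a resource's and its node's state based on the resource's
  *        current role and desired failure handling
  *
  * \param[in,out] rsc      Resource whose state is being processed
  * \param[in,out] node     Node that resource history is for
  * \param[in]     on_fail  How any failure of \p rsc should be handled
  */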
 static void
 process_rsc_state(pcmk_resource_t *rsc, pcmk_node_t *node,
                   enum pcmk__on_fail on_fail)
 {
     pcmk_node_t *tmpnode = NULL;
     char *reason = NULL;
     enum pcmk__on_fail save_on_fail = pcmk__on_fail_ignore;
     pcmk_scheduler_t *scheduler = NULL;
     bool known_active = false;
 
     CRM_ASSERT(rsc);
     scheduler = rsc->private->scheduler;
     known_active = (rsc->private->orig_role > pcmk_role_stopped);
     pcmk__rsc_trace(rsc, "Resource %s is %s on %s: on_fail=%s",
                     rsc->id, pcmk_role_text(rsc->private->orig_role),
                     pcmk__node_name(node), pcmk__on_fail_text(on_fail));
 
     /* process current state */
     if (rsc->private->orig_role != pcmk_role_unknown) {
         pcmk_resource_t *iter = rsc;
 
         while (iter) {
             if (g_hash_table_lookup(iter->private->probed_nodes,
                                     node->private->id) == NULL) {
                 pcmk_node_t *n = pe__copy_node(node);
 
                 pcmk__rsc_trace(rsc, "%s (%s in history) known on %s",
                                 rsc->id,
                                 pcmk__s(rsc->private->history_id, "the same"),
                                 pcmk__node_name(n));
                 g_hash_table_insert(iter->private->probed_nodes,
                                     (gpointer) n->private->id, n);
             }
             if (pcmk_is_set(iter->flags, pcmk__rsc_unique)) {
                 break;
             }
             iter = iter->private->parent;
         }
     }
 
     /* If a managed resource is believed to be running, but node is down ... */
     if (known_active && !node->details->online && !node->details->maintenance
         && pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
 
         gboolean should_fence = FALSE;
 
         /* If this is a guest node, fence it (regardless of whether fencing is
          * enabled, because guest node fencing is done by recovery of the
          * container resource rather than by the fencer). Mark the resource
          * we're processing as failed. When the guest comes back up, its
          * operation history in the CIB will be cleared, freeing the affected
          * resource to run again once we are sure we know its state.
          */
         if (pcmk__is_guest_or_bundle_node(node)) {
             pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
             should_fence = TRUE;
 
         } else if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
             if (pcmk__is_remote_node(node)
                 && (node->private->remote != NULL)
                 && !pcmk_is_set(node->private->remote->flags,
                                 pcmk__rsc_failed)) {
 
                 /* Setting unseen means that fencing of the remote node will
                  * occur only if the connection resource is not going to start
                  * somewhere. This allows connection resources on a failed
                  * cluster node to move to another node without requiring the
                  * remote nodes to be fenced as well.
                  */
                 pcmk__clear_node_flags(node, pcmk__node_seen);
                 reason = crm_strdup_printf("%s is active there (fencing will be"
                                            " revoked if remote connection can "
                                            "be re-established elsewhere)",
                                            rsc->id);
             }
             should_fence = TRUE;
         }
 
         if (should_fence) {
             if (reason == NULL) {
                reason = crm_strdup_printf("%s is thought to be active there", rsc->id);
             }
             pe_fence_node(scheduler, node, reason, FALSE);
         }
         free(reason);
     }
 
     /* In order to calculate priority_fencing_delay correctly, save the
      * failure information and pass it to native_add_running().
      */
     save_on_fail = on_fail;
 
     if (node->details->unclean) {
         /* No extra processing needed
          * Also allows resources to be started again after a node is shot
          */
         on_fail = pcmk__on_fail_ignore;
     }
 
     switch (on_fail) {
         case pcmk__on_fail_ignore:
             /* nothing to do */
             break;
 
         case pcmk__on_fail_demote:
             pcmk__set_rsc_flags(rsc, pcmk__rsc_failed);
             demote_action(rsc, node, FALSE);
             break;
 
         case pcmk__on_fail_fence_node:
             /* treat it as if it is still running
              * but also mark the node as unclean
              */
             reason = crm_strdup_printf("%s failed there", rsc->id);
             pe_fence_node(scheduler, node, reason, FALSE);
             free(reason);
             break;
 
         case pcmk__on_fail_standby_node:
             pcmk__set_node_flags(node,
                                  pcmk__node_standby|pcmk__node_fail_standby);
             break;
 
         case pcmk__on_fail_block:
             /* Clearing the managed flag will prevent any actions from being
              * sent for the resource
              */
             pcmk__clear_rsc_flags(rsc, pcmk__rsc_managed);
             pcmk__set_rsc_flags(rsc, pcmk__rsc_blocked);
             break;
 
         case pcmk__on_fail_ban:
             /* make sure it comes up somewhere else
              * or not at all
              */
             resource_location(rsc, node, -PCMK_SCORE_INFINITY,
                               "__action_migration_auto__", scheduler);
             break;
 
         case pcmk__on_fail_stop:
             pe__set_next_role(rsc, pcmk_role_stopped,
                               PCMK_META_ON_FAIL "=" PCMK_VALUE_STOP);
             break;
 
         case pcmk__on_fail_restart:
             if (known_active) {
                 pcmk__set_rsc_flags(rsc,
                                     pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
                 stop_action(rsc, node, FALSE);
             }
             break;
 
         case pcmk__on_fail_restart_container:
             pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
             if ((rsc->private->launcher != NULL) && pcmk__is_bundled(rsc)) {
                 /* A bundle's remote connection can run on a different node than
                  * the bundle's container. We don't necessarily know where the
                  * container is running yet, so remember it and add a stop
                  * action for it later.
                  */
                 scheduler->stop_needed = g_list_prepend(scheduler->stop_needed,
                                                         rsc->private->launcher);
             } else if (rsc->private->launcher != NULL) {
                 stop_action(rsc->private->launcher, node, FALSE);
             } else if (known_active) {
                 stop_action(rsc, node, FALSE);
             }
             break;
 
         case pcmk__on_fail_reset_remote:
             pcmk__set_rsc_flags(rsc, pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
             if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
                 tmpnode = NULL;
                 if (pcmk_is_set(rsc->flags, pcmk__rsc_is_remote_connection)) {
                     tmpnode = pcmk_find_node(scheduler, rsc->id);
                 }
                 if (pcmk__is_remote_node(tmpnode)
                     && !pcmk_is_set(tmpnode->private->flags,
                                     pcmk__node_remote_fenced)) {
                     /* The remote connection resource failed in a way that
                      * should result in fencing the remote node.
                      */
                     pe_fence_node(scheduler, tmpnode,
                                   "remote connection is unrecoverable", FALSE);
                 }
             }
 
             /* Require the stop action regardless of whether fencing is
              * occurring
              */
             if (known_active) {
                 stop_action(rsc, node, FALSE);
             }
 
             /* If reconnect delay is in use, prevent the connection from
              * exiting the "STOPPED" role until the failure is cleared by the
              * delay timeout.
              */
             if (rsc->private->remote_reconnect_ms > 0U) {
                 pe__set_next_role(rsc, pcmk_role_stopped, "remote reset");
             }
             break;
     }
 
     /* Ensure a remote connection failure forces an unclean Pacemaker Remote
      * node to be fenced. By marking the node as seen, the failure will result
      * in a fencing operation regardless of whether we attempt to reconnect
      * in this transition.
      */
     if (pcmk_all_flags_set(rsc->flags,
                            pcmk__rsc_failed|pcmk__rsc_is_remote_connection)) {
         tmpnode = pcmk_find_node(scheduler, rsc->id);
         if (tmpnode && tmpnode->details->unclean) {
             pcmk__set_node_flags(tmpnode, pcmk__node_seen);
         }
     }
 
     if (known_active) {
         if (pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
             if (pcmk_is_set(rsc->flags, pcmk__rsc_managed)) {
                 crm_notice("Removed resource %s is active on %s and will be "
                            "stopped when possible",
                            rsc->id, pcmk__node_name(node));
             } else {
                 crm_notice("Removed resource %s must be stopped manually on %s "
                            "because " PCMK_OPT_STOP_ORPHAN_RESOURCES
                            " is set to false", rsc->id, pcmk__node_name(node));
             }
         }
 
         native_add_running(rsc, node, scheduler,
                            (save_on_fail != pcmk__on_fail_ignore));
         switch (on_fail) {
             case pcmk__on_fail_ignore:
                 break;
             case pcmk__on_fail_demote:
             case pcmk__on_fail_block:
                 pcmk__set_rsc_flags(rsc, pcmk__rsc_failed);
                 break;
             default:
                 pcmk__set_rsc_flags(rsc,
                                     pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
                 break;
         }
 
     } else if ((rsc->private->history_id != NULL)
                && (strchr(rsc->private->history_id, ':') != NULL)) {
         /* Only do this for older status sections that included instance
          * numbers. Otherwise stopped instances will appear as orphans.
          */
         pcmk__rsc_trace(rsc, "Clearing history ID %s for %s (stopped)",
                         rsc->private->history_id, rsc->id);
         free(rsc->private->history_id);
         rsc->private->history_id = NULL;
 
     } else {
         GList *possible_matches = pe__resource_actions(rsc, node,
                                                        PCMK_ACTION_STOP, FALSE);
         GList *gIter = possible_matches;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pcmk_action_t *stop = (pcmk_action_t *) gIter->data;
 
             pcmk__set_action_flags(stop, pcmk__action_optional);
         }
 
         g_list_free(possible_matches);
     }
 
     /* A successful stop after migrate_to on the migration source doesn't make
      * the partially migrated resource stopped on the migration target.
      */
     if ((rsc->private->orig_role == pcmk_role_stopped)
         && (rsc->private->active_nodes != NULL)
         && (rsc->private->partial_migration_target != NULL)
         && pcmk__same_node(rsc->private->partial_migration_source, node)) {
 
         rsc->private->orig_role = pcmk_role_started;
     }
 }
 
 /*!
  * \internal
  * \brief Create optional actions for a resource's active recurring operations
  *
  * \param[in]     node            Node that resource history is for
  * \param[in,out] rsc             Resource that history is for
  * \param[in]     start_index     Index of resource's last start in history
  * \param[in]     stop_index      Index of resource's last stop in history
  * \param[in]     sorted_op_list  Resource's history entries, sorted by call ID
  * \param[in,out] scheduler       Scheduler data
  */
 static void
 process_recurring(pcmk_node_t *node, pcmk_resource_t *rsc,
                   int start_index, int stop_index,
                   GList *sorted_op_list, pcmk_scheduler_t *scheduler)
 {
     int counter = -1;
     const char *task = NULL;
     const char *status = NULL;
     GList *gIter = sorted_op_list;
 
     CRM_ASSERT(rsc);
     pcmk__rsc_trace(rsc, "%s: Start index %d, stop index = %d",
                     rsc->id, start_index, stop_index);
 
     for (; gIter != NULL; gIter = gIter->next) {
         xmlNode *rsc_op = (xmlNode *) gIter->data;
 
         guint interval_ms = 0;
         char *key = NULL;
         const char *id = pcmk__xe_id(rsc_op);
 
         counter++;
 
         if (node->details->online == FALSE) {
             pcmk__rsc_trace(rsc, "Skipping %s on %s: node is offline",
                             rsc->id, pcmk__node_name(node));
             break;
 
             /* Need to check if there's a monitor for role="Stopped" */
         } else if (start_index < stop_index && counter <= stop_index) {
             pcmk__rsc_trace(rsc, "Skipping %s on %s: resource is not active",
                             id, pcmk__node_name(node));
             continue;
 
         } else if (counter < start_index) {
             pcmk__rsc_trace(rsc, "Skipping %s on %s: old %d",
                             id, pcmk__node_name(node), counter);
             continue;
         }
 
         crm_element_value_ms(rsc_op, PCMK_META_INTERVAL, &interval_ms);
         if (interval_ms == 0) {
             pcmk__rsc_trace(rsc, "Skipping %s on %s: non-recurring",
                             id, pcmk__node_name(node));
             continue;
         }
 
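         /* An op-status of "-1" corresponds to PCMK_EXEC_PENDING, meaning the
          * operation was initiated but has not completed
          */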
         status = crm_element_value(rsc_op, PCMK__XA_OP_STATUS);
         if (pcmk__str_eq(status, "-1", pcmk__str_casei)) {
             pcmk__rsc_trace(rsc, "Skipping %s on %s: status",
                             id, pcmk__node_name(node));
             continue;
         }
         task = crm_element_value(rsc_op, PCMK_XA_OPERATION);
         /* create the action */
         key = pcmk__op_key(rsc->id, task, interval_ms);
         pcmk__rsc_trace(rsc, "Creating %s on %s", key, pcmk__node_name(node));
         custom_action(rsc, key, task, node, TRUE, scheduler);
     }
 }
 
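 /* Illustrative example (not taken from any real history): given a sorted
  * history of
  *   0: start, 1: monitor (rc=0), 2: stop (completed), 3: start, 4: monitor
  * this function sets *stop_index to 2 and *start_index to 3, so that only
  * entries 3 and 4 count toward the resource's current state on the node.
  */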
 void
 calculate_active_ops(const GList *sorted_op_list, int *start_index,
                      int *stop_index)
 {
     int counter = -1;
     int implied_monitor_start = -1;
     int implied_clone_start = -1;
     const char *task = NULL;
     const char *status = NULL;
 
     *stop_index = -1;
     *start_index = -1;
 
     for (const GList *iter = sorted_op_list; iter != NULL; iter = iter->next) {
         const xmlNode *rsc_op = (const xmlNode *) iter->data;
 
         counter++;
 
         task = crm_element_value(rsc_op, PCMK_XA_OPERATION);
         status = crm_element_value(rsc_op, PCMK__XA_OP_STATUS);
 
         if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei)
             && pcmk__str_eq(status, "0", pcmk__str_casei)) {
             *stop_index = counter;
 
         } else if (pcmk__strcase_any_of(task, PCMK_ACTION_START,
                                         PCMK_ACTION_MIGRATE_FROM, NULL)) {
             *start_index = counter;
 
         } else if ((implied_monitor_start <= *stop_index)
                    && pcmk__str_eq(task, PCMK_ACTION_MONITOR,
                                    pcmk__str_casei)) {
             const char *rc = crm_element_value(rsc_op, PCMK__XA_RC_CODE);
 
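             // rc 0 (PCMK_OCF_OK) or 8 (PCMK_OCF_RUNNING_PROMOTED) implies the
             // resource was found active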
             if (pcmk__strcase_any_of(rc, "0", "8", NULL)) {
                 implied_monitor_start = counter;
             }
         } else if (pcmk__strcase_any_of(task, PCMK_ACTION_PROMOTE,
                                         PCMK_ACTION_DEMOTE, NULL)) {
             implied_clone_start = counter;
         }
     }
 
     if (*start_index == -1) {
         if (implied_clone_start != -1) {
             *start_index = implied_clone_start;
         } else if (implied_monitor_start != -1) {
             *start_index = implied_monitor_start;
         }
     }
 }
 
 // If resource history entry has shutdown lock, remember lock node and time
 static void
 unpack_shutdown_lock(const xmlNode *rsc_entry, pcmk_resource_t *rsc,
                      const pcmk_node_t *node, pcmk_scheduler_t *scheduler)
 {
     time_t lock_time = 0;   // When lock started (i.e. node shutdown time)
 
     if ((crm_element_value_epoch(rsc_entry, PCMK_OPT_SHUTDOWN_LOCK,
                                  &lock_time) == pcmk_ok) && (lock_time != 0)) {
 
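         /* scheduler->shutdown_lock here is the configured lock limit in
          * seconds (0 means shutdown locks never expire)
          */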
         if ((scheduler->shutdown_lock > 0)
             && (get_effective_time(scheduler)
                 > (lock_time + scheduler->shutdown_lock))) {
             pcmk__rsc_info(rsc, "Shutdown lock for %s on %s expired",
                            rsc->id, pcmk__node_name(node));
             pe__clear_resource_history(rsc, node);
         } else {
             rsc->private->lock_node = node;
             rsc->private->lock_time = lock_time;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Unpack one \c PCMK__XE_LRM_RESOURCE entry from a node's CIB status
  *
  * \param[in,out] node          Node whose status is being unpacked
  * \param[in]     lrm_resource  \c PCMK__XE_LRM_RESOURCE XML being unpacked
  * \param[in,out] scheduler     Scheduler data
  *
  * \return Resource corresponding to the entry, or NULL if no operation history
  */
 static pcmk_resource_t *
 unpack_lrm_resource(pcmk_node_t *node, const xmlNode *lrm_resource,
                     pcmk_scheduler_t *scheduler)
 {
     GList *gIter = NULL;
     int stop_index = -1;
     int start_index = -1;
     enum rsc_role_e req_role = pcmk_role_unknown;
 
     const char *rsc_id = pcmk__xe_id(lrm_resource);
 
     pcmk_resource_t *rsc = NULL;
     GList *op_list = NULL;
     GList *sorted_op_list = NULL;
 
     xmlNode *rsc_op = NULL;
     xmlNode *last_failure = NULL;
 
     enum pcmk__on_fail on_fail = pcmk__on_fail_ignore;
     enum rsc_role_e saved_role = pcmk_role_unknown;
 
     if (rsc_id == NULL) {
         pcmk__config_err("Ignoring invalid " PCMK__XE_LRM_RESOURCE
                          " entry: No " PCMK_XA_ID);
         crm_log_xml_info(lrm_resource, "missing-id");
         return NULL;
     }
     crm_trace("Unpacking " PCMK__XE_LRM_RESOURCE " for %s on %s",
               rsc_id, pcmk__node_name(node));
 
     /* Build a list of individual PCMK__XE_LRM_RSC_OP entries, so we can sort
      * them
      */
     for (rsc_op = pcmk__xe_first_child(lrm_resource, PCMK__XE_LRM_RSC_OP, NULL,
                                        NULL);
          rsc_op != NULL; rsc_op = pcmk__xe_next_same(rsc_op)) {
 
         op_list = g_list_prepend(op_list, rsc_op);
     }
 
     if (!pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
         if (op_list == NULL) {
             // If there are no operations, there is nothing to do
             return NULL;
         }
     }
 
     /* find the resource */
     rsc = unpack_find_resource(scheduler, node, rsc_id);
     if (rsc == NULL) {
         if (op_list == NULL) {
             // If there are no operations, there is nothing to do
             return NULL;
         } else {
             rsc = process_orphan_resource(lrm_resource, node, scheduler);
         }
     }
     CRM_ASSERT(rsc != NULL);
 
     // Check whether the resource is "shutdown-locked" to this node
     if (pcmk_is_set(scheduler->flags, pcmk_sched_shutdown_lock)) {
         unpack_shutdown_lock(lrm_resource, rsc, node, scheduler);
     }
 
     /* process operations */
     saved_role = rsc->private->orig_role;
     rsc->private->orig_role = pcmk_role_unknown;
     sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
 
     for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
         xmlNode *rsc_op = (xmlNode *) gIter->data;
 
         unpack_rsc_op(rsc, node, rsc_op, &last_failure, &on_fail);
     }
 
     /* create active recurring operations as optional */
     calculate_active_ops(sorted_op_list, &start_index, &stop_index);
     process_recurring(node, rsc, start_index, stop_index, sorted_op_list,
                       scheduler);
 
     /* No need to free the contents, which are owned by the CIB XML */
     g_list_free(sorted_op_list);
 
     process_rsc_state(rsc, node, on_fail);
 
     if (get_target_role(rsc, &req_role)) {
         if ((rsc->private->next_role == pcmk_role_unknown)
             || (req_role < rsc->private->next_role)) {
 
             pe__set_next_role(rsc, req_role, PCMK_META_TARGET_ROLE);
 
         } else if (req_role > rsc->private->next_role) {
             pcmk__rsc_info(rsc,
                            "%s: Not overwriting calculated next role %s"
                            " with requested next role %s",
                            rsc->id, pcmk_role_text(rsc->private->next_role),
                            pcmk_role_text(req_role));
         }
     }
 
     if (saved_role > rsc->private->orig_role) {
         rsc->private->orig_role = saved_role;
     }
 
     return rsc;
 }
 
 static void
 handle_removed_launched_resources(const xmlNode *lrm_rsc_list,
                                   pcmk_scheduler_t *scheduler)
 {
     for (const xmlNode *rsc_entry = pcmk__xe_first_child(lrm_rsc_list, NULL,
                                                          NULL, NULL);
          rsc_entry != NULL; rsc_entry = pcmk__xe_next(rsc_entry)) {
 
         pcmk_resource_t *rsc;
         pcmk_resource_t *launcher = NULL;
         const char *rsc_id;
         const char *launcher_id = NULL;
 
         if (!pcmk__xe_is(rsc_entry, PCMK__XE_LRM_RESOURCE)) {
             continue;
         }
 
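         /* The PCMK__META_CONTAINER attribute on a history entry records the
          * ID of the resource that launched this one
          */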
         launcher_id = crm_element_value(rsc_entry, PCMK__META_CONTAINER);
         rsc_id = crm_element_value(rsc_entry, PCMK_XA_ID);
         if ((launcher_id == NULL) || (rsc_id == NULL)) {
             continue;
         }
 
         launcher = pe_find_resource(scheduler->resources, launcher_id);
         if (launcher == NULL) {
             continue;
         }
 
         rsc = pe_find_resource(scheduler->resources, rsc_id);
         if ((rsc == NULL) || (rsc->private->launcher != NULL)
             || !pcmk_is_set(rsc->flags, pcmk__rsc_removed_launched)) {
             continue;
         }
 
         pcmk__rsc_trace(rsc, "Mapped launcher of removed resource %s to %s",
                         rsc->id, launcher_id);
         rsc->private->launcher = launcher;
         launcher->private->launched = g_list_append(launcher->private->launched,
                                                     rsc);
     }
 }
 
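 /* The status XML unpacked below has roughly this shape (element and attribute
  * names per the PCMK__XE_ and PCMK_XA_ constants):
  *
  *   <node_state uname="node1">
  *     <lrm>
  *       <lrm_resources>
  *         <lrm_resource id="rsc1">
  *           <lrm_rsc_op id="rsc1_last_0" operation="start" .../>
  *         </lrm_resource>
  *       </lrm_resources>
  *     </lrm>
  *   </node_state>
  */
 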
 /*!
  * \internal
  * \brief Unpack one node's lrm status section
  *
  * \param[in,out] node       Node whose status is being unpacked
  * \param[in]     xml        CIB node state XML
  * \param[in,out] scheduler  Scheduler data
  */
 static void
 unpack_node_lrm(pcmk_node_t *node, const xmlNode *xml,
                 pcmk_scheduler_t *scheduler)
 {
     bool found_removed_launched_resource = false;
 
     // Drill down to PCMK__XE_LRM_RESOURCES section
     xml = pcmk__xe_first_child(xml, PCMK__XE_LRM, NULL, NULL);
     if (xml == NULL) {
         return;
     }
     xml = pcmk__xe_first_child(xml, PCMK__XE_LRM_RESOURCES, NULL, NULL);
     if (xml == NULL) {
         return;
     }
 
     // Unpack each PCMK__XE_LRM_RESOURCE entry
     for (const xmlNode *rsc_entry = pcmk__xe_first_child(xml,
                                                          PCMK__XE_LRM_RESOURCE,
                                                          NULL, NULL);
          rsc_entry != NULL; rsc_entry = pcmk__xe_next_same(rsc_entry)) {
 
         pcmk_resource_t *rsc = unpack_lrm_resource(node, rsc_entry, scheduler);
 
         if ((rsc != NULL)
             && pcmk_is_set(rsc->flags, pcmk__rsc_removed_launched)) {
             found_removed_launched_resource = true;
         }
     }
 
     /* Now that all resource state has been unpacked for this node, map any
      * removed launched resources to their launchers.
      */
     if (found_removed_launched_resource) {
         handle_removed_launched_resources(xml, scheduler);
     }
 }
 
 static void
 set_active(pcmk_resource_t *rsc)
 {
     const pcmk_resource_t *top = pe__const_top_resource(rsc, false);
 
     if (top && pcmk_is_set(top->flags, pcmk__rsc_promotable)) {
         rsc->private->orig_role = pcmk_role_unpromoted;
     } else {
         rsc->private->orig_role = pcmk_role_started;
     }
 }
 
 static void
 set_node_score(gpointer key, gpointer value, gpointer user_data)
 {
     pcmk_node_t *node = value;
     int *score = user_data;
 
     node->assign->score = *score;
 }
 
 #define XPATH_NODE_STATE "/" PCMK_XE_CIB "/" PCMK_XE_STATUS \
                          "/" PCMK__XE_NODE_STATE
 #define SUB_XPATH_LRM_RESOURCE "/" PCMK__XE_LRM             \
                                "/" PCMK__XE_LRM_RESOURCES   \
                                "/" PCMK__XE_LRM_RESOURCE
 #define SUB_XPATH_LRM_RSC_OP "/" PCMK__XE_LRM_RSC_OP
 
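 /* As a rough illustration, for resource "rsc1", operation "monitor", and node
  * "node1", find_lrm_op() below assembles an XPath like:
  *
  *   /cib/status/node_state[@uname='node1']
  *       /lrm/lrm_resources/lrm_resource[@id='rsc1']
  *       /lrm_rsc_op[@operation='monitor']
  *
  * (shown wrapped here; the actual string is a single expression)
  */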
 static xmlNode *
 find_lrm_op(const char *resource, const char *op, const char *node, const char *source,
             int target_rc, pcmk_scheduler_t *scheduler)
 {
     GString *xpath = NULL;
     xmlNode *xml = NULL;
 
     CRM_CHECK((resource != NULL) && (op != NULL) && (node != NULL),
               return NULL);
 
     xpath = g_string_sized_new(256);
     pcmk__g_strcat(xpath,
                    XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='", node, "']"
                    SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='", resource, "']"
                    SUB_XPATH_LRM_RSC_OP "[@" PCMK_XA_OPERATION "='", op, "'",
                    NULL);
 
     /* Need to check against transition_magic too? */
     if ((source != NULL) && (strcmp(op, PCMK_ACTION_MIGRATE_TO) == 0)) {
         pcmk__g_strcat(xpath,
                        " and @" PCMK__META_MIGRATE_TARGET "='", source, "']",
                        NULL);
 
     } else if ((source != NULL)
                && (strcmp(op, PCMK_ACTION_MIGRATE_FROM) == 0)) {
         pcmk__g_strcat(xpath,
                        " and @" PCMK__META_MIGRATE_SOURCE "='", source, "']",
                        NULL);
     } else {
         g_string_append_c(xpath, ']');
     }
 
     xml = get_xpath_object((const char *) xpath->str, scheduler->input,
                            LOG_DEBUG);
     g_string_free(xpath, TRUE);
 
     if (xml && target_rc >= 0) {
         int rc = PCMK_OCF_UNKNOWN_ERROR;
         int status = PCMK_EXEC_ERROR;
 
         crm_element_value_int(xml, PCMK__XA_RC_CODE, &rc);
         crm_element_value_int(xml, PCMK__XA_OP_STATUS, &status);
         if ((rc != target_rc) || (status != PCMK_EXEC_DONE)) {
             return NULL;
         }
     }
     return xml;
 }
 
 static xmlNode *
 find_lrm_resource(const char *rsc_id, const char *node_name,
                   pcmk_scheduler_t *scheduler)
 {
     GString *xpath = NULL;
     xmlNode *xml = NULL;
 
     CRM_CHECK((rsc_id != NULL) && (node_name != NULL), return NULL);
 
     xpath = g_string_sized_new(256);
     pcmk__g_strcat(xpath,
                    XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='", node_name, "']"
                    SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='", rsc_id, "']",
                    NULL);
 
     xml = get_xpath_object((const char *) xpath->str, scheduler->input,
                            LOG_DEBUG);
 
     g_string_free(xpath, TRUE);
     return xml;
 }
 
 /*!
  * \internal
  * \brief Check whether a resource has no completed action history on a node
  *
  * \param[in,out] rsc        Resource to check
  * \param[in]     node_name  Node to check
  *
  * \return true if \p rsc is unknown on \p node_name, otherwise false
  */
 static bool
 unknown_on_node(pcmk_resource_t *rsc, const char *node_name)
 {
     bool result = false;
     xmlXPathObjectPtr search;
     char *xpath = NULL;
 
     xpath = crm_strdup_printf(XPATH_NODE_STATE "[@" PCMK_XA_UNAME "='%s']"
                               SUB_XPATH_LRM_RESOURCE "[@" PCMK_XA_ID "='%s']"
                               SUB_XPATH_LRM_RSC_OP
                               "[@" PCMK__XA_RC_CODE "!='%d']",
                               node_name, rsc->id, PCMK_OCF_UNKNOWN);
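 
     /* Pending operations are recorded with an rc-code of PCMK_OCF_UNKNOWN, so
      * matching any other rc-code finds completed results only
      */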
 
     search = xpath_search(rsc->private->scheduler->input, xpath);
     result = (numXpathResults(search) == 0);
     freeXpathObject(search);
     free(xpath);
     return result;
 }
 
 /*!
  * \internal
  * \brief Check whether a probe/monitor indicating the resource was not running
  *        on a node happened after some event
  *
  * \param[in]     rsc_id     Resource being checked
  * \param[in]     node_name  Node being checked
  * \param[in]     xml_op     Event that monitor is being compared to
  * \param[in,out] scheduler  Scheduler data
  *
  * \return true if such a monitor happened after the event, false otherwise
  */
 static bool
 monitor_not_running_after(const char *rsc_id, const char *node_name,
                           const xmlNode *xml_op, pcmk_scheduler_t *scheduler)
 {
     /* Any probe/monitor operation on the node indicating it was not running
      * there
      */
     xmlNode *monitor = find_lrm_op(rsc_id, PCMK_ACTION_MONITOR, node_name,
                                    NULL, PCMK_OCF_NOT_RUNNING, scheduler);
 
     return (monitor != NULL) && (pe__is_newer_op(monitor, xml_op) > 0);
 }
 
 /*!
  * \internal
  * \brief Check whether any non-monitor operation on a node happened after some
  *        event
  *
  * \param[in]     rsc_id     Resource being checked
  * \param[in]     node_name  Node being checked
  * \param[in]     xml_op     Event that non-monitor is being compared to
  * \param[in,out] scheduler  Scheduler data
  *
  * \return true if such an operation happened after the event, false otherwise
  */
 static bool
 non_monitor_after(const char *rsc_id, const char *node_name,
                   const xmlNode *xml_op, pcmk_scheduler_t *scheduler)
 {
     xmlNode *lrm_resource = NULL;
 
     lrm_resource = find_lrm_resource(rsc_id, node_name, scheduler);
     if (lrm_resource == NULL) {
         return false;
     }
 
     for (xmlNode *op = pcmk__xe_first_child(lrm_resource, PCMK__XE_LRM_RSC_OP,
                                             NULL, NULL);
          op != NULL; op = pcmk__xe_next_same(op)) {
 
         const char *task = NULL;
 
         if (op == xml_op) {
             continue;
         }
 
         task = crm_element_value(op, PCMK_XA_OPERATION);
 
         if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_STOP,
                              PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
                              NULL)
             && pe__is_newer_op(op, xml_op) > 0) {
             return true;
         }
     }
 
     return false;
 }
 
 /*!
  * \internal
  * \brief Check whether the resource has newer state on a node after a migration
  *        attempt
  *
  * \param[in]     rsc_id        Resource being checked
  * \param[in]     node_name     Node being checked
  * \param[in]     migrate_to    Any migrate_to event that is being compared to
  * \param[in]     migrate_from  Any migrate_from event that is being compared to
  * \param[in,out] scheduler     Scheduler data
  *
  * \return true if such an operation happened after the event, false otherwise
  */
 static bool
 newer_state_after_migrate(const char *rsc_id, const char *node_name,
                           const xmlNode *migrate_to,
                           const xmlNode *migrate_from,
                           pcmk_scheduler_t *scheduler)
 {
     const xmlNode *xml_op = (migrate_from != NULL)? migrate_from : migrate_to;
     const char *source = crm_element_value(xml_op, PCMK__META_MIGRATE_SOURCE);
 
     /* It's preferred to compare to the migration event on the same node, if
      * one exists, since call IDs are more reliable.
      */
     if ((xml_op != migrate_to) && (migrate_to != NULL)
         && pcmk__str_eq(node_name, source, pcmk__str_casei)) {
 
         xml_op = migrate_to;
     }
 
     /* If there's any newer non-monitor operation on the node, or any newer
      * probe/monitor operation on the node indicating it was not running there,
      * the migration events potentially no longer matter for the node.
      */
     return non_monitor_after(rsc_id, node_name, xml_op, scheduler)
            || monitor_not_running_after(rsc_id, node_name, xml_op, scheduler);
 }
 
 /*!
  * \internal
  * \brief Parse migration source and target node names from history entry
  *
  * \param[in]  entry        Resource history entry for a migration action
  * \param[in]  source_node  If not NULL, source must match this node
  * \param[in]  target_node  If not NULL, target must match this node
  * \param[out] source_name  Where to store migration source node name
  * \param[out] target_name  Where to store migration target node name
  *
  * \return Standard Pacemaker return code
  */
 static int
 get_migration_node_names(const xmlNode *entry, const pcmk_node_t *source_node,
                          const pcmk_node_t *target_node,
                          const char **source_name, const char **target_name)
 {
     *source_name = crm_element_value(entry, PCMK__META_MIGRATE_SOURCE);
     *target_name = crm_element_value(entry, PCMK__META_MIGRATE_TARGET);
     if ((*source_name == NULL) || (*target_name == NULL)) {
         pcmk__config_err("Ignoring resource history entry %s without "
                          PCMK__META_MIGRATE_SOURCE " and "
                          PCMK__META_MIGRATE_TARGET, pcmk__xe_id(entry));
         return pcmk_rc_unpack_error;
     }
 
     if ((source_node != NULL)
         && !pcmk__str_eq(*source_name, source_node->private->name,
                          pcmk__str_casei|pcmk__str_null_matches)) {
         pcmk__config_err("Ignoring resource history entry %s because "
                          PCMK__META_MIGRATE_SOURCE "='%s' does not match %s",
                          pcmk__xe_id(entry), *source_name,
                          pcmk__node_name(source_node));
         return pcmk_rc_unpack_error;
     }
 
     if ((target_node != NULL)
         && !pcmk__str_eq(*target_name, target_node->private->name,
                          pcmk__str_casei|pcmk__str_null_matches)) {
         pcmk__config_err("Ignoring resource history entry %s because "
                          PCMK__META_MIGRATE_TARGET "='%s' does not match %s",
                          pcmk__xe_id(entry), *target_name,
                          pcmk__node_name(target_node));
         return pcmk_rc_unpack_error;
     }
 
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Add a migration source to a resource's list of dangling migrations
  *
  * If the migrate_to and migrate_from actions in a live migration both
  * succeeded, but there is no stop on the source, the migration is considered
  * "dangling." Add the source to the resource's dangling migration list, which
  * will be used to schedule a stop on the source without affecting the target.
  *
  * \param[in,out] rsc   Resource involved in migration
  * \param[in]     node  Migration source
  */
 static void
 add_dangling_migration(pcmk_resource_t *rsc, const pcmk_node_t *node)
 {
     pcmk__rsc_trace(rsc, "Dangling migration of %s requires stop on %s",
                     rsc->id, pcmk__node_name(node));
     rsc->private->orig_role = pcmk_role_stopped;
     rsc->private->dangling_migration_sources =
         g_list_prepend(rsc->private->dangling_migration_sources,
                        (gpointer) node);
 }
 
 /*!
  * \internal
  * \brief Update resource role etc. after a successful migrate_to action
  *
  * \param[in,out] history  Parsed action result history
  */
 static void
 unpack_migrate_to_success(struct action_history *history)
 {
     /* A complete migration sequence is:
      * 1. migrate_to on source node (which succeeded if we get to this function)
      * 2. migrate_from on target node
      * 3. stop on source node
      *
      * If no migrate_from has happened, the migration is considered to be
      * "partial". If the migrate_from succeeded but no stop has happened, the
      * migration is considered to be "dangling".
      *
      * If a successful migrate_to and stop have happened on the source node, we
      * still need to check for a partial migration, due to scenarios (easier to
      * produce with batch-limit=1) like:
      *
      * - A resource is migrating from node1 to node2, and a migrate_to is
      *   initiated for it on node1.
      *
      * - node2 goes into standby mode while the migrate_to is pending, which
      *   aborts the transition.
      *
      * - Upon completion of the migrate_to, a new transition schedules a stop
      *   on both nodes and a start on node1.
      *
      * - If the new transition is aborted for any reason while the resource is
      *   stopping on node1, the transition after that stop completes will see
      *   the migrate_to and stop on the source, but it's still a partial
      *   migration, and the resource must be stopped on node2 because it is
      *   potentially active there due to the migrate_to.
      *
      *   We also need to take into account that either node's history may be
      *   cleared at any point in the migration process.
      */
     int from_rc = PCMK_OCF_OK;
     int from_status = PCMK_EXEC_PENDING;
     pcmk_node_t *target_node = NULL;
     xmlNode *migrate_from = NULL;
     const char *source = NULL;
     const char *target = NULL;
     bool source_newer_op = false;
     bool target_newer_state = false;
     bool active_on_target = false;
     pcmk_scheduler_t *scheduler = history->rsc->private->scheduler;
 
     // Get source and target node names from XML
     if (get_migration_node_names(history->xml, history->node, NULL, &source,
                                  &target) != pcmk_rc_ok) {
         return;
     }
 
     // Check for newer state on the source
     source_newer_op = non_monitor_after(history->rsc->id, source, history->xml,
                                         scheduler);
 
     // Check for a migrate_from action from this source on the target
     migrate_from = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_FROM,
                                target, source, -1, scheduler);
     if (migrate_from != NULL) {
         if (source_newer_op) {
             /* There's a newer non-monitor operation on the source and a
              * migrate_from on the target, so this migrate_to is irrelevant to
              * the resource's state.
              */
             return;
         }
         crm_element_value_int(migrate_from, PCMK__XA_RC_CODE, &from_rc);
         crm_element_value_int(migrate_from, PCMK__XA_OP_STATUS, &from_status);
     }
 
     /* If the resource has newer state on both the source and target after the
      * migration events, this migrate_to is irrelevant to the resource's state.
      */
     target_newer_state = newer_state_after_migrate(history->rsc->id, target,
                                                    history->xml, migrate_from,
                                                    scheduler);
     if (source_newer_op && target_newer_state) {
         return;
     }
 
     /* Check for dangling migration (migrate_from succeeded but stop not done).
      * We know there's no stop because we already returned if the target has a
      * migrate_from and the source has any newer non-monitor operation.
      */
     if ((from_rc == PCMK_OCF_OK) && (from_status == PCMK_EXEC_DONE)) {
         add_dangling_migration(history->rsc, history->node);
         return;
     }
 
     /* Without newer state, this migrate_to implies the resource is active.
      * (Clones are not allowed to migrate, so role can't be promoted.)
      */
     history->rsc->private->orig_role = pcmk_role_started;
 
     target_node = pcmk_find_node(scheduler, target);
     active_on_target = !target_newer_state && (target_node != NULL)
                        && target_node->details->online;
 
     if (from_status != PCMK_EXEC_PENDING) { // migrate_from failed on target
         if (active_on_target) {
             native_add_running(history->rsc, target_node, scheduler, TRUE);
         } else {
             // Mark resource as failed, require recovery, and prevent migration
             pcmk__set_rsc_flags(history->rsc,
                                 pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
             pcmk__clear_rsc_flags(history->rsc, pcmk__rsc_migratable);
         }
         return;
     }
 
     // The migrate_from is pending, complete but erased, or to be scheduled
 
     /* If there is no history at all for the resource on an online target, then
      * it was likely cleaned. Just return, and we'll schedule a probe. Once we
      * have the probe result, it will be reflected in target_newer_state.
      */
     if ((target_node != NULL) && target_node->details->online
         && unknown_on_node(history->rsc, target)) {
         return;
     }
 
     if (active_on_target) {
         pcmk_node_t *source_node = pcmk_find_node(scheduler, source);
 
         native_add_running(history->rsc, target_node, scheduler, FALSE);
         if ((source_node != NULL) && source_node->details->online) {
             /* This is a partial migration: the migrate_to completed
              * successfully on the source, but the migrate_from has not
              * completed. Remember the source and target; if the newly
              * chosen target remains the same when we schedule actions
              * later, we may continue with the migration.
              */
             history->rsc->private->partial_migration_target = target_node;
             history->rsc->private->partial_migration_source = source_node;
         }
 
     } else if (!source_newer_op) {
         // Mark resource as failed, require recovery, and prevent migration
         pcmk__set_rsc_flags(history->rsc,
                             pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
         pcmk__clear_rsc_flags(history->rsc, pcmk__rsc_migratable);
     }
 }
 
 /*!
  * \internal
  * \brief Update resource role etc. after a failed migrate_to action
  *
  * \param[in,out] history  Parsed action result history
  */
 static void
 unpack_migrate_to_failure(struct action_history *history)
 {
     xmlNode *target_migrate_from = NULL;
     const char *source = NULL;
     const char *target = NULL;
     pcmk_scheduler_t *scheduler = history->rsc->private->scheduler;
 
     // Get source and target node names from XML
     if (get_migration_node_names(history->xml, history->node, NULL, &source,
                                  &target) != pcmk_rc_ok) {
         return;
     }
 
     /* If a migration failed, we have to assume the resource is active. Clones
      * are not allowed to migrate, so role can't be promoted.
      */
     history->rsc->private->orig_role = pcmk_role_started;
 
     // Check for migrate_from on the target
     target_migrate_from = find_lrm_op(history->rsc->id,
                                       PCMK_ACTION_MIGRATE_FROM, target, source,
                                       PCMK_OCF_OK, scheduler);
 
     if (/* If the resource state is unknown on the target, it will likely be
          * probed there.
          * Don't just consider it running there. We will get back here anyway in
          * case the probe detects it's running there.
          */
         !unknown_on_node(history->rsc, target)
         /* If the resource has newer state on the target after the migration
          * events, this migrate_to no longer matters for the target.
          */
         && !newer_state_after_migrate(history->rsc->id, target, history->xml,
                                       target_migrate_from, scheduler)) {
         /* The resource has no newer state on the target, so assume it's still
          * active there (if it is up).
          */
         pcmk_node_t *target_node = pcmk_find_node(scheduler, target);
 
         if (target_node && target_node->details->online) {
             native_add_running(history->rsc, target_node, scheduler, FALSE);
         }
 
     } else if (!non_monitor_after(history->rsc->id, source, history->xml,
                                   scheduler)) {
         /* We know the resource has newer state on the target, but this
          * migrate_to still matters for the source as long as there's no newer
          * non-monitor operation there.
          */
 
         // Mark node as having dangling migration so we can force a stop later
         history->rsc->private->dangling_migration_sources =
             g_list_prepend(history->rsc->private->dangling_migration_sources,
                            (gpointer) history->node);
     }
 }
 
 /*!
  * \internal
  * \brief Update resource role etc. after a failed migrate_from action
  *
  * \param[in,out] history  Parsed action result history
  */
 static void
 unpack_migrate_from_failure(struct action_history *history)
 {
     xmlNode *source_migrate_to = NULL;
     const char *source = NULL;
     const char *target = NULL;
     pcmk_scheduler_t *scheduler = history->rsc->private->scheduler;
 
     // Get source and target node names from XML
     if (get_migration_node_names(history->xml, NULL, history->node, &source,
                                  &target) != pcmk_rc_ok) {
         return;
     }
 
     /* If a migration failed, we have to assume the resource is active. Clones
      * are not allowed to migrate, so role can't be promoted.
      */
     history->rsc->private->orig_role = pcmk_role_started;
 
     // Check for a migrate_to on the source
     source_migrate_to = find_lrm_op(history->rsc->id, PCMK_ACTION_MIGRATE_TO,
                                     source, target, PCMK_OCF_OK, scheduler);
 
     if (/* If the resource state is unknown on the source, it will likely be
          * probed there.
          * Don't just consider it running there. We will get back here anyway in
          * case the probe detects it's running there.
          */
         !unknown_on_node(history->rsc, source)
         /* If the resource has newer state on the source after the migration
          * events, this migrate_from no longer matters for the source.
          */
         && !newer_state_after_migrate(history->rsc->id, source,
                                       source_migrate_to, history->xml,
                                       scheduler)) {
         /* The resource has no newer state on the source, so assume it's still
          * active there (if it is up).
          */
         pcmk_node_t *source_node = pcmk_find_node(scheduler, source);
 
         if (source_node && source_node->details->online) {
             native_add_running(history->rsc, source_node, scheduler, TRUE);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Add an action to cluster's list of failed actions
  *
  * \param[in,out] history  Parsed action result history
  */
 static void
 record_failed_op(struct action_history *history)
 {
     const pcmk_scheduler_t *scheduler = history->rsc->private->scheduler;
 
     if (!(history->node->details->online)) {
         return;
     }
 
     for (const xmlNode *xIter = scheduler->failed->children;
          xIter != NULL; xIter = xIter->next) {
 
         const char *key = pcmk__xe_history_key(xIter);
         const char *uname = crm_element_value(xIter, PCMK_XA_UNAME);
 
         if (pcmk__str_eq(history->key, key, pcmk__str_none)
             && pcmk__str_eq(uname, history->node->private->name,
                             pcmk__str_casei)) {
             crm_trace("Skipping duplicate entry %s on %s",
                       history->key, pcmk__node_name(history->node));
             return;
         }
     }
 
     crm_trace("Adding entry for %s on %s to failed action list",
               history->key, pcmk__node_name(history->node));
     crm_xml_add(history->xml, PCMK_XA_UNAME, history->node->private->name);
     crm_xml_add(history->xml, PCMK__XA_RSC_ID, history->rsc->id);
     pcmk__xml_copy(scheduler->failed, history->xml);
 }
 
 static char *
 last_change_str(const xmlNode *xml_op)
 {
     time_t when;
     char *result = NULL;
 
     if (crm_element_value_epoch(xml_op, PCMK_XA_LAST_RC_CHANGE,
                                 &when) == pcmk_ok) {
         char *when_s = pcmk__epoch2str(&when, 0);
         const char *p = strchr(when_s, ' ');
 
         // Skip day of week to make message shorter
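         // (e.g. "Wed Jan 10 12:00:00 2024" becomes "Jan 10 12:00:00 2024")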
         if ((p != NULL) && (*(++p) != '\0')) {
             result = pcmk__str_copy(p);
         }
         free(when_s);
     }
 
     if (result == NULL) {
         result = pcmk__str_copy("unknown_time");
     }
 
     return result;
 }
 
 /*!
  * \internal
  * \brief Ban a resource (or its clone if an anonymous instance) from all nodes
  *
  * \param[in,out] rsc  Resource to ban
  */
 static void
 ban_from_all_nodes(pcmk_resource_t *rsc)
 {
     int score = -PCMK_SCORE_INFINITY;
     const pcmk_scheduler_t *scheduler = rsc->private->scheduler;
 
     if (rsc->private->parent != NULL) {
         pcmk_resource_t *parent = uber_parent(rsc);
 
         if (pcmk__is_anonymous_clone(parent)) {
             /* For anonymous clones, if an operation with
              * PCMK_META_ON_FAIL=PCMK_VALUE_STOP fails for any instance, the
              * entire clone must stop.
              */
             rsc = parent;
         }
     }
 
     // Ban the resource from all nodes
     crm_notice("%s will not be started under current conditions", rsc->id);
     if (rsc->private->allowed_nodes != NULL) {
         g_hash_table_destroy(rsc->private->allowed_nodes);
     }
     rsc->private->allowed_nodes = pe__node_list2table(scheduler->nodes);
     g_hash_table_foreach(rsc->private->allowed_nodes, set_node_score, &score);
 }
 
 /*!
  * \internal
  * \brief Get configured failure handling and role after failure for an action
  *
  * \param[in,out] history    Unpacked action history entry
  * \param[out]    on_fail    Where to set configured failure handling
  * \param[out]    fail_role  Where to set to role after failure
  */
 static void
 unpack_failure_handling(struct action_history *history,
                         enum pcmk__on_fail *on_fail,
                         enum rsc_role_e *fail_role)
 {
     xmlNode *config = pcmk__find_action_config(history->rsc, history->task,
                                                history->interval_ms, true);
 
     GHashTable *meta = pcmk__unpack_action_meta(history->rsc, history->node,
                                                 history->task,
                                                 history->interval_ms, config);
 
     const char *on_fail_str = g_hash_table_lookup(meta, PCMK_META_ON_FAIL);
 
     *on_fail = pcmk__parse_on_fail(history->rsc, history->task,
                                    history->interval_ms, on_fail_str);
     *fail_role = pcmk__role_after_failure(history->rsc, history->task, *on_fail,
                                           meta);
     g_hash_table_destroy(meta);
 }
 
 /*!
  * \internal
  * \brief Update resource role, failure handling, etc., after a failed action
  *
  * \param[in,out] history         Parsed action result history
  * \param[in]     config_on_fail  Action failure handling from configuration
  * \param[in]     fail_role       Resource's role after failure of this action
  * \param[out]    last_failure    This will be set to the history XML
  * \param[in,out] on_fail         Actual handling of action result
  */
 static void
 unpack_rsc_op_failure(struct action_history *history,
                       enum pcmk__on_fail config_on_fail,
                       enum rsc_role_e fail_role, xmlNode **last_failure,
                       enum pcmk__on_fail *on_fail)
 {
     bool is_probe = false;
     char *last_change_s = NULL;
     pcmk_scheduler_t *scheduler = history->rsc->private->scheduler;
 
     *last_failure = history->xml;
 
     is_probe = pcmk_xe_is_probe(history->xml);
     last_change_s = last_change_str(history->xml);
 
     if (!pcmk_is_set(scheduler->flags, pcmk_sched_symmetric_cluster)
         && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
         crm_trace("Unexpected result (%s%s%s) was recorded for "
                   "%s of %s on %s at %s " QB_XS " exit-status=%d id=%s",
                   services_ocf_exitcode_str(history->exit_status),
                   (pcmk__str_empty(history->exit_reason)? "" : ": "),
                   pcmk__s(history->exit_reason, ""),
                   (is_probe? "probe" : history->task), history->rsc->id,
                   pcmk__node_name(history->node), last_change_s,
                   history->exit_status, history->id);
     } else {
-        pcmk__sched_warn("Unexpected result (%s%s%s) was recorded for %s of "
+        pcmk__sched_warn(scheduler,
+                         "Unexpected result (%s%s%s) was recorded for %s of "
                          "%s on %s at %s " QB_XS " exit-status=%d id=%s",
                          services_ocf_exitcode_str(history->exit_status),
                          (pcmk__str_empty(history->exit_reason)? "" : ": "),
                          pcmk__s(history->exit_reason, ""),
                          (is_probe? "probe" : history->task), history->rsc->id,
                          pcmk__node_name(history->node), last_change_s,
                          history->exit_status, history->id);
 
         if (is_probe && (history->exit_status != PCMK_OCF_OK)
             && (history->exit_status != PCMK_OCF_NOT_RUNNING)
             && (history->exit_status != PCMK_OCF_RUNNING_PROMOTED)) {
 
             /* A failed (not just unexpected) probe result could mean the user
              * didn't know resources will be probed even where they can't run.
              */
             crm_notice("If it is not possible for %s to run on %s, see "
                        "the " PCMK_XA_RESOURCE_DISCOVERY " option for location "
                        "constraints",
                        history->rsc->id, pcmk__node_name(history->node));
         }
 
         record_failed_op(history);
     }
 
     free(last_change_s);
 
     if (*on_fail < config_on_fail) {
         pcmk__rsc_trace(history->rsc, "on-fail %s -> %s for %s",
                         pcmk__on_fail_text(*on_fail),
                         pcmk__on_fail_text(config_on_fail), history->key);
         *on_fail = config_on_fail;
     }
 
     if (strcmp(history->task, PCMK_ACTION_STOP) == 0) {
         resource_location(history->rsc, history->node, -PCMK_SCORE_INFINITY,
                           "__stop_fail__", scheduler);
 
     } else if (strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0) {
         unpack_migrate_to_failure(history);
 
     } else if (strcmp(history->task, PCMK_ACTION_MIGRATE_FROM) == 0) {
         unpack_migrate_from_failure(history);
 
     } else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
         history->rsc->private->orig_role = pcmk_role_promoted;
 
     } else if (strcmp(history->task, PCMK_ACTION_DEMOTE) == 0) {
         if (config_on_fail == pcmk__on_fail_block) {
             history->rsc->private->orig_role = pcmk_role_promoted;
             pe__set_next_role(history->rsc, pcmk_role_stopped,
                               "demote with " PCMK_META_ON_FAIL "=block");
 
         } else if (history->exit_status == PCMK_OCF_NOT_RUNNING) {
             history->rsc->private->orig_role = pcmk_role_stopped;
 
         } else {
             /* Staying in the promoted role would put the scheduler and
              * controller into a loop. Setting the role to unpromoted is not
              * dangerous because the resource will be stopped as part of
              * recovery, and any promotion will be ordered after that stop.
              */
             history->rsc->private->orig_role = pcmk_role_unpromoted;
         }
     }
 
     if (is_probe && (history->exit_status == PCMK_OCF_NOT_INSTALLED)) {
         /* leave stopped */
         pcmk__rsc_trace(history->rsc, "Leaving %s stopped", history->rsc->id);
         history->rsc->private->orig_role = pcmk_role_stopped;
 
     } else if (history->rsc->private->orig_role < pcmk_role_started) {
         pcmk__rsc_trace(history->rsc, "Setting %s active", history->rsc->id);
         set_active(history->rsc);
     }
 
     pcmk__rsc_trace(history->rsc,
                     "Resource %s: role=%s unclean=%s on_fail=%s fail_role=%s",
                     history->rsc->id,
                     pcmk_role_text(history->rsc->private->orig_role),
                     pcmk__btoa(history->node->details->unclean),
                     pcmk__on_fail_text(config_on_fail),
                     pcmk_role_text(fail_role));
 
     if ((fail_role != pcmk_role_started)
         && (history->rsc->private->next_role < fail_role)) {
         pe__set_next_role(history->rsc, fail_role, "failure");
     }
 
     if (fail_role == pcmk_role_stopped) {
         ban_from_all_nodes(history->rsc);
     }
 }
 
 /*!
  * \internal
  * \brief Block a resource with a failed action if it cannot be recovered
  *
  * If resource action is a failed stop and fencing is not possible, mark the
  * resource as unmanaged and blocked, since recovery cannot be done.
  *
  * \param[in,out] history  Parsed action history entry
  */
 static void
 block_if_unrecoverable(struct action_history *history)
 {
     char *last_change_s = NULL;
 
     if (strcmp(history->task, PCMK_ACTION_STOP) != 0) {
         return; // All actions besides stop are always recoverable
     }
     if (pe_can_fence(history->node->private->scheduler, history->node)) {
         return; // Failed stops are recoverable via fencing
     }
 
     last_change_s = last_change_str(history->xml);
-    pcmk__sched_err("No further recovery can be attempted for %s "
+    pcmk__sched_err(history->node->private->scheduler,
+                    "No further recovery can be attempted for %s "
                     "because %s on %s failed (%s%s%s) at %s "
                     QB_XS " rc=%d id=%s",
                     history->rsc->id, history->task,
                     pcmk__node_name(history->node),
                     services_ocf_exitcode_str(history->exit_status),
                     (pcmk__str_empty(history->exit_reason)? "" : ": "),
                     pcmk__s(history->exit_reason, ""),
                     last_change_s, history->exit_status, history->id);
 
     free(last_change_s);
 
     pcmk__clear_rsc_flags(history->rsc, pcmk__rsc_managed);
     pcmk__set_rsc_flags(history->rsc, pcmk__rsc_blocked);
 }
 
 /*!
  * \internal
  * \brief Update action history's execution status and why
  *
  * \param[in,out] history  Parsed action history entry
  * \param[out]    why      Where to store reason for update
  * \param[in]     value    New value
  * \param[in]     reason   Description of why value was changed
  */
 static inline void
 remap_because(struct action_history *history, const char **why, int value,
               const char *reason)
 {
     if (history->execution_status != value) {
         history->execution_status = value;
         *why = reason;
     }
 }
 
 /*!
  * \internal
  * \brief Remap informational monitor results and operation status
  *
  * For monitor results, certain OCF codes provide extended information to the
  * user about services that are not yet failed but not entirely healthy
  * either. These must be treated as the "normal" result by Pacemaker.
  *
  * For operation status, the action result can be used to determine an
  * appropriate status for the purposes of responding to the action. The status
  * provided by the executor is not directly usable, since the executor does
  * not know what result was expected.
  *
  * \param[in,out] history  Parsed action history entry
  * \param[in,out] on_fail  What should be done about the result
  * \param[in]     expired  Whether result is expired
  *
  * \note If the result is remapped and the node is not shutting down or failed,
  *       the operation will be recorded in the scheduler data's list of failed
  *       operations to highlight it for the user.
  *
  * \note This may update the resource's current and next role.
  */
 static void
 remap_operation(struct action_history *history,
                 enum pcmk__on_fail *on_fail, bool expired)
 {
     bool is_probe = false;
     int orig_exit_status = history->exit_status;
     int orig_exec_status = history->execution_status;
     const char *why = NULL;
     const char *task = history->task;
 
     // Remap degraded results to their successful counterparts
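     // (PCMK_OCF_DEGRADED -> PCMK_OCF_OK, and PCMK_OCF_DEGRADED_PROMOTED ->
     //  PCMK_OCF_RUNNING_PROMOTED)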
     history->exit_status = pcmk__effective_rc(history->exit_status);
     if (history->exit_status != orig_exit_status) {
         why = "degraded result";
         if (!expired && (!history->node->details->shutdown
                          || history->node->details->online)) {
             record_failed_op(history);
         }
     }
 
     if (!pcmk__is_bundled(history->rsc)
         && pcmk_xe_mask_probe_failure(history->xml)
         && ((history->execution_status != PCMK_EXEC_DONE)
             || (history->exit_status != PCMK_OCF_NOT_RUNNING))) {
         history->execution_status = PCMK_EXEC_DONE;
         history->exit_status = PCMK_OCF_NOT_RUNNING;
         why = "equivalent probe result";
     }
 
     /* If the executor reported an execution status of anything but done or
      * error, consider that final. But for done or error, we know better whether
      * it should be treated as a failure or not, because we know the expected
      * result.
      */
     switch (history->execution_status) {
         case PCMK_EXEC_DONE:
         case PCMK_EXEC_ERROR:
             break;
 
         // These should be treated as node-fatal
         case PCMK_EXEC_NO_FENCE_DEVICE:
         case PCMK_EXEC_NO_SECRETS:
             remap_because(history, &why, PCMK_EXEC_ERROR_HARD,
                           "node-fatal error");
             goto remap_done;
 
         default:
             goto remap_done;
     }
 
     is_probe = pcmk_xe_is_probe(history->xml);
     if (is_probe) {
         task = "probe";
     }
 
     if (history->expected_exit_status < 0) {
         /* Pre-1.0 Pacemaker versions, and Pacemaker 1.1.6 or earlier with
          * Heartbeat 2.0.7 or earlier as the cluster layer, did not include the
          * expected exit status in the transition key, which (along with the
          * similar case of a corrupted transition key in the CIB) will be
          * reported to this function as -1. Pacemaker 2.0+ does not support
          * rolling upgrades from those versions or processing of saved CIB files
          * from those versions, so we do not need to care much about this case.
          */
         remap_because(history, &why, PCMK_EXEC_ERROR,
                       "obsolete history format");
         pcmk__config_warn("Expected result not found for %s on %s "
                           "(corrupt or obsolete CIB?)",
                           history->key, pcmk__node_name(history->node));
 
     } else if (history->exit_status == history->expected_exit_status) {
         remap_because(history, &why, PCMK_EXEC_DONE, "expected result");
 
     } else {
         remap_because(history, &why, PCMK_EXEC_ERROR, "unexpected result");
         pcmk__rsc_debug(history->rsc,
                         "%s on %s: expected %d (%s), got %d (%s%s%s)",
                         history->key, pcmk__node_name(history->node),
                         history->expected_exit_status,
                         services_ocf_exitcode_str(history->expected_exit_status),
                         history->exit_status,
                         services_ocf_exitcode_str(history->exit_status),
                         (pcmk__str_empty(history->exit_reason)? "" : ": "),
                         pcmk__s(history->exit_reason, ""));
     }
 
     switch (history->exit_status) {
         case PCMK_OCF_OK:
             if (is_probe
                 && (history->expected_exit_status == PCMK_OCF_NOT_RUNNING)) {
                 char *last_change_s = last_change_str(history->xml);
 
                 remap_because(history, &why, PCMK_EXEC_DONE, "probe");
                 pcmk__rsc_info(history->rsc,
                                "Probe found %s active on %s at %s",
                                history->rsc->id, pcmk__node_name(history->node),
                                last_change_s);
                 free(last_change_s);
             }
             break;
 
         case PCMK_OCF_NOT_RUNNING:
             if (is_probe
                 || (history->expected_exit_status == history->exit_status)
                 || !pcmk_is_set(history->rsc->flags, pcmk__rsc_managed)) {
 
                 /* For probes, recurring monitors for the Stopped role, and
                  * unmanaged resources, "not running" is not considered a
                  * failure.
                  */
                 remap_because(history, &why, PCMK_EXEC_DONE, "exit status");
                 history->rsc->private->orig_role = pcmk_role_stopped;
                 *on_fail = pcmk__on_fail_ignore;
                 pe__set_next_role(history->rsc, pcmk_role_unknown,
                                   "not running");
             }
             break;
 
         case PCMK_OCF_RUNNING_PROMOTED:
             if (is_probe
                 && (history->exit_status != history->expected_exit_status)) {
                 char *last_change_s = last_change_str(history->xml);
 
                 remap_because(history, &why, PCMK_EXEC_DONE, "probe");
                 pcmk__rsc_info(history->rsc,
                                "Probe found %s active and promoted on %s at %s",
                                 history->rsc->id,
                                 pcmk__node_name(history->node), last_change_s);
                 free(last_change_s);
             }
             if (!expired
                 || (history->exit_status == history->expected_exit_status)) {
                 history->rsc->private->orig_role = pcmk_role_promoted;
             }
             break;
 
         case PCMK_OCF_FAILED_PROMOTED:
             if (!expired) {
                 history->rsc->private->orig_role = pcmk_role_promoted;
             }
             remap_because(history, &why, PCMK_EXEC_ERROR, "exit status");
             break;
 
         case PCMK_OCF_NOT_CONFIGURED:
             remap_because(history, &why, PCMK_EXEC_ERROR_FATAL, "exit status");
             break;
 
         case PCMK_OCF_UNIMPLEMENT_FEATURE:
             {
                 guint interval_ms = 0;
                 crm_element_value_ms(history->xml, PCMK_META_INTERVAL,
                                      &interval_ms);
 
                 if (interval_ms == 0) {
                     if (!expired) {
                         block_if_unrecoverable(history);
                     }
                     remap_because(history, &why, PCMK_EXEC_ERROR_HARD,
                                   "exit status");
                 } else {
                     remap_because(history, &why, PCMK_EXEC_NOT_SUPPORTED,
                                   "exit status");
                 }
             }
             break;
 
         case PCMK_OCF_NOT_INSTALLED:
         case PCMK_OCF_INVALID_PARAM:
         case PCMK_OCF_INSUFFICIENT_PRIV:
             if (!expired) {
                 block_if_unrecoverable(history);
             }
             remap_because(history, &why, PCMK_EXEC_ERROR_HARD, "exit status");
             break;
 
         default:
             if (history->execution_status == PCMK_EXEC_DONE) {
                 char *last_change_s = last_change_str(history->xml);
 
                 crm_info("Treating unknown exit status %d from %s of %s "
                          "on %s at %s as failure",
                          history->exit_status, task, history->rsc->id,
                          pcmk__node_name(history->node), last_change_s);
                 remap_because(history, &why, PCMK_EXEC_ERROR,
                               "unknown exit status");
                 free(last_change_s);
             }
             break;
     }
 
 remap_done:
     if (why != NULL) {
         pcmk__rsc_trace(history->rsc,
                         "Remapped %s result from [%s: %s] to [%s: %s] "
                         "because of %s",
                         history->key, pcmk_exec_status_str(orig_exec_status),
                         crm_exit_str(orig_exit_status),
                         pcmk_exec_status_str(history->execution_status),
                         crm_exit_str(history->exit_status), why);
     }
 }
 
 // Return TRUE if a start or monitor last_failure's parameters have changed
 static bool
 should_clear_for_param_change(const xmlNode *xml_op, const char *task,
                               pcmk_resource_t *rsc, pcmk_node_t *node)
 {
     if (pcmk__str_any_of(task, PCMK_ACTION_START, PCMK_ACTION_MONITOR, NULL)) {
         if (pe__bundle_needs_remote_name(rsc)) {
             /* We haven't allocated resources yet, so we can't reliably
              * substitute addr parameters for the REMOTE_CONTAINER_HACK.
              * When that's needed, defer the check until later.
              */
             pe__add_param_check(xml_op, rsc, node, pcmk__check_last_failure,
                                 rsc->private->scheduler);
 
         } else {
             pcmk__op_digest_t *digest_data = NULL;
 
             digest_data = rsc_action_digest_cmp(rsc, xml_op, node,
                                                 rsc->private->scheduler);
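 
             /* digest_data->rc reports whether the parameter digests recorded
              * with this history entry still match the current configuration
              */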
             switch (digest_data->rc) {
                 case pcmk__digest_unknown:
                     crm_trace("Resource %s history entry %s on %s"
                               " has no digest to compare",
                               rsc->id, pcmk__xe_history_key(xml_op),
                               node->private->id);
                     break;
                 case pcmk__digest_match:
                     break;
                 default:
                     return TRUE;
             }
         }
     }
     return FALSE;
 }
 
 // Order action after fencing of remote node, given connection rsc
 static void
 order_after_remote_fencing(pcmk_action_t *action, pcmk_resource_t *remote_conn,
                            pcmk_scheduler_t *scheduler)
 {
     pcmk_node_t *remote_node = pcmk_find_node(scheduler, remote_conn->id);
 
     if (remote_node) {
         pcmk_action_t *fence = pe_fence_op(remote_node, NULL, TRUE, NULL,
                                            FALSE, scheduler);
 
         order_actions(fence, action, pcmk__ar_first_implies_then);
     }
 }
 
 static bool
 should_ignore_failure_timeout(const pcmk_resource_t *rsc, const char *task,
                               guint interval_ms, bool is_last_failure)
 {
     /* Clearing failures of recurring monitors has special concerns. The
      * executor reports only changes in the monitor result, so if the
      * monitor is still active and still getting the same failure result,
      * that will go undetected after the failure is cleared.
      *
      * Also, the operation history will have the time when the recurring
      * monitor result changed to the given code, not the time when the
      * result last happened.
      *
      * @TODO We probably should clear such failures only when the failure
      * timeout has passed since the last occurrence of the failed result.
      * However we don't record that information. We could maybe approximate
      * that by clearing only if there is a more recent successful monitor or
      * stop result, but we don't even have that information at this point
      * since we are still unpacking the resource's operation history.
      *
      * This is especially important for remote connection resources with a
      * reconnect interval, so in that case, we skip clearing failures
      * if the remote node hasn't been fenced.
      */
     if ((rsc->private->remote_reconnect_ms > 0U)
         && pcmk_is_set(rsc->private->scheduler->flags,
                        pcmk_sched_fencing_enabled)
         && (interval_ms != 0)
         && pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
 
         pcmk_node_t *remote_node = pcmk_find_node(rsc->private->scheduler,
                                                   rsc->id);
 
         if (remote_node && !pcmk_is_set(remote_node->private->flags,
                                         pcmk__node_remote_fenced)) {
             if (is_last_failure) {
                 crm_info("Waiting to clear monitor failure for remote node %s"
                          " until fencing has occurred", rsc->id);
             }
             return TRUE;
         }
     }
     return FALSE;
 }
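 
 /* Illustration only (not part of this patch): the remote-fencing check made
  * by the function above, in isolation. A minimal sketch assuming "rsc" is a
  * remote connection resource and "scheduler" is its scheduler data; as
  * above, the remote node shares the connection resource's ID.
  */
 #if 0
 pcmk_node_t *remote_node = pcmk_find_node(scheduler, rsc->id);
 bool remote_fenced = (remote_node != NULL)
                      && pcmk_is_set(remote_node->private->flags,
                                     pcmk__node_remote_fenced);
 #endif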
 
 /*!
  * \internal
  * \brief Check operation age and schedule failure clearing when appropriate
  *
  * This function has two distinct purposes. The first is to check whether an
  * operation history entry is expired (that is, the resource has a failure
  * timeout, the entry is older than the timeout, and the resource either has
  * no fail count or its fail count is entirely older than the timeout). The
  * second is to schedule fail count clearing when appropriate: when the entry
  * is expired and either the resource has an expired fail count or the entry
  * is a last_failure for a remote connection resource with a reconnect
  * interval, or when the entry is an unexpired last_failure for a start or
  * monitor whose parameters have changed since the operation ran.
  *
  * \param[in,out] history  Parsed action result history
  *
  * \return true if operation history entry is expired, otherwise false
  */
 static bool
 check_operation_expiry(struct action_history *history)
 {
     bool expired = false;
     bool is_last_failure = pcmk__ends_with(history->id, "_last_failure_0");
     time_t last_run = 0;
     int unexpired_fail_count = 0;
     const char *clear_reason = NULL;
     const guint expiration_sec =
         history->rsc->private->failure_expiration_ms / 1000;
     pcmk_scheduler_t *scheduler = history->rsc->private->scheduler;
 
     if (history->execution_status == PCMK_EXEC_NOT_INSTALLED) {
         pcmk__rsc_trace(history->rsc,
                         "Resource history entry %s on %s is not expired: "
                         "Not Installed does not expire",
                         history->id, pcmk__node_name(history->node));
         return false; // "Not installed" must always be cleared manually
     }
 
     if ((expiration_sec > 0)
         && (crm_element_value_epoch(history->xml, PCMK_XA_LAST_RC_CHANGE,
                                     &last_run) == 0)) {
 
         /* Resource has a PCMK_META_FAILURE_TIMEOUT and history entry has a
          * timestamp
          */
 
         time_t now = get_effective_time(scheduler);
         time_t last_failure = 0;
 
         // Is this particular operation history older than the failure timeout?
         if ((now >= (last_run + expiration_sec))
             && !should_ignore_failure_timeout(history->rsc, history->task,
                                               history->interval_ms,
                                               is_last_failure)) {
             expired = true;
         }
 
         // Does the resource as a whole have an unexpired fail count?
         unexpired_fail_count = pe_get_failcount(history->node, history->rsc,
                                                 &last_failure,
                                                 pcmk__fc_effective,
                                                 history->xml);
 
         // Update scheduler recheck time according to *last* failure
         crm_trace("%s@%lld is %sexpired @%lld with unexpired_failures=%d "
                   "expiration=%s last-failure@%lld",
                   history->id, (long long) last_run, (expired? "" : "not "),
                   (long long) now, unexpired_fail_count,
                   pcmk__readable_interval(expiration_sec * 1000),
                   (long long) last_failure);
         last_failure += expiration_sec + 1;
         if (unexpired_fail_count && (now < last_failure)) {
             pe__update_recheck_time(last_failure, scheduler,
                                     "fail count expiration");
         }
     }
 
     if (expired) {
         if (pe_get_failcount(history->node, history->rsc, NULL,
                              pcmk__fc_default, history->xml)) {
             // There is a fail count ignoring timeout
 
             if (unexpired_fail_count == 0) {
                 // There is no fail count considering timeout
                 clear_reason = "it expired";
 
             } else {
                 /* This operation is old, but there is an unexpired fail count.
                  * In a properly functioning cluster, this should only be
                  * possible if this operation is not a failure (otherwise the
                  * fail count should be expired too), so this is really just a
                  * failsafe.
                  */
                 pcmk__rsc_trace(history->rsc,
                                 "Resource history entry %s on %s is not "
                                 "expired: Unexpired fail count",
                                 history->id, pcmk__node_name(history->node));
                 expired = false;
             }
 
         } else if (is_last_failure
                    && (history->rsc->private->remote_reconnect_ms > 0U)) {
             /* Clear any expired last failure when reconnect interval is set,
              * even if there is no fail count.
              */
             clear_reason = "reconnect interval is set";
         }
     }
 
     if (!expired && is_last_failure
         && should_clear_for_param_change(history->xml, history->task,
                                          history->rsc, history->node)) {
         clear_reason = "resource parameters have changed";
     }
 
     if (clear_reason != NULL) {
         pcmk_action_t *clear_op = NULL;
 
         // Schedule clearing of the fail count
         clear_op = pe__clear_failcount(history->rsc, history->node,
                                        clear_reason, scheduler);
 
         if (pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)
             && (history->rsc->private->remote_reconnect_ms > 0)) {
             /* If we're clearing a remote connection due to a reconnect
              * interval, we want to wait until any scheduled fencing
              * completes.
              *
              * We could limit this to remote_node->details->unclean, but at
              * this point, that's always true (it won't be reliable until
              * after unpack_node_history() is done).
              */
             crm_info("Clearing %s failure will wait until any scheduled "
                      "fencing of %s completes",
                      history->task, history->rsc->id);
             order_after_remote_fencing(clear_op, history->rsc, scheduler);
         }
     }
 
     if (expired && (history->interval_ms == 0)
         && pcmk__str_eq(history->task, PCMK_ACTION_MONITOR, pcmk__str_none)) {
         switch (history->exit_status) {
             case PCMK_OCF_OK:
             case PCMK_OCF_NOT_RUNNING:
             case PCMK_OCF_RUNNING_PROMOTED:
             case PCMK_OCF_DEGRADED:
             case PCMK_OCF_DEGRADED_PROMOTED:
                 // Don't expire probes that return these values
                 pcmk__rsc_trace(history->rsc,
                                 "Resource history entry %s on %s is not "
                                 "expired: Probe result",
                                 history->id, pcmk__node_name(history->node));
                 expired = false;
                 break;
         }
     }
 
     return expired;
 }
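 
 /* Illustration only (not part of this patch): the expiry arithmetic used
  * above, with made-up values. An entry recorded at t=1000 with a 300-second
  * failure timeout is expired once "now" reaches 1300.
  */
 #if 0
 static bool
 example_entry_expired(time_t last_run, guint expiration_sec, time_t now)
 {
     // Mirrors check_operation_expiry(): expired once the failure timeout
     // has elapsed since the result was last recorded
     return now >= (last_run + (time_t) expiration_sec);
 }
 // example_entry_expired(1000, 300, 1400) -> true
 #endif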
 
 int
 pe__target_rc_from_xml(const xmlNode *xml_op)
 {
     int target_rc = 0;
     const char *key = crm_element_value(xml_op, PCMK__XA_TRANSITION_KEY);
 
     if (key == NULL) {
         return -1;
     }
     decode_transition_key(key, NULL, NULL, NULL, &target_rc);
     return target_rc;
 }
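 
 /* Illustration only (not part of this patch): a hypothetical caller
  * comparing a recorded result against the expected one; "xml_op" is assumed
  * to be a PCMK__XE_LRM_RSC_OP history entry.
  */
 #if 0
 int rc = 0;
 int target_rc = pe__target_rc_from_xml(xml_op);
 
 crm_element_value_int(xml_op, PCMK__XA_RC_CODE, &rc);
 if ((target_rc >= 0) && (rc != target_rc)) {
     // The action did not produce the result the transition expected
 }
 #endif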
 
 /*!
  * \internal
  * \brief Update a resource's state for an action result
  *
  * \param[in,out] history       Parsed action history entry
  * \param[in]     exit_status   Exit status to base new state on
  * \param[in]     last_failure  Resource's last_failure entry, if known
  * \param[in,out] on_fail       Resource's current failure handling
  */
 static void
 update_resource_state(struct action_history *history, int exit_status,
                       const xmlNode *last_failure,
                       enum pcmk__on_fail *on_fail)
 {
     bool clear_past_failure = false;
 
     if ((exit_status == PCMK_OCF_NOT_INSTALLED)
         || (!pcmk__is_bundled(history->rsc)
             && pcmk_xe_mask_probe_failure(history->xml))) {
         history->rsc->private->orig_role = pcmk_role_stopped;
 
     } else if (exit_status == PCMK_OCF_NOT_RUNNING) {
         clear_past_failure = true;
 
     } else if (pcmk__str_eq(history->task, PCMK_ACTION_MONITOR,
                             pcmk__str_none)) {
         if ((last_failure != NULL)
             && pcmk__str_eq(history->key, pcmk__xe_history_key(last_failure),
                             pcmk__str_none)) {
             clear_past_failure = true;
         }
         if (history->rsc->private->orig_role < pcmk_role_started) {
             set_active(history->rsc);
         }
 
     } else if (pcmk__str_eq(history->task, PCMK_ACTION_START, pcmk__str_none)) {
         history->rsc->private->orig_role = pcmk_role_started;
         clear_past_failure = true;
 
     } else if (pcmk__str_eq(history->task, PCMK_ACTION_STOP, pcmk__str_none)) {
         history->rsc->private->orig_role = pcmk_role_stopped;
         clear_past_failure = true;
 
     } else if (pcmk__str_eq(history->task, PCMK_ACTION_PROMOTE,
                             pcmk__str_none)) {
         history->rsc->private->orig_role = pcmk_role_promoted;
         clear_past_failure = true;
 
     } else if (pcmk__str_eq(history->task, PCMK_ACTION_DEMOTE,
                             pcmk__str_none)) {
         if (*on_fail == pcmk__on_fail_demote) {
             /* Demote clears an error only if
              * PCMK_META_ON_FAIL=PCMK_VALUE_DEMOTE
              */
             clear_past_failure = true;
         }
         history->rsc->private->orig_role = pcmk_role_unpromoted;
 
     } else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_FROM,
                             pcmk__str_none)) {
         history->rsc->private->orig_role = pcmk_role_started;
         clear_past_failure = true;
 
     } else if (pcmk__str_eq(history->task, PCMK_ACTION_MIGRATE_TO,
                             pcmk__str_none)) {
         unpack_migrate_to_success(history);
 
     } else if (history->rsc->private->orig_role < pcmk_role_started) {
         pcmk__rsc_trace(history->rsc, "%s active on %s",
                         history->rsc->id, pcmk__node_name(history->node));
         set_active(history->rsc);
     }
 
     if (!clear_past_failure) {
         return;
     }
 
     switch (*on_fail) {
         case pcmk__on_fail_stop:
         case pcmk__on_fail_ban:
         case pcmk__on_fail_standby_node:
         case pcmk__on_fail_fence_node:
             pcmk__rsc_trace(history->rsc,
                             "%s (%s) is not cleared by a completed %s",
                             history->rsc->id, pcmk__on_fail_text(*on_fail),
                             history->task);
             break;
 
         case pcmk__on_fail_block:
         case pcmk__on_fail_ignore:
         case pcmk__on_fail_demote:
         case pcmk__on_fail_restart:
         case pcmk__on_fail_restart_container:
             *on_fail = pcmk__on_fail_ignore;
             pe__set_next_role(history->rsc, pcmk_role_unknown,
                               "clear past failures");
             break;
 
         case pcmk__on_fail_reset_remote:
             if (history->rsc->private->remote_reconnect_ms == 0U) {
                 /* With no reconnect interval, the connection is allowed to
                  * start again after the remote node is fenced and
                  * completely stopped. (With a reconnect interval, we wait
                  * for the failure to be cleared entirely before attempting
                  * to reconnect.)
                  */
                 *on_fail = pcmk__on_fail_ignore;
                 pe__set_next_role(history->rsc, pcmk_role_unknown,
                                   "clear past failures and reset remote");
             }
             break;
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a given history entry matters for resource state
  *
  * \param[in] history  Parsed action history entry
  *
  * \return true if action can affect resource state, otherwise false
  */
 static inline bool
 can_affect_state(struct action_history *history)
 {
 #if 0
     /* @COMPAT It might be better to parse only actions we know we're interested
      * in, rather than exclude a couple we don't. However that would be a
      * behavioral change that should be done at a major or minor series release.
      * Currently, unknown operations can affect whether a resource is considered
      * active and/or failed.
      */
      return pcmk__str_any_of(history->task, PCMK_ACTION_MONITOR,
                              PCMK_ACTION_START, PCMK_ACTION_STOP,
                              PCMK_ACTION_PROMOTE, PCMK_ACTION_DEMOTE,
                              PCMK_ACTION_MIGRATE_TO, PCMK_ACTION_MIGRATE_FROM,
                              "asyncmon", NULL);
 #else
      return !pcmk__str_any_of(history->task, PCMK_ACTION_NOTIFY,
                               PCMK_ACTION_META_DATA, NULL);
 #endif
 }
 
 /*!
  * \internal
  * \brief Unpack execution/exit status and exit reason from a history entry
  *
  * \param[in,out] history  Action history entry to unpack
  *
  * \return Standard Pacemaker return code
  */
 static int
 unpack_action_result(struct action_history *history)
 {
     if ((crm_element_value_int(history->xml, PCMK__XA_OP_STATUS,
                                &(history->execution_status)) < 0)
         || (history->execution_status < PCMK_EXEC_PENDING)
         || (history->execution_status > PCMK_EXEC_MAX)
         || (history->execution_status == PCMK_EXEC_CANCELLED)) {
         pcmk__config_err("Ignoring resource history entry %s for %s on %s "
                          "with invalid " PCMK__XA_OP_STATUS " '%s'",
                          history->id, history->rsc->id,
                          pcmk__node_name(history->node),
                          pcmk__s(crm_element_value(history->xml,
                                                    PCMK__XA_OP_STATUS),
                                  ""));
         return pcmk_rc_unpack_error;
     }
     if ((crm_element_value_int(history->xml, PCMK__XA_RC_CODE,
                                &(history->exit_status)) < 0)
         || (history->exit_status < 0) || (history->exit_status > CRM_EX_MAX)) {
 #if 0
         /* @COMPAT We should ignore malformed entries, but since that would
          * change behavior, it should be done at a major or minor series
          * release.
          */
         pcmk__config_err("Ignoring resource history entry %s for %s on %s "
                          "with invalid " PCMK__XA_RC_CODE " '%s'",
                          history->id, history->rsc->id,
                          pcmk__node_name(history->node),
                          pcmk__s(crm_element_value(history->xml,
                                                    PCMK__XA_RC_CODE),
                                  ""));
         return pcmk_rc_unpack_error;
 #else
         history->exit_status = CRM_EX_ERROR;
 #endif
     }
     history->exit_reason = crm_element_value(history->xml, PCMK_XA_EXIT_REASON);
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Process an action history entry whose result expired
  *
  * \param[in,out] history           Parsed action history entry
  * \param[in]     orig_exit_status  Action exit status before remapping
  *
  * \return Standard Pacemaker return code (in particular, pcmk_rc_ok means the
  *         entry needs no further processing)
  */
 static int
 process_expired_result(struct action_history *history, int orig_exit_status)
 {
     if (!pcmk__is_bundled(history->rsc)
         && pcmk_xe_mask_probe_failure(history->xml)
         && (orig_exit_status != history->expected_exit_status)) {
 
         if (history->rsc->private->orig_role <= pcmk_role_stopped) {
             history->rsc->private->orig_role = pcmk_role_unknown;
         }
         crm_trace("Ignoring resource history entry %s for probe of %s on %s: "
                   "Masked failure expired",
                   history->id, history->rsc->id,
                   pcmk__node_name(history->node));
         return pcmk_rc_ok;
     }
 
     if (history->exit_status == history->expected_exit_status) {
         return pcmk_rc_undetermined; // Only failures expire
     }
 
     if (history->interval_ms == 0) {
         crm_notice("Ignoring resource history entry %s for %s of %s on %s: "
                    "Expired failure",
                    history->id, history->task, history->rsc->id,
                    pcmk__node_name(history->node));
         return pcmk_rc_ok;
     }
 
     if (history->node->details->online && !history->node->details->unclean) {
         /* Reschedule the recurring action. schedule_cancel() won't work at
          * this stage, so as a hacky workaround, forcibly change the restart
          * digest so pcmk__check_action_config() does what we want later.
          *
          * @TODO We should skip this if there is a newer successful monitor.
          *       Also, this causes rescheduling only if the history entry
          *       has a PCMK__XA_OP_DIGEST (which the expire-non-blocked-failure
          *       scheduler regression test doesn't, but that may not be a
          *       realistic scenario in production).
          */
         crm_notice("Rescheduling %s-interval %s of %s on %s "
                    "after failure expired",
                    pcmk__readable_interval(history->interval_ms), history->task,
                    history->rsc->id, pcmk__node_name(history->node));
         crm_xml_add(history->xml, PCMK__XA_OP_RESTART_DIGEST,
                     "calculated-failure-timeout");
         return pcmk_rc_ok;
     }
 
     return pcmk_rc_undetermined;
 }
 
 /*!
  * \internal
  * \brief Process a masked probe failure
  *
  * \param[in,out] history           Parsed action history entry
  * \param[in]     orig_exit_status  Action exit status before remapping
  * \param[in]     last_failure      Resource's last_failure entry, if known
  * \param[in,out] on_fail           Resource's current failure handling
  */
 static void
 mask_probe_failure(struct action_history *history, int orig_exit_status,
                    const xmlNode *last_failure,
                    enum pcmk__on_fail *on_fail)
 {
     pcmk_resource_t *ban_rsc = history->rsc;
 
     if (!pcmk_is_set(history->rsc->flags, pcmk__rsc_unique)) {
         ban_rsc = uber_parent(history->rsc);
     }
 
     crm_notice("Treating probe result '%s' for %s on %s as 'not running'",
                services_ocf_exitcode_str(orig_exit_status), history->rsc->id,
                pcmk__node_name(history->node));
     update_resource_state(history, history->expected_exit_status, last_failure,
                           on_fail);
     crm_xml_add(history->xml, PCMK_XA_UNAME, history->node->private->name);
 
     record_failed_op(history);
     resource_location(ban_rsc, history->node, -PCMK_SCORE_INFINITY,
                       "masked-probe-failure", ban_rsc->private->scheduler);
 }
 
 /*!
  * \internal
  * \brief Check whether a given failure is for a given pending action
  *
  * \param[in] history       Parsed history entry for pending action
  * \param[in] last_failure  Resource's last_failure entry, if known
  *
  * \return true if \p last_failure is failure of pending action in \p history,
  *         otherwise false
  * \note Both \p history and \p last_failure must come from the same
  *       \c PCMK__XE_LRM_RESOURCE block, as node and resource are assumed to be
  *       the same.
  */
 static bool
 failure_is_newer(const struct action_history *history,
                  const xmlNode *last_failure)
 {
     guint failure_interval_ms = 0U;
     long long failure_change = 0LL;
     long long this_change = 0LL;
 
     if (last_failure == NULL) {
         return false; // Resource has no last_failure entry
     }
 
     if (!pcmk__str_eq(history->task,
                       crm_element_value(last_failure, PCMK_XA_OPERATION),
                       pcmk__str_none)) {
         return false; // last_failure is for different action
     }
 
     if ((crm_element_value_ms(last_failure, PCMK_META_INTERVAL,
                               &failure_interval_ms) != pcmk_ok)
         || (history->interval_ms != failure_interval_ms)) {
         return false; // last_failure is for action with different interval
     }
 
     if ((pcmk__scan_ll(crm_element_value(history->xml, PCMK_XA_LAST_RC_CHANGE),
                        &this_change, 0LL) != pcmk_rc_ok)
         || (pcmk__scan_ll(crm_element_value(last_failure,
                                             PCMK_XA_LAST_RC_CHANGE),
                           &failure_change, 0LL) != pcmk_rc_ok)
         || (failure_change < this_change)) {
         return false; // Failure is not known to be newer
     }
 
     return true;
 }
 
 /*!
  * \internal
  * \brief Update a resource's role etc. for a pending action
  *
  * \param[in,out] history       Parsed history entry for pending action
  * \param[in]     last_failure  Resource's last_failure entry, if known
  */
 static void
 process_pending_action(struct action_history *history,
                        const xmlNode *last_failure)
 {
     /* For recurring monitors, a failure is recorded only in RSC_last_failure_0,
      * and there might be a RSC_monitor_INTERVAL entry with the last successful
      * or pending result.
      *
      * If last_failure contains the failure of the pending recurring monitor
      * we're processing here, and is newer, the action is no longer pending.
      * (Pending results have call ID -1, which sorts last, so the last failure
      * if any should be known.)
      */
     if (failure_is_newer(history, last_failure)) {
         return;
     }
 
     if (strcmp(history->task, PCMK_ACTION_START) == 0) {
         pcmk__set_rsc_flags(history->rsc, pcmk__rsc_start_pending);
         set_active(history->rsc);
 
     } else if (strcmp(history->task, PCMK_ACTION_PROMOTE) == 0) {
         history->rsc->private->orig_role = pcmk_role_promoted;
 
     } else if ((strcmp(history->task, PCMK_ACTION_MIGRATE_TO) == 0)
                && history->node->details->unclean) {
         /* A migrate_to action is pending on an unclean source, so force a stop
          * on the target.
          */
         const char *migrate_target = NULL;
         pcmk_node_t *target = NULL;
 
         migrate_target = crm_element_value(history->xml,
                                            PCMK__META_MIGRATE_TARGET);
         target = pcmk_find_node(history->rsc->private->scheduler,
                                 migrate_target);
         if (target != NULL) {
             stop_action(history->rsc, target, FALSE);
         }
     }
 
     if (history->rsc->private->pending_action != NULL) {
         /* There should never be multiple pending actions, but as a failsafe,
          * just remember the first one processed for display purposes.
          */
         return;
     }
 
     if (pcmk_is_probe(history->task, history->interval_ms)) {
         /* Pending probes are currently never displayed, even if pending
          * operations are requested. If we ever want to change that,
          * enable the below and the corresponding part of
          * native.c:native_pending_action().
          */
 #if 0
         history->rsc->private->pending_action = strdup("probe");
         history->rsc->private->pending_node = history->node;
 #endif
     } else {
         history->rsc->private->pending_action = strdup(history->task);
         history->rsc->private->pending_node = history->node;
     }
 }
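 
 /* Illustration only (not part of this patch): hypothetical, abridged history
  * entries for the recurring-monitor case that process_pending_action()
  * describes. The pending entry has call ID -1; last_failure records the
  * most recent failed result.
  *
  *   <lrm_rsc_op id="myrsc_monitor_10000" operation="monitor"
  *               interval="10000" call-id="-1" ... />
  *   <lrm_rsc_op id="myrsc_last_failure_0" operation="monitor"
  *               interval="10000" rc-code="1" ... />
  *
  * If the last_failure entry is newer (per failure_is_newer()), the monitor
  * is no longer treated as pending.
  */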
 
 static void
 unpack_rsc_op(pcmk_resource_t *rsc, pcmk_node_t *node, xmlNode *xml_op,
               xmlNode **last_failure, enum pcmk__on_fail *on_fail)
 {
     int old_rc = 0;
     bool expired = false;
     pcmk_resource_t *parent = rsc;
     enum rsc_role_e fail_role = pcmk_role_unknown;
     enum pcmk__on_fail failure_strategy = pcmk__on_fail_restart;
 
     struct action_history history = {
         .rsc = rsc,
         .node = node,
         .xml = xml_op,
         .execution_status = PCMK_EXEC_UNKNOWN,
     };
 
     CRM_CHECK(rsc && node && xml_op, return);
 
     history.id = pcmk__xe_id(xml_op);
     if (history.id == NULL) {
         pcmk__config_err("Ignoring resource history entry for %s on %s "
                          "without ID", rsc->id, pcmk__node_name(node));
         return;
     }
 
     // Task and interval
     history.task = crm_element_value(xml_op, PCMK_XA_OPERATION);
     if (history.task == NULL) {
         pcmk__config_err("Ignoring resource history entry %s for %s on %s "
                          "without " PCMK_XA_OPERATION,
                          history.id, rsc->id, pcmk__node_name(node));
         return;
     }
     crm_element_value_ms(xml_op, PCMK_META_INTERVAL, &(history.interval_ms));
     if (!can_affect_state(&history)) {
         pcmk__rsc_trace(rsc,
                         "Ignoring resource history entry %s for %s on %s "
                         "with irrelevant action '%s'",
                         history.id, rsc->id, pcmk__node_name(node),
                         history.task);
         return;
     }
 
     if (unpack_action_result(&history) != pcmk_rc_ok) {
         return; // Error already logged
     }
 
     history.expected_exit_status = pe__target_rc_from_xml(xml_op);
     history.key = pcmk__xe_history_key(xml_op);
     crm_element_value_int(xml_op, PCMK__XA_CALL_ID, &(history.call_id));
 
     pcmk__rsc_trace(rsc, "Unpacking %s (%s call %d on %s): %s (%s)",
                     history.id, history.task, history.call_id,
                     pcmk__node_name(node),
                     pcmk_exec_status_str(history.execution_status),
                     crm_exit_str(history.exit_status));
 
     if (node->details->unclean) {
         pcmk__rsc_trace(rsc,
                         "%s is running on %s, which is unclean (further action "
                         "depends on value of stop's on-fail attribute)",
                         rsc->id, pcmk__node_name(node));
     }
 
     expired = check_operation_expiry(&history);
     old_rc = history.exit_status;
 
     remap_operation(&history, on_fail, expired);
 
     if (expired && (process_expired_result(&history, old_rc) == pcmk_rc_ok)) {
         goto done;
     }
 
     if (!pcmk__is_bundled(rsc) && pcmk_xe_mask_probe_failure(xml_op)) {
         mask_probe_failure(&history, old_rc, *last_failure, on_fail);
         goto done;
     }
 
     if (!pcmk_is_set(rsc->flags, pcmk__rsc_unique)) {
         parent = uber_parent(rsc);
     }
 
     switch (history.execution_status) {
         case PCMK_EXEC_PENDING:
             process_pending_action(&history, *last_failure);
             goto done;
 
         case PCMK_EXEC_DONE:
             update_resource_state(&history, history.exit_status, *last_failure,
                                   on_fail);
             goto done;
 
         case PCMK_EXEC_NOT_INSTALLED:
             unpack_failure_handling(&history, &failure_strategy, &fail_role);
             if (failure_strategy == pcmk__on_fail_ignore) {
                 crm_warn("Cannot ignore failed %s of %s on %s: "
                          "Resource agent doesn't exist "
                          QB_XS " status=%d rc=%d id=%s",
                          history.task, rsc->id, pcmk__node_name(node),
                          history.execution_status, history.exit_status,
                          history.id);
                 /* Also mark it as pcmk__rsc_failed later, so that it is
                  * displayed as "FAILED"
                  */
                 *on_fail = pcmk__on_fail_ban;
             }
             resource_location(parent, node, -PCMK_SCORE_INFINITY,
                               "hard-error", rsc->private->scheduler);
             unpack_rsc_op_failure(&history, failure_strategy, fail_role,
                                   last_failure, on_fail);
             goto done;
 
         case PCMK_EXEC_NOT_CONNECTED:
             if (pcmk__is_pacemaker_remote_node(node)
                 && pcmk_is_set(node->private->remote->flags,
                                pcmk__rsc_managed)) {
                 /* We should never get into a situation where a managed remote
                  * connection resource is considered OK but a resource action
                  * behind the connection gets a "not connected" status. But as a
                  * fail-safe in case a bug or unusual circumstances do lead to
                  * that, ensure the remote connection is considered failed.
                  */
                 pcmk__set_rsc_flags(node->private->remote,
                                     pcmk__rsc_failed|pcmk__rsc_stop_if_failed);
             }
             break; // Not done, do error handling
 
         case PCMK_EXEC_ERROR:
         case PCMK_EXEC_ERROR_HARD:
         case PCMK_EXEC_ERROR_FATAL:
         case PCMK_EXEC_TIMEOUT:
         case PCMK_EXEC_NOT_SUPPORTED:
         case PCMK_EXEC_INVALID:
             break; // Not done, do error handling
 
         default: // No other value should be possible at this point
             break;
     }
 
     unpack_failure_handling(&history, &failure_strategy, &fail_role);
     if ((failure_strategy == pcmk__on_fail_ignore)
         || ((failure_strategy == pcmk__on_fail_restart_container)
             && (strcmp(history.task, PCMK_ACTION_STOP) == 0))) {
 
         char *last_change_s = last_change_str(xml_op);
 
         crm_warn("Pretending failed %s (%s%s%s) of %s on %s at %s succeeded "
                  QB_XS " %s",
                  history.task, services_ocf_exitcode_str(history.exit_status),
                  (pcmk__str_empty(history.exit_reason)? "" : ": "),
                  pcmk__s(history.exit_reason, ""), rsc->id,
                  pcmk__node_name(node), last_change_s, history.id);
         free(last_change_s);
 
         update_resource_state(&history, history.expected_exit_status,
                               *last_failure, on_fail);
         crm_xml_add(xml_op, PCMK_XA_UNAME, node->private->name);
         pcmk__set_rsc_flags(rsc, pcmk__rsc_ignore_failure);
 
         record_failed_op(&history);
 
         if ((failure_strategy == pcmk__on_fail_restart_container)
             && (*on_fail <= pcmk__on_fail_restart)) {
             *on_fail = failure_strategy;
         }
 
     } else {
         unpack_rsc_op_failure(&history, failure_strategy, fail_role,
                               last_failure, on_fail);
 
         if (history.execution_status == PCMK_EXEC_ERROR_HARD) {
             uint8_t log_level = LOG_ERR;
 
             if (history.exit_status == PCMK_OCF_NOT_INSTALLED) {
                 log_level = LOG_NOTICE;
             }
             do_crm_log(log_level,
                        "Preventing %s from restarting on %s because "
                        "of hard failure (%s%s%s) " QB_XS " %s",
                        parent->id, pcmk__node_name(node),
                        services_ocf_exitcode_str(history.exit_status),
                        (pcmk__str_empty(history.exit_reason)? "" : ": "),
                        pcmk__s(history.exit_reason, ""), history.id);
             resource_location(parent, node, -PCMK_SCORE_INFINITY,
                               "hard-error", rsc->private->scheduler);
 
         } else if (history.execution_status == PCMK_EXEC_ERROR_FATAL) {
-            pcmk__sched_err("Preventing %s from restarting anywhere because "
+            pcmk__sched_err(rsc->private->scheduler,
+                            "Preventing %s from restarting anywhere because "
                             "of fatal failure (%s%s%s) " QB_XS " %s",
                             parent->id,
                             services_ocf_exitcode_str(history.exit_status),
                             (pcmk__str_empty(history.exit_reason)? "" : ": "),
                             pcmk__s(history.exit_reason, ""), history.id);
             resource_location(parent, NULL, -PCMK_SCORE_INFINITY,
                               "fatal-error", rsc->private->scheduler);
         }
     }
 
 done:
     pcmk__rsc_trace(rsc, "%s role on %s after %s is %s (next %s)",
                     rsc->id, pcmk__node_name(node), history.id,
                     pcmk_role_text(rsc->private->orig_role),
                     pcmk_role_text(rsc->private->next_role));
 }
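 
 /* Illustration only (not part of this patch): a hypothetical caller
  * unpacking a resource's history entries in call-id order, threading the
  * last_failure and on-fail state through each call as unpack_rsc_op()
  * expects. "sorted_op_list" is assumed.
  */
 #if 0
 xmlNode *last_failure = NULL;
 enum pcmk__on_fail on_fail = pcmk__on_fail_ignore;
 
 for (GList *iter = sorted_op_list; iter != NULL; iter = iter->next) {
     unpack_rsc_op(rsc, node, (xmlNode *) iter->data, &last_failure,
                   &on_fail);
 }
 #endif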
 
 static void
 add_node_attrs(const xmlNode *xml_obj, pcmk_node_t *node, bool overwrite,
                pcmk_scheduler_t *scheduler)
 {
     const char *cluster_name = NULL;
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .now = scheduler->now,
         .match_data = NULL,
         .rsc_data = NULL,
         .op_data = NULL
     };
 
     pcmk__insert_dup(node->private->attrs,
                      CRM_ATTR_UNAME, node->private->name);
 
     pcmk__insert_dup(node->private->attrs, CRM_ATTR_ID, node->private->id);
     if (pcmk__str_eq(node->private->id, scheduler->dc_uuid, pcmk__str_casei)) {
         scheduler->dc_node = node;
         pcmk__insert_dup(node->private->attrs,
                          CRM_ATTR_IS_DC, PCMK_VALUE_TRUE);
     } else {
         pcmk__insert_dup(node->private->attrs,
                          CRM_ATTR_IS_DC, PCMK_VALUE_FALSE);
     }
 
     cluster_name = g_hash_table_lookup(scheduler->config_hash,
                                        PCMK_OPT_CLUSTER_NAME);
     if (cluster_name) {
         pcmk__insert_dup(node->private->attrs, CRM_ATTR_CLUSTER_NAME,
                          cluster_name);
     }
 
     pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_INSTANCE_ATTRIBUTES, &rule_data,
                                node->private->attrs, NULL, overwrite,
                                scheduler);
 
     pe__unpack_dataset_nvpairs(xml_obj, PCMK_XE_UTILIZATION, &rule_data,
                                node->private->utilization, NULL,
                                FALSE, scheduler);
 
     if (pcmk__node_attr(node, CRM_ATTR_SITE_NAME, NULL,
                         pcmk__rsc_node_current) == NULL) {
         const char *site_name = pcmk__node_attr(node, "site-name", NULL,
                                                 pcmk__rsc_node_current);
 
         if (site_name) {
             pcmk__insert_dup(node->private->attrs,
                              CRM_ATTR_SITE_NAME, site_name);
 
         } else if (cluster_name) {
             /* Default to cluster-name if unset */
             pcmk__insert_dup(node->private->attrs,
                              CRM_ATTR_SITE_NAME, cluster_name);
         }
     }
 }
 
 static GList *
 extract_operations(const char *node, const char *rsc, xmlNode *rsc_entry,
                    gboolean active_filter)
 {
     int counter = -1;
     int stop_index = -1;
     int start_index = -1;
 
     xmlNode *rsc_op = NULL;
 
     GList *gIter = NULL;
     GList *op_list = NULL;
     GList *sorted_op_list = NULL;
 
     /* Extract operations */
 
     for (rsc_op = pcmk__xe_first_child(rsc_entry, NULL, NULL, NULL);
          rsc_op != NULL; rsc_op = pcmk__xe_next(rsc_op)) {
 
         if (pcmk__xe_is(rsc_op, PCMK__XE_LRM_RSC_OP)) {
             crm_xml_add(rsc_op, PCMK_XA_RESOURCE, rsc);
             crm_xml_add(rsc_op, PCMK_XA_UNAME, node);
             op_list = g_list_prepend(op_list, rsc_op);
         }
     }
 
     if (op_list == NULL) {
         /* if there are no operations, there is nothing to do */
         return NULL;
     }
 
     sorted_op_list = g_list_sort(op_list, sort_op_by_callid);
 
     /* If not filtering out inactive operations, return the full sorted list */
     if (active_filter == FALSE) {
         return sorted_op_list;
     }
 
     op_list = NULL;
 
     calculate_active_ops(sorted_op_list, &start_index, &stop_index);
 
     for (gIter = sorted_op_list; gIter != NULL; gIter = gIter->next) {
         xmlNode *rsc_op = (xmlNode *) gIter->data;
 
         counter++;
 
         if (start_index < stop_index) {
             crm_trace("Skipping %s: not active", pcmk__xe_id(rsc_entry));
             break;
 
         } else if (counter < start_index) {
             crm_trace("Skipping %s: old", pcmk__xe_id(rsc_op));
             continue;
         }
         op_list = g_list_append(op_list, rsc_op);
     }
 
     g_list_free(sorted_op_list);
     return op_list;
 }
 
 GList *
 find_operations(const char *rsc, const char *node, gboolean active_filter,
                 pcmk_scheduler_t *scheduler)
 {
     GList *output = NULL;
     GList *intermediate = NULL;
 
     xmlNode *tmp = NULL;
     xmlNode *status = pcmk__xe_first_child(scheduler->input, PCMK_XE_STATUS,
                                            NULL, NULL);
 
     pcmk_node_t *this_node = NULL;
 
     xmlNode *node_state = NULL;
 
     CRM_CHECK(status != NULL, return NULL);
 
     for (node_state = pcmk__xe_first_child(status, NULL, NULL, NULL);
          node_state != NULL; node_state = pcmk__xe_next(node_state)) {
 
         if (pcmk__xe_is(node_state, PCMK__XE_NODE_STATE)) {
             const char *uname = crm_element_value(node_state, PCMK_XA_UNAME);
 
             if (node != NULL && !pcmk__str_eq(uname, node, pcmk__str_casei)) {
                 continue;
             }
 
             this_node = pcmk_find_node(scheduler, uname);
             if (this_node == NULL) {
                 CRM_LOG_ASSERT(this_node != NULL);
                 continue;
 
             } else if (pcmk__is_pacemaker_remote_node(this_node)) {
                 determine_remote_online_status(scheduler, this_node);
 
             } else {
                 determine_online_status(node_state, this_node, scheduler);
             }
 
             if (this_node->details->online
                 || pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
                 /* Offline nodes run no resources, unless fencing is enabled,
                  * in which case we need to make sure resource start events
                  * happen after the fencing
                  */
                 xmlNode *lrm_rsc = NULL;
 
                 tmp = pcmk__xe_first_child(node_state, PCMK__XE_LRM, NULL,
                                            NULL);
                 tmp = pcmk__xe_first_child(tmp, PCMK__XE_LRM_RESOURCES, NULL,
                                            NULL);
 
                 for (lrm_rsc = pcmk__xe_first_child(tmp, NULL, NULL, NULL);
                      lrm_rsc != NULL; lrm_rsc = pcmk__xe_next(lrm_rsc)) {
 
                     if (pcmk__xe_is(lrm_rsc, PCMK__XE_LRM_RESOURCE)) {
                         const char *rsc_id = crm_element_value(lrm_rsc,
                                                                PCMK_XA_ID);
 
                         if (rsc != NULL && !pcmk__str_eq(rsc_id, rsc, pcmk__str_casei)) {
                             continue;
                         }
 
                         intermediate = extract_operations(uname, rsc_id, lrm_rsc, active_filter);
                         output = g_list_concat(output, intermediate);
                     }
                 }
             }
         }
     }
 
     return output;
 }
diff --git a/lib/pengine/utils.c b/lib/pengine/utils.c
index d798a49eb6..d647a3b390 100644
--- a/lib/pengine/utils.c
+++ b/lib/pengine/utils.c
@@ -1,905 +1,906 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <glib.h>
 #include <stdbool.h>
 
 #include <crm/crm.h>
 #include <crm/common/xml.h>
 #include <crm/pengine/rules.h>
 #include <crm/pengine/internal.h>
 
 #include "pe_status_private.h"
 
 extern bool pcmk__is_daemon;
 
 gboolean ghash_free_str_str(gpointer key, gpointer value, gpointer user_data);
 
 /*!
  * \internal
  * \brief Check whether we can fence a particular node
  *
  * \param[in] scheduler  Scheduler data
  * \param[in] node       Name of node to check
  *
  * \return true if node can be fenced, false otherwise
  */
 bool
 pe_can_fence(const pcmk_scheduler_t *scheduler, const pcmk_node_t *node)
 {
     if (pcmk__is_guest_or_bundle_node(node)) {
         /* A guest or bundle node is fenced by stopping its launcher, which is
          * possible if the launcher's host is either online or fenceable.
          */
         pcmk_resource_t *rsc = node->private->remote->private->launcher;
 
         for (GList *n = rsc->private->active_nodes; n != NULL; n = n->next) {
             pcmk_node_t *launcher_node = n->data;
 
             if (!launcher_node->details->online
                 && !pe_can_fence(scheduler, launcher_node)) {
                 return false;
             }
         }
         return true;
 
     } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_fencing_enabled)) {
         return false; /* Turned off */
 
     } else if (!pcmk_is_set(scheduler->flags, pcmk_sched_have_fencing)) {
         return false; /* No devices */
 
     } else if (pcmk_is_set(scheduler->flags, pcmk_sched_quorate)) {
         return true;
 
     } else if (scheduler->no_quorum_policy == pcmk_no_quorum_ignore) {
         return true;
 
     } else if (node == NULL) {
         return false;
 
     } else if (node->details->online) {
         crm_notice("We can fence %s without quorum because it is "
                    "in our membership", pcmk__node_name(node));
         return true;
     }
 
     crm_trace("Cannot fence %s", pcmk__node_name(node));
     return false;
 }
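 
 /* Illustration only (not part of this patch): a minimal sketch of guarding
  * fencing with pe_can_fence(), reusing pe_fence_op() as called elsewhere in
  * this patch; "scheduler" and "node" are assumed to come from the caller.
  */
 #if 0
 if (pe_can_fence(scheduler, node)) {
     pcmk_action_t *fence = pe_fence_op(node, NULL, TRUE, NULL, FALSE,
                                        scheduler);
 
     // ... order other actions relative to the fencing operation ...
 }
 #endif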
 
 /*!
  * \internal
  * \brief Copy a node object
  *
  * \param[in] this_node  Node object to copy
  *
  * \return Newly allocated shallow copy of this_node
  * \note This function asserts on errors and is guaranteed to return non-NULL.
  */
 pcmk_node_t *
 pe__copy_node(const pcmk_node_t *this_node)
 {
     pcmk_node_t *new_node = NULL;
 
     CRM_ASSERT(this_node != NULL);
 
     new_node = pcmk__assert_alloc(1, sizeof(pcmk_node_t));
     new_node->assign = pcmk__assert_alloc(1,
                                           sizeof(struct pcmk__node_assignment));
 
     new_node->assign->probe_mode = this_node->assign->probe_mode;
     new_node->assign->score = this_node->assign->score;
     new_node->assign->count = this_node->assign->count;
     new_node->details = this_node->details;
     new_node->private = this_node->private;
 
     return new_node;
 }
 
 /*!
  * \internal
  * \brief Create a node hash table from a node list
  *
  * \param[in] list  Node list
  *
  * \return Hash table equivalent of node list
  */
 GHashTable *
 pe__node_list2table(const GList *list)
 {
     GHashTable *result = NULL;
 
     result = pcmk__strkey_table(NULL, free);
     for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) {
         pcmk_node_t *new_node = NULL;
 
         new_node = pe__copy_node((const pcmk_node_t *) gIter->data);
         g_hash_table_insert(result, (gpointer) new_node->private->id, new_node);
     }
     return result;
 }
 
 /*!
  * \internal
  * \brief Compare two nodes by name, with numeric portions sorted numerically
  *
  * Sort two node names case-insensitively like strcasecmp(), but with any
  * numeric portions of the name sorted numerically. For example, "node10" will
  * sort higher than "node9" but lower than "remotenode9".
  *
  * \param[in] a  First node to compare (can be \c NULL)
  * \param[in] b  Second node to compare (can be \c NULL)
  *
  * \retval -1 \c a comes before \c b (or \c a is \c NULL and \c b is not)
  * \retval  0 \c a and \c b are equal (or both are \c NULL)
  * \retval  1 \c a comes after \c b (or \c b is \c NULL and \c a is not)
  */
 gint
 pe__cmp_node_name(gconstpointer a, gconstpointer b)
 {
     const pcmk_node_t *node1 = (const pcmk_node_t *) a;
     const pcmk_node_t *node2 = (const pcmk_node_t *) b;
 
     if ((node1 == NULL) && (node2 == NULL)) {
         return 0;
     }
 
     if (node1 == NULL) {
         return -1;
     }
 
     if (node2 == NULL) {
         return 1;
     }
 
     return pcmk__numeric_strcasecmp(node1->private->name, node2->private->name);
 }
 
 /*!
  * \internal
  * \brief Output node weights to stdout
  *
  * \param[in]     rsc        Use allowed nodes for this resource
  * \param[in]     comment    Text description to prefix lines with
  * \param[in]     nodes      If rsc is not specified, use these nodes
  * \param[in,out] scheduler  Scheduler data
  */
 static void
 pe__output_node_weights(const pcmk_resource_t *rsc, const char *comment,
                         GHashTable *nodes, pcmk_scheduler_t *scheduler)
 {
     pcmk__output_t *out = scheduler->priv;
 
     // Sort the nodes so the output is consistent for regression tests
     GList *list = g_list_sort(g_hash_table_get_values(nodes),
                               pe__cmp_node_name);
 
     for (const GList *gIter = list; gIter != NULL; gIter = gIter->next) {
         const pcmk_node_t *node = (const pcmk_node_t *) gIter->data;
 
         out->message(out, "node-weight", rsc, comment, node->private->name,
                      pcmk_readable_score(node->assign->score));
     }
     g_list_free(list);
 }
 
 /*!
  * \internal
  * \brief Log node weights at trace level
  *
  * \param[in] file      Caller's filename
  * \param[in] function  Caller's function name
  * \param[in] line      Caller's line number
  * \param[in] rsc       If not NULL, include this resource's ID in logs
  * \param[in] comment   Text description to prefix lines with
  * \param[in] nodes     Nodes whose scores should be logged
  */
 static void
 pe__log_node_weights(const char *file, const char *function, int line,
                      const pcmk_resource_t *rsc, const char *comment,
                      GHashTable *nodes)
 {
     GHashTableIter iter;
     pcmk_node_t *node = NULL;
 
     // Don't waste time if we're not tracing at this point
     pcmk__if_tracing({}, return);
 
     g_hash_table_iter_init(&iter, nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
         if (rsc) {
             qb_log_from_external_source(function, file,
                                         "%s: %s allocation score on %s: %s",
                                         LOG_TRACE, line, 0,
                                         comment, rsc->id,
                                         pcmk__node_name(node),
                                         pcmk_readable_score(node->assign->score));
         } else {
             qb_log_from_external_source(function, file, "%s: %s = %s",
                                         LOG_TRACE, line, 0,
                                         comment, pcmk__node_name(node),
                                         pcmk_readable_score(node->assign->score));
         }
     }
 }
 
 /*!
  * \internal
  * \brief Log or output node weights
  *
  * \param[in]     file       Caller's filename
  * \param[in]     function   Caller's function name
  * \param[in]     line       Caller's line number
  * \param[in]     to_log     Log if true, otherwise output
  * \param[in]     rsc        If not NULL, use this resource's ID in logs,
  *                           and show scores recursively for any children
  * \param[in]     comment    Text description to prefix lines with
  * \param[in]     nodes      Nodes whose scores should be shown
  * \param[in,out] scheduler  Scheduler data
  */
 void
 pe__show_node_scores_as(const char *file, const char *function, int line,
                         bool to_log, const pcmk_resource_t *rsc,
                         const char *comment, GHashTable *nodes,
                         pcmk_scheduler_t *scheduler)
 {
     if ((rsc != NULL) && pcmk_is_set(rsc->flags, pcmk__rsc_removed)) {
         // Don't show allocation scores for orphans
         return;
     }
     if (nodes == NULL) {
         // Nothing to show
         return;
     }
 
     if (to_log) {
         pe__log_node_weights(file, function, line, rsc, comment, nodes);
     } else {
         pe__output_node_weights(rsc, comment, nodes, scheduler);
     }
 
     if (rsc == NULL) {
         return;
     }
 
     // If this resource has children, repeat recursively for each
     for (GList *gIter = rsc->private->children;
          gIter != NULL; gIter = gIter->next) {
 
         pcmk_resource_t *child = (pcmk_resource_t *) gIter->data;
 
         pe__show_node_scores_as(file, function, line, to_log, child, comment,
                                 child->private->allowed_nodes, scheduler);
     }
 }
 
 /*!
  * \internal
  * \brief Compare two resources by priority
  *
  * \param[in] a  First resource to compare (can be \c NULL)
  * \param[in] b  Second resource to compare (can be \c NULL)
  *
  * \retval -1 a's priority > b's priority (or \c b is \c NULL and \c a is not)
  * \retval  0 a's priority == b's priority (or both \c a and \c b are \c NULL)
  * \retval  1 a's priority < b's priority (or \c a is \c NULL and \c b is not)
  */
 gint
 pe__cmp_rsc_priority(gconstpointer a, gconstpointer b)
 {
     const pcmk_resource_t *resource1 = (const pcmk_resource_t *)a;
     const pcmk_resource_t *resource2 = (const pcmk_resource_t *)b;
 
     if (a == NULL && b == NULL) {
         return 0;
     }
     if (a == NULL) {
         return 1;
     }
     if (b == NULL) {
         return -1;
     }
 
     if (resource1->private->priority > resource2->private->priority) {
         return -1;
     }
 
     if (resource1->private->priority < resource2->private->priority) {
         return 1;
     }
 
     return 0;
 }
 
 static void
 resource_node_score(pcmk_resource_t *rsc, const pcmk_node_t *node, int score,
                     const char *tag)
 {
     pcmk_node_t *match = NULL;
 
     if ((pcmk_is_set(rsc->flags, pcmk__rsc_exclusive_probes)
          || (node->assign->probe_mode == pcmk__probe_never))
         && pcmk__str_eq(tag, "symmetric_default", pcmk__str_casei)) {
         /* This string comparison may be fragile, but exclusive resources and
          * exclusive nodes should not have the symmetric_default constraint
          * applied to them.
          */
         return;
 
     } else {
         for (GList *gIter = rsc->private->children;
              gIter != NULL; gIter = gIter->next) {
 
             pcmk_resource_t *child_rsc = (pcmk_resource_t *) gIter->data;
 
             resource_node_score(child_rsc, node, score, tag);
         }
     }
 
     match = g_hash_table_lookup(rsc->private->allowed_nodes, node->private->id);
     if (match == NULL) {
         match = pe__copy_node(node);
         g_hash_table_insert(rsc->private->allowed_nodes,
                             (gpointer) match->private->id, match);
     }
     match->assign->score = pcmk__add_scores(match->assign->score, score);
     pcmk__rsc_trace(rsc,
                     "Enabling %s preference (%s) for %s on %s (now %s)",
                     tag, pcmk_readable_score(score), rsc->id,
                     pcmk__node_name(node),
                     pcmk_readable_score(match->assign->score));
 }
 
 void
 resource_location(pcmk_resource_t *rsc, const pcmk_node_t *node, int score,
                   const char *tag, pcmk_scheduler_t *scheduler)
 {
     if (node != NULL) {
         resource_node_score(rsc, node, score, tag);
 
     } else if (scheduler != NULL) {
         GList *gIter = scheduler->nodes;
 
         for (; gIter != NULL; gIter = gIter->next) {
             pcmk_node_t *node_iter = (pcmk_node_t *) gIter->data;
 
             resource_node_score(rsc, node_iter, score, tag);
         }
 
     } else {
         GHashTableIter iter;
         pcmk_node_t *node_iter = NULL;
 
         g_hash_table_iter_init(&iter, rsc->private->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&node_iter)) {
             resource_node_score(rsc, node_iter, score, tag);
         }
     }
 
     if ((node == NULL) && (score == -PCMK_SCORE_INFINITY)
         && (rsc->private->assigned_node != NULL)) {
 
         // @TODO Should this be more like pcmk__unassign_resource()?
         crm_info("Unassigning %s from %s",
                  rsc->id, pcmk__node_name(rsc->private->assigned_node));
         free(rsc->private->assigned_node);
         rsc->private->assigned_node = NULL;
     }
 }
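 
 /* Illustration only (not part of this patch): the "ban" pattern used
  * earlier in this patch (e.g. for masked probe failures). A -INFINITY
  * score forbids the resource from the given node.
  */
 #if 0
 resource_location(rsc, node, -PCMK_SCORE_INFINITY, "example-ban",
                   rsc->private->scheduler);
 #endif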
 
 time_t
 get_effective_time(pcmk_scheduler_t *scheduler)
 {
     if (scheduler) {
         if (scheduler->now == NULL) {
             crm_trace("Recording a new 'now'");
             scheduler->now = crm_time_new(NULL);
         }
         return crm_time_get_seconds_since_epoch(scheduler->now);
     }
 
     crm_trace("Defaulting to 'now'");
     return time(NULL);
 }
 
 gboolean
 get_target_role(const pcmk_resource_t *rsc, enum rsc_role_e *role)
 {
     enum rsc_role_e local_role = pcmk_role_unknown;
     const char *value = g_hash_table_lookup(rsc->private->meta,
                                             PCMK_META_TARGET_ROLE);
 
     CRM_CHECK(role != NULL, return FALSE);
 
     if (pcmk__str_eq(value, PCMK_ROLE_STARTED,
                      pcmk__str_null_matches|pcmk__str_casei)) {
         return FALSE;
     }
     if (pcmk__str_eq(PCMK_VALUE_DEFAULT, value, pcmk__str_casei)) {
         // @COMPAT Deprecated since 2.1.8
         pcmk__config_warn("Support for setting " PCMK_META_TARGET_ROLE
                           " to the explicit value '" PCMK_VALUE_DEFAULT
                           "' is deprecated and will be removed in a "
                           "future release (just leave it unset)");
         return FALSE;
     }
 
     local_role = pcmk_parse_role(value);
     if (local_role == pcmk_role_unknown) {
         pcmk__config_err("Ignoring '" PCMK_META_TARGET_ROLE "' for %s "
                          "because '%s' is not valid", rsc->id, value);
         return FALSE;
 
     } else if (local_role > pcmk_role_started) {
         if (pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
                         pcmk__rsc_promotable)) {
             if (local_role > pcmk_role_unpromoted) {
                 /* This is what we'd do anyway, just leave the default to
                  * avoid messing up the placement algorithm
                  */
                 return FALSE;
             }
 
         } else {
             pcmk__config_err("Ignoring '" PCMK_META_TARGET_ROLE "' for %s "
                              "because '%s' only makes sense for promotable "
                              "clones", rsc->id, value);
             return FALSE;
         }
     }
 
     *role = local_role;
     return TRUE;
 }
 
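 /*!
  * \internal
  * \brief Create a new ordering between two actions
  *
  * \param[in,out] lh_action  Action that should happen first
  * \param[in,out] rh_action  Action that should happen second
  * \param[in]     flags      Group of enum pcmk__action_relation_flags
  *
  * \return TRUE if the ordering was created, or FALSE if the arguments are
  *         invalid or an equivalent ordering already exists
  */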
 gboolean
 order_actions(pcmk_action_t *lh_action, pcmk_action_t *rh_action,
               uint32_t flags)
 {
     GList *gIter = NULL;
     pcmk__related_action_t *wrapper = NULL;
     GList *list = NULL;
 
     if (flags == pcmk__ar_none) {
         return FALSE;
     }
 
     if (lh_action == NULL || rh_action == NULL) {
         return FALSE;
     }
 
     crm_trace("Creating action wrappers for ordering: %s then %s",
               lh_action->uuid, rh_action->uuid);
 
     /* Ensure we never create a dependency on ourselves... it's happened */
     CRM_ASSERT(lh_action != rh_action);
 
     /* Filter dups, otherwise update_action_states() has too much work to do */
     gIter = lh_action->actions_after;
     for (; gIter != NULL; gIter = gIter->next) {
         pcmk__related_action_t *after = gIter->data;
 
         if ((after->action == rh_action)
             && pcmk_any_flags_set(after->flags, flags)) {
             return FALSE;
         }
     }
 
     wrapper = pcmk__assert_alloc(1, sizeof(pcmk__related_action_t));
     wrapper->action = rh_action;
     wrapper->flags = flags;
     list = lh_action->actions_after;
     list = g_list_prepend(list, wrapper);
     lh_action->actions_after = list;
 
     wrapper = pcmk__assert_alloc(1, sizeof(pcmk__related_action_t));
     wrapper->action = lh_action;
     wrapper->flags = flags;
     list = rh_action->actions_before;
     list = g_list_prepend(list, wrapper);
     rh_action->actions_before = list;
     return TRUE;
 }
 
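 /*!
  * \internal
  * \brief Free a ticket and its state table (GDestroyNotify for tables)
  *
  * \param[in,out] data  Ticket to free
  */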
 void
 destroy_ticket(gpointer data)
 {
     pcmk__ticket_t *ticket = data;
 
     if (ticket->state) {
         g_hash_table_destroy(ticket->state);
     }
     free(ticket->id);
     free(ticket);
 }
 
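 /*!
  * \internal
  * \brief Get a ticket from scheduler data, creating it if needed
  *
  * \param[in]     ticket_id  ID of ticket to get
  * \param[in,out] scheduler  Scheduler data
  *
  * \return Ticket with ID \p ticket_id (newly created if not already in
  *         \p scheduler's ticket table), or NULL on error
  */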
 pcmk__ticket_t *
 ticket_new(const char *ticket_id, pcmk_scheduler_t *scheduler)
 {
     pcmk__ticket_t *ticket = NULL;
 
     if (pcmk__str_empty(ticket_id)) {
         return NULL;
     }
 
     if (scheduler->tickets == NULL) {
         scheduler->tickets = pcmk__strkey_table(free, destroy_ticket);
     }
 
     ticket = g_hash_table_lookup(scheduler->tickets, ticket_id);
     if (ticket == NULL) {
 
         ticket = calloc(1, sizeof(pcmk__ticket_t));
         if (ticket == NULL) {
-            pcmk__sched_err("Cannot allocate ticket '%s'", ticket_id);
+            pcmk__sched_err(scheduler, "Cannot allocate ticket '%s'",
+                            ticket_id);
             return NULL;
         }
 
         crm_trace("Creating ticket entry for %s", ticket_id);
 
         ticket->id = strdup(ticket_id);
         ticket->last_granted = -1;
         ticket->state = pcmk__strkey_table(free, free);
 
         g_hash_table_insert(scheduler->tickets, strdup(ticket->id), ticket);
     }
 
     return ticket;
 }
 
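 /*!
  * \internal
  * \brief Get a resource's ID as it should be displayed in output
  *
  * \param[in] rsc  Resource to check
  *
  * \return \p rsc's ID if the resource is globally unique, otherwise the ID
  *         of the resource's XML configuration
  */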
 const char *
 rsc_printable_id(const pcmk_resource_t *rsc)
 {
     if (pcmk_is_set(rsc->flags, pcmk__rsc_unique)) {
         return rsc->id;
     }
     return pcmk__xe_id(rsc->private->xml);
 }
 
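 /*!
  * \internal
  * \brief Clear resource flags for a resource and all of its descendants
  *
  * \param[in,out] rsc    Resource whose flags should be cleared
  * \param[in]     flags  Group of flags to clear
  */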
 void
 pe__clear_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags)
 {
     pcmk__clear_rsc_flags(rsc, flags);
 
     for (GList *gIter = rsc->private->children;
          gIter != NULL; gIter = gIter->next) {
 
         pe__clear_resource_flags_recursive((pcmk_resource_t *) gIter->data,
                                            flags);
     }
 }
 
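 /*!
  * \internal
  * \brief Clear a resource flag for all resources in scheduler data
  *
  * \param[in,out] scheduler  Scheduler data
  * \param[in]     flag       Flag to clear (recursively, for descendants too)
  */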
 void
 pe__clear_resource_flags_on_all(pcmk_scheduler_t *scheduler, uint64_t flag)
 {
     for (GList *lpc = scheduler->resources; lpc != NULL; lpc = lpc->next) {
         pcmk_resource_t *r = (pcmk_resource_t *) lpc->data;
         pe__clear_resource_flags_recursive(r, flag);
     }
 }
 
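 /*!
  * \internal
  * \brief Set resource flags for a resource and all of its descendants
  *
  * \param[in,out] rsc    Resource whose flags should be set
  * \param[in]     flags  Group of flags to set
  */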
 void
 pe__set_resource_flags_recursive(pcmk_resource_t *rsc, uint64_t flags)
 {
     pcmk__set_rsc_flags(rsc, flags);
 
     for (GList *gIter = rsc->private->children;
          gIter != NULL; gIter = gIter->next) {
 
         pe__set_resource_flags_recursive((pcmk_resource_t *) gIter->data,
                                          flags);
     }
 }
 
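 /*!
  * \internal
  * \brief Schedule any unfencing needed for a resource or node
  *
  * If unfencing is enabled cluster-wide and \p node is an online, clean node
  * that is not shutting down, schedule an unfence ("on") action for it,
  * ordered before \p dependency if one is given.  Otherwise, if \p rsc is
  * given, do the same for every eligible node in the resource's allowed
  * nodes table.  Resources that are not fence devices are ignored.
  *
  * \param[in,out] rsc         If not NULL, resource requiring the unfencing
  * \param[in,out] node        If not NULL, node to unfence
  * \param[in]     reason      Reason for the unfencing (for logging)
  * \param[in,out] dependency  If not NULL, action that requires unfencing
  * \param[in,out] scheduler   Scheduler data
  */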
 void
 trigger_unfencing(pcmk_resource_t *rsc, pcmk_node_t *node, const char *reason,
                   pcmk_action_t *dependency, pcmk_scheduler_t *scheduler)
 {
     if (!pcmk_is_set(scheduler->flags, pcmk_sched_enable_unfencing)) {
         /* No resources require it */
         return;
 
     } else if ((rsc != NULL)
                && !pcmk_is_set(rsc->flags, pcmk__rsc_fence_device)) {
         /* Wasn't a stonith device */
         return;
 
     } else if ((node != NULL) && node->details->online
                && (node->details->unclean == FALSE)
                && (node->details->shutdown == FALSE)) {
         pcmk_action_t *unfence = pe_fence_op(node, PCMK_ACTION_ON, FALSE,
                                              reason, FALSE, scheduler);
 
         if (dependency != NULL) {
             order_actions(unfence, dependency, pcmk__ar_ordered);
         }
 
     } else if (rsc != NULL) {
         GHashTableIter iter;
 
         g_hash_table_iter_init(&iter, rsc->private->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
             if (node->details->online && (node->details->unclean == FALSE)
                 && (node->details->shutdown == FALSE)) {
                 trigger_unfencing(rsc, node, reason, dependency, scheduler);
             }
         }
     }
 }
 
 /*!
  * \internal
  * \brief Check whether shutdown has been requested for a node
  *
  * \param[in] node  Node to check
  *
  * \return TRUE if node has shutdown attribute set and nonzero, FALSE otherwise
  * \note This differs from simply using node->details->shutdown in that it can
  *       be used before that has been determined (and in fact to determine it),
  *       and it can also be used to distinguish requested shutdown from implicit
  *       shutdown of remote nodes by virtue of their connection stopping.
  */
 bool
 pe__shutdown_requested(const pcmk_node_t *node)
 {
     const char *shutdown = pcmk__node_attr(node, PCMK__NODE_ATTR_SHUTDOWN, NULL,
                                            pcmk__rsc_node_current);
 
     return !pcmk__str_eq(shutdown, "0", pcmk__str_null_matches);
 }
 
 /*!
  * \internal
  * \brief Update "recheck by" time in scheduler data
  *
  * \param[in]     recheck    Epoch time when recheck should happen
  * \param[in,out] scheduler  Scheduler data
  * \param[in]     reason     What time is being updated for (for logs)
  */
 void
 pe__update_recheck_time(time_t recheck, pcmk_scheduler_t *scheduler,
                         const char *reason)
 {
     if ((recheck > get_effective_time(scheduler))
         && ((scheduler->recheck_by == 0)
             || (scheduler->recheck_by > recheck))) {
         scheduler->recheck_by = recheck;
         crm_debug("Updated next scheduler recheck to %s for %s",
                   pcmk__trim(ctime(&recheck)), reason);
     }
 }
 
 /*!
  * \internal
  * \brief Extract nvpair blocks contained by a CIB XML element into a hash table
  *
  * \param[in]     xml_obj       XML element containing blocks of nvpair elements
  * \param[in]     set_name      If not NULL, only use blocks of this element
  * \param[in]     rule_data     Matching parameters to use when unpacking
  * \param[out]    hash          Where to store extracted name/value pairs
  * \param[in]     always_first  If not NULL, process block with this ID first
  * \param[in]     overwrite     Whether to replace existing values with same name
  * \param[in,out] scheduler     Scheduler data containing \p xml_obj
  */
 void
 pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
                            const pe_rule_eval_data_t *rule_data,
                            GHashTable *hash, const char *always_first,
                            gboolean overwrite, pcmk_scheduler_t *scheduler)
 {
     crm_time_t *next_change = crm_time_new_undefined();
 
     pe_eval_nvpairs(scheduler->input, xml_obj, set_name, rule_data, hash,
                     always_first, overwrite, next_change);
     if (crm_time_is_defined(next_change)) {
         time_t recheck = (time_t) crm_time_get_seconds_since_epoch(next_change);
 
         pe__update_recheck_time(recheck, scheduler, "rule evaluation");
     }
     crm_time_free(next_change);
 }
 
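 /*!
  * \internal
  * \brief Check whether a resource is disabled via its target role
  *
  * \param[in] rsc  Resource to check
  *
  * \return true if \p rsc has a target role of Stopped (or Unpromoted, for
  *         an instance of a promotable clone), otherwise false
  */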
 bool
 pe__resource_is_disabled(const pcmk_resource_t *rsc)
 {
     const char *target_role = NULL;
 
     CRM_CHECK(rsc != NULL, return false);
     target_role = g_hash_table_lookup(rsc->private->meta,
                                       PCMK_META_TARGET_ROLE);
     if (target_role) {
         // If invalid, we've already logged an error when unpacking
         enum rsc_role_e target_role_e = pcmk_parse_role(target_role);
 
         if ((target_role_e == pcmk_role_stopped)
             || ((target_role_e == pcmk_role_unpromoted)
                 && pcmk_is_set(pe__const_top_resource(rsc, false)->flags,
                                pcmk__rsc_promotable))) {
             return true;
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Check whether a resource is running only on given node
  *
  * \param[in] rsc   Resource to check
  * \param[in] node  Node to check
  *
  * \return true if \p rsc is running only on \p node, otherwise false
  */
 bool
 pe__rsc_running_on_only(const pcmk_resource_t *rsc, const pcmk_node_t *node)
 {
     return (rsc != NULL) && pcmk__list_of_1(rsc->private->active_nodes)
            && pcmk__same_node((const pcmk_node_t *)
                               rsc->private->active_nodes->data, node);
 }
 
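 /*!
  * \internal
  * \brief Check whether a resource is active on any of the named nodes
  *
  * \param[in] rsc        Resource to check
  * \param[in] node_list  List of node names ("*" matches any node)
  *
  * \return true if \p rsc is active on a node whose name is in \p node_list,
  *         otherwise false
  */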
 bool
 pe__rsc_running_on_any(pcmk_resource_t *rsc, GList *node_list)
 {
     if (rsc != NULL) {
         for (GList *ele = rsc->private->active_nodes; ele; ele = ele->next) {
             pcmk_node_t *node = (pcmk_node_t *) ele->data;
             if (pcmk__str_in_list(node->private->name, node_list,
                                   pcmk__str_star_matches|pcmk__str_casei)) {
                 return true;
             }
         }
     }
     return false;
 }
 
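 /*!
  * \internal
  * \brief Check whether a resource should be filtered out by a node list
  *
  * \param[in] rsc        Resource to check
  * \param[in] only_node  List of node names to display
  *
  * \return true if \p rsc is active but on none of the nodes in
  *         \p only_node, otherwise false
  */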
 bool
 pcmk__rsc_filtered_by_node(pcmk_resource_t *rsc, GList *only_node)
 {
     return rsc->private->fns->active(rsc, FALSE)
            && !pe__rsc_running_on_any(rsc, only_node);
 }
 
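 /*!
  * \internal
  * \brief Filter a list of resources by printable ID
  *
  * \param[in] rscs    Resources to filter
  * \param[in] filter  List of printable IDs to match ("*" matches any)
  *
  * \return New list of members of \p rscs whose printable ID (or whose
  *         parent's printable ID) is in \p filter
  *
  * \note The returned list shares entries with \p rscs rather than copying
  *       them.
  */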
 GList *
 pe__filter_rsc_list(GList *rscs, GList *filter)
 {
     GList *retval = NULL;
 
     for (GList *gIter = rscs; gIter; gIter = gIter->next) {
         pcmk_resource_t *rsc = (pcmk_resource_t *) gIter->data;
 
         /* I think the second condition is safe here for all callers of this
          * function.  If not, it needs to move into pe__node_text.
          */
         if (pcmk__str_in_list(rsc_printable_id(rsc), filter,
                               pcmk__str_star_matches)
             || ((rsc->private->parent != NULL)
                 && pcmk__str_in_list(rsc_printable_id(rsc->private->parent),
                                      filter, pcmk__str_star_matches))) {
             retval = g_list_prepend(retval, rsc);
         }
     }
 
     return retval;
 }
 
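 /*!
  * \internal
  * \brief Build a list of node names from a user-supplied string
  *
  * \param[in,out] scheduler  Scheduler data
  * \param[in]     s          NULL or "*" for all nodes, a node name, or a
  *                           node tag
  *
  * \return List containing "*" if \p s is NULL or "*", a singleton list if
  *         \p s is a valid node name, or the names of all nodes with tag
  *         \p s (possibly NULL)
  */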
 GList *
 pe__build_node_name_list(pcmk_scheduler_t *scheduler, const char *s)
 {
     GList *nodes = NULL;
 
     if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
         /* Either nothing was given, or '*' was given explicitly.  The
          * latter would normally fall into the pe__unames_with_tag() branch
          * below, which would return an empty list, so catch it here
          * instead, returning a list that matches all node names.
          */
         nodes = g_list_prepend(nodes, strdup("*"));
     } else {
         pcmk_node_t *node = pcmk_find_node(scheduler, s);
 
         if (node) {
             /* The given string was a valid uname for a node.  Return a
              * singleton list containing just that uname.
              */
             nodes = g_list_prepend(nodes, strdup(s));
         } else {
             /* The given string was not a valid uname.  It is either a tag
              * or a typo.  In the first case, return a list of the unames of
              * all nodes with the given tag.  In the second,
              * pe__unames_with_tag() returns NULL and nothing will be
              * displayed.
              */
             nodes = pe__unames_with_tag(scheduler, s);
         }
     }
 
     return nodes;
 }
 
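 /*!
  * \internal
  * \brief Build a list of resource IDs from a user-supplied string
  *
  * \param[in,out] scheduler  Scheduler data
  * \param[in]     s          NULL or "*" for all resources, a resource name,
  *                           or a resource tag
  *
  * \return List containing "*" if \p s is NULL or "*", a singleton list if
  *         \p s matches a resource (the exact string if it contains ':',
  *         otherwise the printable ID), or the IDs of all resources with
  *         tag \p s (possibly NULL)
  */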
 GList *
 pe__build_rsc_list(pcmk_scheduler_t *scheduler, const char *s)
 {
     GList *resources = NULL;
 
     if (pcmk__str_eq(s, "*", pcmk__str_null_matches)) {
         resources = g_list_prepend(resources, strdup("*"));
     } else {
         const uint32_t flags = pcmk_rsc_match_history|pcmk_rsc_match_basename;
         pcmk_resource_t *rsc = pe_find_resource_with_flags(scheduler->resources,
                                                            s, flags);
 
         if (rsc != NULL) {
             /* A colon in the name we were given means we're being asked to
              * filter on a specific instance of a cloned resource.  Put that
              * exact string into the filter list.  Otherwise, use the
              * printable ID of whatever resource was found that matches what
              * was asked for.
              */
             if (strstr(s, ":") != NULL) {
                 resources = g_list_prepend(resources, strdup(rsc->id));
             } else {
                 resources = g_list_prepend(resources,
                                            strdup(rsc_printable_id(rsc)));
             }
         } else {
             /* The given string was not a valid resource name.  It is either
              * a tag or a typo; see pe__build_node_name_list() for details.
              */
             resources = pe__rscs_with_tag(scheduler, s);
         }
     }
 
     return resources;
 }
 
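 /*!
  * \internal
  * \brief Find a failed probe for a resource in scheduler failure history
  *
  * For an instance of an anonymous clone, the clone child ID is matched.
  *
  * \param[in] rsc   Resource whose probe failures should be searched
  * \param[in] name  If not NULL, limit search to probes run on this node
  *
  * \return XML of first matching failed probe operation, or NULL if none
  */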
 xmlNode *
 pe__failed_probe_for_rsc(const pcmk_resource_t *rsc, const char *name)
 {
     const pcmk_resource_t *parent = pe__const_top_resource(rsc, false);
     const char *rsc_id = rsc->id;
 
     if (pcmk__is_clone(parent)) {
         rsc_id = pe__clone_child_id(parent);
     }
 
     for (xmlNode *xml_op = pcmk__xe_first_child(rsc->private->scheduler->failed,
                                                 NULL, NULL, NULL);
          xml_op != NULL; xml_op = pcmk__xe_next(xml_op)) {
 
         const char *value = NULL;
         char *op_id = NULL;
 
         /* Skip history entries that are not failed probes */
         if (!pcmk_xe_mask_probe_failure(xml_op)) {
             continue;
         }
 
         /* Skip operations that were not run on the given node.  Note that
          * if name is NULL, any node will match.
          */
         value = crm_element_value(xml_op, PCMK__META_ON_NODE);
         if ((value == NULL)
             || !pcmk__str_eq(value, name,
                              pcmk__str_casei|pcmk__str_null_matches)) {
             continue;
         }
 
         if (!parse_op_key(pcmk__xe_history_key(xml_op), &op_id, NULL, NULL)) {
             continue; // This history entry is missing an operation key
         }
 
         // This operation's ID does not match the resource ID we want
         if (!pcmk__str_eq(op_id, rsc_id, pcmk__str_none)) {
             free(op_id);
             continue;
         }
 
         free(op_id);
         return xml_op;
     }
 
     return NULL;
 }