diff --git a/include/pcmki/pcmki_sched_allocate.h b/include/pcmki/pcmki_sched_allocate.h
index 82134c3702..b3549a526e 100644
--- a/include/pcmki/pcmki_sched_allocate.h
+++ b/include/pcmki/pcmki_sched_allocate.h
@@ -1,72 +1,75 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__PCMKI_PCMKI_SCHED_ALLOCATE__H
 #  define PCMK__PCMKI_PCMKI_SCHED_ALLOCATE__H
 
 #  include <glib.h>
 #  include <crm/common/xml.h>
 #  include <crm/pengine/status.h>
 #  include <crm/pengine/complex.h>
 #  include <crm/common/xml_internal.h>
 #  include <crm/pengine/internal.h>
 #  include <pcmki/pcmki_scheduler.h>
 
 pe_node_t *pcmk__group_allocate(pe_resource_t *rsc, const pe_node_t *prefer);
 void group_create_actions(pe_resource_t *rsc);
 void group_internal_constraints(pe_resource_t *rsc);
 enum pe_action_flags group_action_flags(pe_action_t *action,
                                         const pe_node_t *node);
 void group_rsc_location(pe_resource_t *rsc, pe__location_t *constraint);
 void group_append_meta(pe_resource_t *rsc, xmlNode *xml);
-void pcmk__group_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
+void pcmk__group_add_utilization(const pe_resource_t *rsc,
+                                 const pe_resource_t *orig_rsc,
                                  GList *all_rscs, GHashTable *utilization);
 void pcmk__group_shutdown_lock(pe_resource_t *rsc);
 
 pe_node_t *pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer);
 void pcmk__bundle_create_actions(pe_resource_t *rsc);
 bool pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node);
 void pcmk__bundle_internal_constraints(pe_resource_t *rsc);
 void pcmk__bundle_rsc_location(pe_resource_t *rsc, pe__location_t *constraint);
 enum pe_action_flags pcmk__bundle_action_flags(pe_action_t *action,
                                                const pe_node_t *node);
 void pcmk__bundle_expand(pe_resource_t *rsc);
 void pcmk__bundle_append_meta(pe_resource_t *rsc, xmlNode *xml);
-void pcmk__bundle_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
+void pcmk__bundle_add_utilization(const pe_resource_t *rsc,
+                                  const pe_resource_t *orig_rsc,
                                   GList *all_rscs, GHashTable *utilization);
 void pcmk__bundle_shutdown_lock(pe_resource_t *rsc);
 
 pe_node_t *pcmk__clone_allocate(pe_resource_t *rsc, const pe_node_t *prefer);
 void clone_create_actions(pe_resource_t *rsc);
 void clone_internal_constraints(pe_resource_t *rsc);
 void clone_rsc_location(pe_resource_t *rsc, pe__location_t *constraint);
 enum pe_action_flags clone_action_flags(pe_action_t *action,
                                         const pe_node_t *node);
 void clone_expand(pe_resource_t *rsc);
 bool clone_create_probe(pe_resource_t *rsc, pe_node_t *node);
 void clone_append_meta(pe_resource_t *rsc, xmlNode *xml);
-void pcmk__clone_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
+void pcmk__clone_add_utilization(const pe_resource_t *rsc,
+                                 const pe_resource_t *orig_rsc,
                                  GList *all_rscs, GHashTable *utilization);
 void pcmk__clone_shutdown_lock(pe_resource_t *rsc);
 
 uint32_t group_update_actions(pe_action_t *first, pe_action_t *then,
                               pe_node_t *node, uint32_t flags, uint32_t filter,
                               uint32_t type, pe_working_set_t *data_set);
 uint32_t pcmk__multi_update_actions(pe_action_t *first,
                                     pe_action_t *then,
                                     pe_node_t *node, uint32_t flags,
                                     uint32_t filter, uint32_t type,
                                     pe_working_set_t *data_set);
 
 void pcmk__log_transition_summary(const char *filename);
 void clone_create_pseudo_actions(pe_resource_t *rsc, GList *children,
                                  notify_data_t **start_notify,
                                  notify_data_t **stop_notify);
 #endif // PCMK__PCMKI_PCMKI_SCHED_ALLOCATE__H
diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h
index 3fc9c19724..c5ef204414 100644
--- a/lib/pacemaker/libpacemaker_private.h
+++ b/lib/pacemaker/libpacemaker_private.h
@@ -1,805 +1,806 @@
 /*
  * Copyright 2021-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__LIBPACEMAKER_PRIVATE__H
 #  define PCMK__LIBPACEMAKER_PRIVATE__H
 
 /* This header is for the sole use of libpacemaker, so that functions can be
  * declared with G_GNUC_INTERNAL for efficiency.
  */
 
 #include <crm/pengine/pe_types.h> // pe_action_t, pe_node_t, pe_working_set_t
 
 // Flags to modify the behavior of the add_colocated_node_scores() method
 enum pcmk__coloc_select {
     // With no other flags, apply all "with this" colocations
     pcmk__coloc_select_default      = 0,
 
     // Apply "this with" colocations instead of "with this" colocations
     pcmk__coloc_select_this_with    = (1 << 0),
 
     // Apply only colocations with non-negative scores
     pcmk__coloc_select_nonnegative  = (1 << 1),
 
     // Apply only colocations with at least one matching node
     pcmk__coloc_select_active       = (1 << 2),
 };
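 
 /* Illustrative sketch (hypothetical caller, not from the actual sources):
  * these flags combine bitwise, e.g. to apply only "this with" colocations
  * that have nonnegative scores:
  *
  *   uint32_t flags = pcmk__coloc_select_this_with
  *                    |pcmk__coloc_select_nonnegative;
  *
  *   rsc->cmds->add_colocated_node_scores(rsc, rsc->id, &nodes, NULL, 1.0,
  *                                        flags);
  */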
 
 // Flags the update_ordered_actions() method can return
 enum pcmk__updated {
     pcmk__updated_none      = 0,        // Nothing changed
     pcmk__updated_first     = (1 << 0), // First action was updated
     pcmk__updated_then      = (1 << 1), // Then action was updated
 };
 
 #define pcmk__set_updated_flags(au_flags, action, flags_to_set) do {        \
         au_flags = pcmk__set_flags_as(__func__, __LINE__,                   \
                                       LOG_TRACE, "Action update",           \
                                       (action)->uuid, au_flags,             \
                                       (flags_to_set), #flags_to_set);       \
     } while (0)
 
 #define pcmk__clear_updated_flags(au_flags, action, flags_to_clear) do {    \
         au_flags = pcmk__clear_flags_as(__func__, __LINE__,                 \
                                         LOG_TRACE, "Action update",         \
                                         (action)->uuid, au_flags,           \
                                         (flags_to_clear), #flags_to_clear); \
     } while (0)
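 
 /* For illustration (hypothetical variable names): after changing a "first"
  * action during an ordering update, a caller might record that with:
  *
  *   pcmk__set_updated_flags(changed, first, pcmk__updated_first);
  */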
 
 // Resource allocation methods
 struct resource_alloc_functions_s {
     /*!
      * \internal
      * \brief Assign a resource to a node
      *
      * \param[in,out] rsc     Resource to assign to a node
      * \param[in]     prefer  Node to prefer, if all else is equal
      *
      * \return Node that \p rsc is assigned to, if assigned entirely to one node
      */
     pe_node_t *(*assign)(pe_resource_t *rsc, const pe_node_t *prefer);
 
     /*!
      * \internal
      * \brief Create all actions needed for a given resource
      *
      * \param[in,out] rsc  Resource to create actions for
      */
     void (*create_actions)(pe_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Schedule any probes needed for a resource on a node
      *
      * \param[in] rsc   Resource to create probe for
      * \param[in] node  Node to create probe on
      *
      * \return true if any probe was created, otherwise false
      */
     bool (*create_probe)(pe_resource_t *rsc, pe_node_t *node);
 
     /*!
      * \internal
      * \brief Create implicit constraints needed for a resource
      *
      * \param[in,out] rsc  Resource to create implicit constraints for
      */
     void (*internal_constraints)(pe_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Apply a colocation's score to node weights or resource priority
      *
      * Given a colocation constraint, apply its score to the dependent's
      * allowed node weights (if we are still placing resources) or priority (if
      * we are choosing promotable clone instance roles).
      *
      * \param[in,out] dependent      Dependent resource in colocation
      * \param[in]     primary        Primary resource in colocation
      * \param[in]     colocation     Colocation constraint to apply
      * \param[in]     for_dependent  true if called on behalf of dependent
      */
     void (*apply_coloc_score) (pe_resource_t *dependent,
                                const pe_resource_t *primary,
                                const pcmk__colocation_t *colocation,
                                bool for_dependent);
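 
     /* A minimal usage sketch (hypothetical names): variants dispatch through
      * this method table on behalf of the dependent, e.g.:
      *
      *   dependent->cmds->apply_coloc_score(dependent, primary, colocation,
      *                                      true);
      */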
 
     /*!
      * \internal
      * \brief Update nodes with scores of colocated resources' nodes
      *
      * Given a table of nodes and a resource, update the nodes' scores with the
      * scores of the best nodes matching the attribute used for each of the
      * resource's relevant colocations.
      *
      * \param[in,out] rsc      Resource to check colocations for
      * \param[in]     log_id   Resource ID to use in logs (if NULL, use rsc ID)
      * \param[in,out] nodes    Nodes to update
      * \param[in]     attr     Colocation attribute (NULL to use default)
      * \param[in]     factor   Incorporate scores multiplied by this factor
      * \param[in]     flags    Bitmask of enum pcmk__coloc_select values
      *
      * \note The caller remains responsible for freeing \p *nodes.
      */
     void (*add_colocated_node_scores)(pe_resource_t *rsc, const char *log_id,
                                       GHashTable **nodes, const char *attr,
                                       float factor,
                                       enum pcmk__coloc_select flags);
 
     /*!
      * \internal
      * \brief Create list of all resources in colocations with a given resource
      *
      * Given a resource, create a list of all resources involved in mandatory
      * colocations with it, whether directly or indirectly via chained
      * colocations.
      *
      * \param[in] rsc             Resource to add to colocated list
      * \param[in] orig_rsc        Resource originally requested
      * \param[in] colocated_rscs  Existing list
      *
      * \return List of given resource and all resources involved in colocations
      *
      * \note This function is recursive; top-level callers should pass NULL as
      *       \p colocated_rscs and \p orig_rsc, and the desired resource as
      *       \p rsc. The recursive calls will use other values.
      */
     GList *(*colocated_resources)(pe_resource_t *rsc, pe_resource_t *orig_rsc,
                                   GList *colocated_rscs);
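 
     /* Per the note above, a top-level call would look like (sketch):
      *
      *   GList *list = rsc->cmds->colocated_resources(rsc, NULL, NULL);
      *
      * with the caller later releasing the list itself via g_list_free().
      */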
 
     /*!
      * \internal
      * \brief Apply a location constraint to a resource's allowed node scores
      *
      * \param[in,out] rsc       Resource to apply constraint to
      * \param[in,out] location  Location constraint to apply
      */
     void (*apply_location)(pe_resource_t *rsc, pe__location_t *location);
 
     /*!
      * \internal
      * \brief Return action flags for a given resource action
      *
      * \param[in,out] action  Action to get flags for
      * \param[in]     node    If not NULL, limit effects to this node
      *
      * \return Flags appropriate to \p action on \p node
      * \note For primitives, this will be the same as action->flags regardless
      *       of node. For collective resources, the flags can differ due to
      *       multiple instances possibly being involved.
      */
     enum pe_action_flags (*action_flags)(pe_action_t *action,
                                          const pe_node_t *node);
 
     /*!
      * \internal
      * \brief Update two actions according to an ordering between them
      *
      * Given information about an ordering of two actions, update the actions'
      * flags (and runnable_before members if appropriate) to reflect the
      * ordering. In some cases, the ordering could be disabled as well.
      *
      * \param[in] first     'First' action in an ordering
      * \param[in] then      'Then' action in an ordering
      * \param[in] node      If not NULL, limit scope of ordering to this node
      *                      (only used when interleaving instances)
      * \param[in] flags     Action flags for \p first for ordering purposes
      * \param[in] filter    Action flags to limit scope of certain updates (may
      *                      include pe_action_optional to affect only mandatory
      *                      actions, and pe_action_runnable to affect only
      *                      runnable actions)
      * \param[in] type      Group of enum pe_ordering flags to apply
      * \param[in] data_set  Cluster working set
      *
      * \return Group of enum pcmk__updated flags indicating what was updated
      */
     uint32_t (*update_ordered_actions)(pe_action_t *first, pe_action_t *then,
                                        pe_node_t *node, uint32_t flags,
                                        uint32_t filter, uint32_t type,
                                        pe_working_set_t *data_set);
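 
     /* For instance (a sketch, assuming a local "result" variable), a filter
      * of pe_action_optional limits updates to mandatory actions, and the
      * returned flags are checked with pcmk_is_set():
      *
      *   if (pcmk_is_set(result, pcmk__updated_then)) {
      *       // "then" action needs further processing
      *   }
      */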
 
     void (*output_actions)(pe_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Add a resource's actions to the transition graph
      *
      * \param[in] rsc  Resource whose actions should be added
      */
     void (*add_actions_to_graph)(pe_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Add meta-attributes relevant to transition graph actions to XML
      *
      * If a given resource supports variant-specific meta-attributes that are
      * needed for transition graph actions, add them to a given XML element.
      *
      * \param[in]     rsc  Resource whose meta-attributes should be added
      * \param[in,out] xml  Transition graph action attributes XML to add to
      */
     void (*add_graph_meta)(pe_resource_t *rsc, xmlNode *xml);
 
     /*!
      * \internal
      * \brief Add a resource's utilization to a table of utilization values
      *
      * This function is used when summing the utilization of a resource and all
      * resources colocated with it, to determine whether a node has sufficient
      * capacity. Given a resource and a table of utilization values, it will add
      * the resource's utilization to the existing values, if the resource has
      * not yet been allocated to a node.
      *
-     * \param[in] rsc          Resource with utilization to add
-     * \param[in] orig_rsc     Resource being allocated (for logging only)
-     * \param[in] all_rscs     List of all resources that will be summed
-     * \param[in] utilization  Table of utilization values to add to
+     * \param[in]     rsc          Resource with utilization to add
+     * \param[in]     orig_rsc     Resource being allocated (for logging only)
+     * \param[in]     all_rscs     List of all resources that will be summed
+     * \param[in,out] utilization  Table of utilization values to add to
      */
-    void (*add_utilization)(pe_resource_t *rsc, pe_resource_t *orig_rsc,
-                            GList *all_rscs, GHashTable *utilization);
+    void (*add_utilization)(const pe_resource_t *rsc,
+                            const pe_resource_t *orig_rsc, GList *all_rscs,
+                            GHashTable *utilization);
 
     /*!
      * \internal
      * \brief Apply a shutdown lock for a resource, if appropriate
      *
      * \param[in] rsc       Resource to check for shutdown lock
      */
     void (*shutdown_lock)(pe_resource_t *rsc);
 };
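 
 /* A sketch (hypothetical caller, not from the actual sources) of how this
  * method table is used: each resource variant supplies one filled-in
  * struct resource_alloc_functions_s, and callers dispatch through it:
  *
  *   pe_node_t *node = rsc->cmds->assign(rsc, prefer);
  *
  *   rsc->cmds->add_utilization(rsc, orig_rsc, all_rscs, utilization);
  */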
 
 // Actions (pcmk_sched_actions.c)
 
 G_GNUC_INTERNAL
 void pcmk__update_action_for_orderings(pe_action_t *action,
                                        pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then,
                                       pe_node_t *node, uint32_t flags,
                                       uint32_t filter, uint32_t type,
                                       pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__log_action(const char *pre_text, pe_action_t *action, bool details);
 
 G_GNUC_INTERNAL
 pe_action_t *pcmk__new_cancel_action(pe_resource_t *rsc, const char *name,
                                      guint interval_ms, const pe_node_t *node);
 
 G_GNUC_INTERNAL
 pe_action_t *pcmk__new_shutdown_action(pe_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__action_locks_rsc_to_node(const pe_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__deduplicate_action_inputs(pe_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__output_actions(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
                                xmlNode *xml_op);
 
 G_GNUC_INTERNAL
 void pcmk__handle_rsc_config_changes(pe_working_set_t *data_set);
 
 
 // Recurring actions (pcmk_sched_recurring.c)
 
 G_GNUC_INTERNAL
 void pcmk__create_recurring_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__schedule_cancel(pe_resource_t *rsc, const char *call_id,
                            const char *task, guint interval_ms,
                            const pe_node_t *node, const char *reason);
 
 G_GNUC_INTERNAL
 void pcmk__reschedule_recurring(pe_resource_t *rsc, const char *task,
                                 guint interval_ms, pe_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__action_is_recurring(const pe_action_t *action);
 
 
 // Producing transition graphs (pcmk_graph_producer.c)
 
 G_GNUC_INTERNAL
 bool pcmk__graph_has_loop(pe_action_t *init_action, pe_action_t *action,
                           pe_action_wrapper_t *input);
 
 G_GNUC_INTERNAL
 void pcmk__add_rsc_actions_to_graph(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__create_graph(pe_working_set_t *data_set);
 
 
 // Fencing (pcmk_sched_fencing.c)
 
 G_GNUC_INTERNAL
 void pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__order_vs_unfence(pe_resource_t *rsc, pe_node_t *node,
                             pe_action_t *action, enum pe_ordering order);
 
 G_GNUC_INTERNAL
 void pcmk__fence_guest(pe_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__node_unfenced(pe_node_t *node);
 
 
 // Injected scheduler inputs (pcmk_sched_injections.c)
 
 void pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
                                   pcmk_injections_t *injections);
 
 
 // Constraints of any type (pcmk_sched_constraints.c)
 
 G_GNUC_INTERNAL
 pe_resource_t *pcmk__find_constraint_resource(GList *rsc_list, const char *id);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__expand_tags_in_sets(xmlNode *xml_obj,
                                    pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__valid_resource_or_tag(pe_working_set_t *data_set, const char *id,
                                  pe_resource_t **rsc, pe_tag_t **tag);
 
 G_GNUC_INTERNAL
 bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
                       bool convert_rsc, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__create_internal_constraints(pe_working_set_t *data_set);
 
 
 // Location constraints
 
 G_GNUC_INTERNAL
 void pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 pe__location_t *pcmk__new_location(const char *id, pe_resource_t *rsc,
                                    int node_weight, const char *discover_mode,
                                    pe_node_t *foo_node,
                                    pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__apply_locations(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__apply_location(pe_resource_t *rsc, pe__location_t *constraint);
 
 
 // Colocation constraints (pcmk_sched_colocation.c)
 
 enum pcmk__coloc_affects {
     pcmk__coloc_affects_nothing = 0,
     pcmk__coloc_affects_location,
     pcmk__coloc_affects_role,
 };
 
 G_GNUC_INTERNAL
 enum pcmk__coloc_affects pcmk__colocation_affects(const pe_resource_t *dependent,
                                                   const pe_resource_t *primary,
                                                   const pcmk__colocation_t *colocation,
                                                   bool preview);
 
 G_GNUC_INTERNAL
 void pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
                                   const pe_resource_t *primary,
                                   const pcmk__colocation_t *colocation);
 
 G_GNUC_INTERNAL
 void pcmk__apply_coloc_to_priority(pe_resource_t *dependent,
                                    const pe_resource_t *primary,
                                    const pcmk__colocation_t *colocation);
 
 G_GNUC_INTERNAL
 void pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
                                      GHashTable **nodes, const char *attr,
                                      float factor, uint32_t flags);
 
 G_GNUC_INTERNAL
 void pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__new_colocation(const char *id, const char *node_attr, int score,
                           pe_resource_t *dependent, pe_resource_t *primary,
                           const char *dependent_role, const char *primary_role,
                           bool influence, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__block_colocated_starts(pe_action_t *action,
                                   pe_working_set_t *data_set);
 
 /*!
  * \internal
  * \brief Check whether to consider a colocation's dependent preferences
  *
  * \param[in] colocation  Colocation constraint
  * \param[in] rsc         Primary instance (normally colocation->primary, and
  *                        NULL is treated as such, but for clones or bundles
  *                        with multiple instances this can be a particular
  *                        instance)
  *
  * \return true if colocation influence should be effective, otherwise false
  */
 static inline bool
 pcmk__colocation_has_influence(const pcmk__colocation_t *colocation,
                                const pe_resource_t *rsc)
 {
     if (rsc == NULL) {
         rsc = colocation->primary;
     }
 
     /* A bundle replica colocates its remote connection with its container,
      * using a finite score so that the container can run on Pacemaker Remote
      * nodes.
      *
      * Moving a connection is lightweight and does not interrupt the service,
      * while moving a container is heavyweight and does interrupt the service,
      * so don't move a clean, active container based solely on the preferences
      * of its connection.
      *
      * This also avoids problematic scenarios where two containers want to
      * perpetually swap places.
      */
     if (pcmk_is_set(colocation->dependent->flags, pe_rsc_allow_remote_remotes)
         && !pcmk_is_set(rsc->flags, pe_rsc_failed)
         && pcmk__list_of_1(rsc->running_on)) {
         return false;
     }
 
     /* The dependent in a colocation influences the primary's location
      * if the influence option is true or the primary is not yet active.
      */
     return colocation->influence || (rsc->running_on == NULL);
 }
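 
 /* A typical caller (hypothetical sketch) skips colocations whose dependent
  * preferences should not be considered:
  *
  *   if (!pcmk__colocation_has_influence(colocation, NULL)) {
  *       continue;
  *   }
  */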
 
 
 // Ordering constraints (pcmk_sched_ordering.c)
 
 G_GNUC_INTERNAL
 void pcmk__new_ordering(pe_resource_t *first_rsc, char *first_task,
                         pe_action_t *first_action, pe_resource_t *then_rsc,
                         char *then_task, pe_action_t *then_action,
                         enum pe_ordering type, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__disable_invalid_orderings(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__order_stops_before_shutdown(pe_node_t *node,
                                        pe_action_t *shutdown_op);
 
 G_GNUC_INTERNAL
 void pcmk__apply_orderings(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__order_after_each(pe_action_t *after, GList *list);
 
 
 /*!
  * \internal
  * \brief Create a new ordering between two resource actions
  *
  * \param[in] first_rsc   Resource for 'first' action
  * \param[in] then_rsc    Resource for 'then' action
  * \param[in] first_task  Action key for 'first' action
  * \param[in] then_task   Action key for 'then' action
  * \param[in] flags       Bitmask of enum pe_ordering flags
  * \param[in] data_set    Cluster working set to add ordering to
  */
 #define pcmk__order_resource_actions(first_rsc, first_task,                 \
                                      then_rsc, then_task, flags)            \
     pcmk__new_ordering((first_rsc),                                         \
                        pcmk__op_key((first_rsc)->id, (first_task), 0),      \
                        NULL,                                                \
                        (then_rsc),                                          \
                        pcmk__op_key((then_rsc)->id, (then_task), 0),        \
                        NULL, (flags), (first_rsc)->cluster)
 
 #define pcmk__order_starts(rsc1, rsc2, type)                 \
     pcmk__order_resource_actions((rsc1), CRMD_ACTION_START,  \
                                  (rsc2), CRMD_ACTION_START, (type))
 
 #define pcmk__order_stops(rsc1, rsc2, type)                  \
     pcmk__order_resource_actions((rsc1), CRMD_ACTION_STOP,   \
                                  (rsc2), CRMD_ACTION_STOP, (type))
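 
 /* For example (a minimal sketch): make rsc2's start conditional on rsc1's
  * start being runnable:
  *
  *   pcmk__order_starts(rsc1, rsc2, pe_order_runnable_left);
  */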
 
 
 // Ticket constraints (pcmk_sched_tickets.c)
 
 G_GNUC_INTERNAL
 void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 
 // Promotable clone resources (pcmk_sched_promotable.c)
 
 G_GNUC_INTERNAL
 void pcmk__add_promotion_scores(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__require_promotion_tickets(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__set_instance_roles(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__create_promotable_actions(pe_resource_t *clone);
 
 G_GNUC_INTERNAL
 void pcmk__promotable_restart_ordering(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__order_promotable_instances(pe_resource_t *clone);
 
 G_GNUC_INTERNAL
 void pcmk__update_dependent_with_promotable(const pe_resource_t *primary,
                                             pe_resource_t *dependent,
                                             const pcmk__colocation_t *colocation);
 
 G_GNUC_INTERNAL
 void pcmk__update_promotable_dependent_priority(const pe_resource_t *primary,
                                                 pe_resource_t *dependent,
                                                 const pcmk__colocation_t *colocation);
 
 
 // Pacemaker Remote nodes (pcmk_sched_remote.c)
 
 G_GNUC_INTERNAL
 bool pcmk__is_failed_remote_node(pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__order_remote_connection_actions(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__rsc_corresponds_to_guest(pe_resource_t *rsc, pe_node_t *node);
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__connection_host_for_action(pe_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params);
 
 G_GNUC_INTERNAL
 void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, pe_action_t *action);
 
 
 // Primitives (pcmk_sched_primitive.c)
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_create_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_internal_constraints(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 enum pe_action_flags pcmk__primitive_action_flags(pe_action_t *action,
                                                   const pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
                                        const pe_resource_t *primary,
                                        const pcmk__colocation_t *colocation,
                                        bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node,
                             bool optional);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_add_graph_meta(pe_resource_t *rsc, xmlNode *xml);
 
 G_GNUC_INTERNAL
-void pcmk__primitive_add_utilization(pe_resource_t *rsc,
-                                     pe_resource_t *orig_rsc, GList *all_rscs,
-                                     GHashTable *utilization);
+void pcmk__primitive_add_utilization(const pe_resource_t *rsc,
+                                     const pe_resource_t *orig_rsc,
+                                     GList *all_rscs, GHashTable *utilization);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_shutdown_lock(pe_resource_t *rsc);
 
 
 // Groups (pcmk_sched_group.c)
 
 G_GNUC_INTERNAL
 void pcmk__group_apply_coloc_score(pe_resource_t *dependent,
                                    const pe_resource_t *primary,
                                    const pcmk__colocation_t *colocation,
                                    bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__group_add_colocated_node_scores(pe_resource_t *rsc,
                                            const char *log_id,
                                            GHashTable **nodes, const char *attr,
                                            float factor, uint32_t flags);
 
 G_GNUC_INTERNAL
 GList *pcmk__group_colocated_resources(pe_resource_t *rsc,
                                        pe_resource_t *orig_rsc,
                                        GList *colocated_rscs);
 
 // Clones (pcmk_sched_clone.c)
 
 G_GNUC_INTERNAL
 void pcmk__clone_apply_coloc_score(pe_resource_t *dependent,
                                    const pe_resource_t *primary,
                                    const pcmk__colocation_t *colocation,
                                    bool for_dependent);
 
 // Bundles (pcmk_sched_bundle.c)
 
 G_GNUC_INTERNAL
 void pcmk__bundle_apply_coloc_score(pe_resource_t *dependent,
                                     const pe_resource_t *primary,
                                     const pcmk__colocation_t *colocation,
                                     bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__output_bundle_actions(pe_resource_t *rsc);
 
 
 // Injections (pcmk_injections.c)
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_node_state_change(cib_t *cib_conn, const char *node,
                                         bool up);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node,
                                        const char *resource,
                                        const char *lrm_name,
                                        const char *rclass,
                                        const char *rtype,
                                        const char *rprovider);
 
 G_GNUC_INTERNAL
 void pcmk__inject_failcount(pcmk__output_t *out, xmlNode *cib_node,
                             const char *resource, const char *task,
                             guint interval_ms, int rc);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_action_result(xmlNode *cib_resource,
                                     lrmd_event_data_t *op, int target_rc);
 
 
 // Nodes (pcmk_sched_nodes.c)
 
 G_GNUC_INTERNAL
 bool pcmk__node_available(const pe_node_t *node, bool consider_score,
                           bool consider_guest);
 
 G_GNUC_INTERNAL
 bool pcmk__any_node_available(GHashTable *nodes);
 
 G_GNUC_INTERNAL
 GHashTable *pcmk__copy_node_table(GHashTable *nodes);
 
 G_GNUC_INTERNAL
 GList *pcmk__sort_nodes(GList *nodes, pe_node_t *active_node);
 
 G_GNUC_INTERNAL
 void pcmk__apply_node_health(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__top_allowed_node(const pe_resource_t *rsc,
                                   const pe_node_t *node);
 
 
 // Functions applying to more than one variant (pcmk_sched_resource.c)
 
 G_GNUC_INTERNAL
 void pcmk__set_allocation_methods(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
                              const xmlNode *rsc_entry, bool active_on_node);
 
 G_GNUC_INTERNAL
 GList *pcmk__rscs_matching_id(const char *id, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 GList *pcmk__colocated_resources(pe_resource_t *rsc, pe_resource_t *orig_rsc,
                                  GList *colocated_rscs);
 
 G_GNUC_INTERNAL
 void pcmk__output_resource_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__finalize_assignment(pe_resource_t *rsc, pe_node_t *chosen,
                                bool force);
 
 G_GNUC_INTERNAL
 bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force);
 
 G_GNUC_INTERNAL
 void pcmk__unassign_resource(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__threshold_reached(pe_resource_t *rsc, pe_node_t *node,
                              pe_resource_t **failed);
 
 G_GNUC_INTERNAL
 void pcmk__sort_resources(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 gint pcmk__cmp_instance(gconstpointer a, gconstpointer b);
 
 G_GNUC_INTERNAL
 gint pcmk__cmp_instance_number(gconstpointer a, gconstpointer b);
 
 
 // Functions related to probes (pcmk_sched_probes.c)
 
 G_GNUC_INTERNAL
 bool pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__order_probes(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__probe_resource_list(GList *rscs, pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__schedule_probes(pe_working_set_t *data_set);
 
 
 // Functions related to live migration (pcmk_sched_migration.c)
 
 void pcmk__create_migration_actions(pe_resource_t *rsc,
                                     const pe_node_t *current);
 
 void pcmk__abort_dangling_migration(void *data, void *user_data);
 
 bool pcmk__rsc_can_migrate(const pe_resource_t *rsc, const pe_node_t *current);
 
 void pcmk__order_migration_equivalents(pe__ordering_t *order);
 
 
 // Functions related to node utilization (pcmk_sched_utilization.c)
 
 G_GNUC_INTERNAL
 int pcmk__compare_node_capacities(const pe_node_t *node1,
                                   const pe_node_t *node2);
 
 G_GNUC_INTERNAL
 void pcmk__consume_node_capacity(GHashTable *current_utilization,
                                  pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__release_node_capacity(GHashTable *current_utilization,
                                  const pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 const pe_node_t *pcmk__ban_insufficient_capacity(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__create_utilization_constraints(pe_resource_t *rsc,
                                           GList *allowed_nodes);
 
 G_GNUC_INTERNAL
 void pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set);
 
 #endif // PCMK__LIBPACEMAKER_PRIVATE__H
diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c
index 0190750c48..95b9475b2a 100644
--- a/lib/pacemaker/pcmk_sched_bundle.c
+++ b/lib/pacemaker/pcmk_sched_bundle.c
@@ -1,1159 +1,1160 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdbool.h>
 
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 #define PE__VARIANT_BUNDLE 1
 #include <lib/pengine/variant.h>
 
 static bool
 is_bundle_node(pe__bundle_variant_data_t *data, pe_node_t *node)
 {
     for (GList *gIter = data->replicas; gIter != NULL; gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (node->details == replica->node->details) {
             return true;
         }
     }
     return false;
 }
 
 void distribute_children(pe_resource_t *rsc, GList *children, GList *nodes,
                          int max, int per_host_max, pe_working_set_t * data_set);
 
 static GList *
 get_container_list(const pe_resource_t *rsc)
 {
     GList *containers = NULL;
 
     if (rsc->variant == pe_container) {
         pe__bundle_variant_data_t *data = NULL;
 
         get_bundle_variant_data(data, rsc);
         for (GList *gIter = data->replicas; gIter != NULL;
              gIter = gIter->next) {
             pe__bundle_replica_t *replica = gIter->data;
 
             containers = g_list_append(containers, replica->container);
         }
     }
     return containers;
 }
 
 static inline GList *
 get_containers_or_children(const pe_resource_t *rsc)
 {
     return (rsc->variant == pe_container)?
            get_container_list(rsc) : rsc->children;
 }
 
 /*!
  * \internal
  * \brief Assign a bundle resource to a node
  *
  * \param[in,out] rsc     Resource to assign to a node
  * \param[in]     prefer  Node to prefer, if all else is equal
  *
  * \return Node that \p rsc is assigned to, if assigned entirely to one node
  */
 pe_node_t *
 pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer)
 {
     GList *containers = NULL;
     GList *nodes = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return NULL);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     pe__set_resource_flags(rsc, pe_rsc_allocating);
     containers = get_container_list(rsc);
 
     pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
                           rsc, __func__, rsc->allowed_nodes, rsc->cluster);
 
     nodes = g_hash_table_get_values(rsc->allowed_nodes);
     nodes = pcmk__sort_nodes(nodes, NULL);
     containers = g_list_sort(containers, pcmk__cmp_instance);
     distribute_children(rsc, containers, nodes, bundle_data->nreplicas,
                         bundle_data->nreplicas_per_host, rsc->cluster);
     g_list_free(nodes);
     g_list_free(containers);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         pe_node_t *container_host = NULL;
 
         CRM_ASSERT(replica);
         if (replica->ip) {
             pe_rsc_trace(rsc, "Allocating bundle %s IP %s",
                          rsc->id, replica->ip->id);
             replica->ip->cmds->assign(replica->ip, prefer);
         }
 
         container_host = replica->container->allocated_to;
         if (replica->remote && pe__is_guest_or_remote_node(container_host)) {
             /* We need 'nested' connection resources to be on the same
              * host because pacemaker-remoted only supports a single
              * active connection
              */
             pcmk__new_colocation("child-remote-with-docker-remote", NULL,
                                  INFINITY, replica->remote,
                                  container_host->details->remote_rsc, NULL,
                                  NULL, true, rsc->cluster);
         }
 
         if (replica->remote) {
             pe_rsc_trace(rsc, "Allocating bundle %s connection %s",
                          rsc->id, replica->remote->id);
             replica->remote->cmds->assign(replica->remote, prefer);
         }
 
         // Explicitly allocate replicas' children before bundle child
         if (replica->child) {
             pe_node_t *node = NULL;
             GHashTableIter iter;
 
             g_hash_table_iter_init(&iter, replica->child->allowed_nodes);
             while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
                 if (node->details != replica->node->details) {
                     node->weight = -INFINITY;
                 } else if (!pcmk__threshold_reached(replica->child, node,
                                                     NULL)) {
                     node->weight = INFINITY;
                 }
             }
 
             pe__set_resource_flags(replica->child->parent, pe_rsc_allocating);
             pe_rsc_trace(rsc, "Allocating bundle %s replica child %s",
                          rsc->id, replica->child->id);
             replica->child->cmds->assign(replica->child, replica->node);
             pe__clear_resource_flags(replica->child->parent,
                                        pe_rsc_allocating);
         }
     }
 
     if (bundle_data->child) {
         pe_node_t *node = NULL;
         GHashTableIter iter;
         g_hash_table_iter_init(&iter, bundle_data->child->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
             if (is_bundle_node(bundle_data, node)) {
                 node->weight = 0;
             } else {
                 node->weight = -INFINITY;
             }
         }
         pe_rsc_trace(rsc, "Allocating bundle %s child %s",
                      rsc->id, bundle_data->child->id);
         bundle_data->child->cmds->assign(bundle_data->child, prefer);
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
     return NULL;
 }
 
 
 void
 pcmk__bundle_create_actions(pe_resource_t *rsc)
 {
     pe_action_t *action = NULL;
     GList *containers = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     containers = get_container_list(rsc);
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (replica->ip) {
             replica->ip->cmds->create_actions(replica->ip);
         }
         if (replica->container) {
             replica->container->cmds->create_actions(replica->container);
         }
         if (replica->remote) {
             replica->remote->cmds->create_actions(replica->remote);
         }
     }
 
     clone_create_pseudo_actions(rsc, containers, NULL, NULL);
 
     if (bundle_data->child) {
         bundle_data->child->cmds->create_actions(bundle_data->child);
 
         if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) {
             /* promote */
             pe__new_rsc_pseudo_action(rsc, RSC_PROMOTE, true, true);
             action = pe__new_rsc_pseudo_action(rsc, RSC_PROMOTED, true, true);
             action->priority = INFINITY;
 
             /* demote */
             pe__new_rsc_pseudo_action(rsc, RSC_DEMOTE, true, true);
             action = pe__new_rsc_pseudo_action(rsc, RSC_DEMOTED, true, true);
             action->priority = INFINITY;
         }
     }
 
     g_list_free(containers);
 }
 
 void
 pcmk__bundle_internal_constraints(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (bundle_data->child) {
         pcmk__order_resource_actions(rsc, RSC_START, bundle_data->child,
                                      RSC_START, pe_order_implies_first_printed);
         pcmk__order_resource_actions(rsc, RSC_STOP, bundle_data->child,
                                      RSC_STOP, pe_order_implies_first_printed);
 
         if (bundle_data->child->children) {
             pcmk__order_resource_actions(bundle_data->child, RSC_STARTED, rsc,
                                          RSC_STARTED,
                                          pe_order_implies_then_printed);
             pcmk__order_resource_actions(bundle_data->child, RSC_STOPPED, rsc,
                                          RSC_STOPPED,
                                          pe_order_implies_then_printed);
         } else {
             pcmk__order_resource_actions(bundle_data->child, RSC_START, rsc,
                                          RSC_STARTED,
                                          pe_order_implies_then_printed);
             pcmk__order_resource_actions(bundle_data->child, RSC_STOP, rsc,
                                          RSC_STOPPED,
                                          pe_order_implies_then_printed);
         }
     }
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         CRM_ASSERT(replica->container);
 
         replica->container->cmds->internal_constraints(replica->container);
 
         pcmk__order_starts(rsc, replica->container,
                            pe_order_runnable_left|pe_order_implies_first_printed);
 
         if (replica->child) {
             pcmk__order_stops(rsc, replica->child,
                               pe_order_implies_first_printed);
         }
         pcmk__order_stops(rsc, replica->container,
                           pe_order_implies_first_printed);
         pcmk__order_resource_actions(replica->container, RSC_START, rsc,
                                      RSC_STARTED,
                                      pe_order_implies_then_printed);
         pcmk__order_resource_actions(replica->container, RSC_STOP, rsc,
                                      RSC_STOPPED,
                                      pe_order_implies_then_printed);
 
         if (replica->ip) {
             replica->ip->cmds->internal_constraints(replica->ip);
 
             // Start IP then container
             pcmk__order_starts(replica->ip, replica->container,
                                pe_order_runnable_left|pe_order_preserve);
             pcmk__order_stops(replica->container, replica->ip,
                               pe_order_implies_first|pe_order_preserve);
 
             pcmk__new_colocation("ip-with-docker", NULL, INFINITY, replica->ip,
                                  replica->container, NULL, NULL, true,
                                  rsc->cluster);
         }
 
         if (replica->remote) {
             /* This handles ordering and colocating remote relative to container
              * (via "resource-with-container"). Since IP is also ordered and
              * colocated relative to the container, we don't need to do anything
              * explicit here with IP.
              */
             replica->remote->cmds->internal_constraints(replica->remote);
         }
 
         if (replica->child) {
             CRM_ASSERT(replica->remote);
 
             // "Start remote then child" is implicit in scheduler's remote logic
         }
 
     }
 
     if (bundle_data->child) {
         bundle_data->child->cmds->internal_constraints(bundle_data->child);
         if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) {
             pcmk__promotable_restart_ordering(rsc);
 
             /* child demoted before global demoted */
             pcmk__order_resource_actions(bundle_data->child, RSC_DEMOTED, rsc,
                                          RSC_DEMOTED,
                                          pe_order_implies_then_printed);
 
             /* global demote before child demote */
             pcmk__order_resource_actions(rsc, RSC_DEMOTE, bundle_data->child,
                                          RSC_DEMOTE,
                                          pe_order_implies_first_printed);
 
             /* child promoted before global promoted */
             pcmk__order_resource_actions(bundle_data->child, RSC_PROMOTED, rsc,
                                          RSC_PROMOTED,
                                          pe_order_implies_then_printed);
 
             /* global promote before child promote */
             pcmk__order_resource_actions(rsc, RSC_PROMOTE, bundle_data->child,
                                          RSC_PROMOTE,
                                          pe_order_implies_first_printed);
         }
     }
 }
 
 static pe_resource_t *
 compatible_replica_for_node(const pe_resource_t *rsc_lh,
                             const pe_node_t *candidate,
                             const pe_resource_t *rsc, enum rsc_role_e filter,
                             gboolean current)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(candidate != NULL, return NULL);
     get_bundle_variant_data(bundle_data, rsc);
 
     crm_trace("Looking for compatible child from %s for %s on %s",
               rsc_lh->id, rsc->id, pe__node_name(candidate));
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (is_child_compatible(replica->container, candidate, filter, current)) {
             crm_trace("Pairing %s with %s on %s",
                       rsc_lh->id, replica->container->id,
                       pe__node_name(candidate));
             return replica->container;
         }
     }
 
     crm_trace("Can't pair %s with %s", rsc_lh->id, rsc->id);
     return NULL;
 }
 
 static pe_resource_t *
 compatible_replica(const pe_resource_t *rsc_lh, const pe_resource_t *rsc,
                    enum rsc_role_e filter, gboolean current,
                    pe_working_set_t *data_set)
 {
     GList *scratch = NULL;
     pe_resource_t *pair = NULL;
     pe_node_t *active_node_lh = NULL;
 
     active_node_lh = rsc_lh->fns->location(rsc_lh, NULL, current);
     if (active_node_lh) {
         return compatible_replica_for_node(rsc_lh, active_node_lh, rsc, filter,
                                            current);
     }
 
     scratch = g_hash_table_get_values(rsc_lh->allowed_nodes);
     scratch = pcmk__sort_nodes(scratch, NULL);
 
     for (GList *gIter = scratch; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         pair = compatible_replica_for_node(rsc_lh, node, rsc, filter, current);
         if (pair) {
             goto done;
         }
     }
 
     pe_rsc_debug(rsc, "Can't pair %s with %s", rsc_lh->id, (rsc? rsc->id : "none"));
   done:
     g_list_free(scratch);
     return pair;
 }
 
 int copies_per_node(pe_resource_t *rsc)
 {
     /* Strictly speaking, there should be a 'copies_per_node' addition
      * to the resource function table, and each case would be a
      * function.  However, that would be serious overkill just to return
      * an int.  In fact, it seems to me that both function tables
      * could/should be replaced by resources.{c,h} full of
      * rsc_{some_operation} functions containing a switch as below,
      * which calls out to functions named {variant}_{some_operation}
      * as needed.
      */
     switch (rsc->variant) {
         case pe_unknown:
             return 0;
         case pe_native:
         case pe_group:
             return 1;
         case pe_clone:
             {
                 const char *max_clones_node =
                     g_hash_table_lookup(rsc->meta,
                                         XML_RSC_ATTR_INCARNATION_NODEMAX);
 
                 if (max_clones_node == NULL) {
                     return 1;
 
                 } else {
                     int max_i;
 
                     pcmk__scan_min_int(max_clones_node, &max_i, 0);
                     return max_i;
                 }
             }
         case pe_container:
             {
                 pe__bundle_variant_data_t *data = NULL;
                 get_bundle_variant_data(data, rsc);
                 return data->nreplicas_per_host;
             }
     }
     return 0;
 }
 
 /*!
  * \internal
  * \brief Apply a colocation's score to node weights or resource priority
  *
  * Given a colocation constraint, apply its score to the dependent's
  * allowed node weights (if we are still placing resources) or priority (if
  * we are choosing promotable clone instance roles).
  *
  * \param[in,out] dependent      Dependent resource in colocation
  * \param[in]     primary        Primary resource in colocation
  * \param[in]     colocation     Colocation constraint to apply
  * \param[in]     for_dependent  true if called on behalf of dependent
  */
 void
 pcmk__bundle_apply_coloc_score(pe_resource_t *dependent,
                                const pe_resource_t *primary,
                                const pcmk__colocation_t *colocation,
                                bool for_dependent)
 {
     GList *allocated_primaries = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     /* This should never be called for the bundle itself as a dependent.
      * Instead, we add its colocation constraints to its replicas and call
      * apply_coloc_score() for the replicas as dependents.
      */
     CRM_ASSERT(!for_dependent);
 
     CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
               return);
     CRM_ASSERT(dependent->variant == pe_native);
 
     if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
         pe_rsc_trace(primary, "%s is still provisional", primary->id);
         return;
 
     } else if (colocation->dependent->variant > pe_group) {
         pe_resource_t *primary_replica = compatible_replica(dependent, primary,
                                                             RSC_ROLE_UNKNOWN,
                                                             FALSE,
                                                             dependent->cluster);
 
         if (primary_replica) {
             pe_rsc_debug(primary, "Pairing %s with %s",
                          dependent->id, primary_replica->id);
             dependent->cmds->apply_coloc_score(dependent, primary_replica,
                                                colocation, true);
 
         } else if (colocation->score >= INFINITY) {
             crm_notice("Cannot pair %s with instance of %s",
                        dependent->id, primary->id);
             pcmk__assign_resource(dependent, NULL, true);
 
         } else {
             pe_rsc_debug(primary, "Cannot pair %s with instance of %s",
                          dependent->id, primary->id);
         }
 
         return;
     }
 
     get_bundle_variant_data(bundle_data, primary);
     pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d",
                  colocation->id, dependent->id, primary->id, colocation->score);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (colocation->score < INFINITY) {
             replica->container->cmds->apply_coloc_score(dependent,
                                                         replica->container,
                                                         colocation, false);
 
         } else {
             pe_node_t *chosen = replica->container->fns->location(replica->container,
                                                                   NULL, FALSE);
 
             if ((chosen == NULL)
                 || is_set_recursive(replica->container, pe_rsc_block, TRUE)) {
                 continue;
             }
             if ((colocation->primary_role >= RSC_ROLE_PROMOTED)
                 && (replica->child == NULL)) {
                 continue;
             }
             if ((colocation->primary_role >= RSC_ROLE_PROMOTED)
                 && (replica->child->next_role < RSC_ROLE_PROMOTED)) {
                 continue;
             }
 
             pe_rsc_trace(primary, "Allowing %s: %s %d",
                          colocation->id, pe__node_name(chosen), chosen->weight);
             allocated_primaries = g_list_prepend(allocated_primaries, chosen);
         }
     }
 
     if (colocation->score >= INFINITY) {
         node_list_exclude(dependent->allowed_nodes, allocated_primaries, FALSE);
     }
     g_list_free(allocated_primaries);
 }
 
 enum pe_action_flags
 pcmk__bundle_action_flags(pe_action_t *action, const pe_node_t *node)
 {
     GList *containers = NULL;
     enum pe_action_flags flags = 0;
     pe__bundle_variant_data_t *data = NULL;
 
     get_bundle_variant_data(data, action->rsc);
     if (data->child) {
         enum action_tasks task = get_complex_task(data->child, action->task,
                                                   TRUE);
 
         switch (task) {
             case no_action:
             case action_notify:
             case action_notified:
             case action_promote:
             case action_promoted:
             case action_demote:
             case action_demoted:
                 return summary_action_flags(action, data->child->children, node);
             default:
                 break;
         }
     }
 
     containers = get_container_list(action->rsc);
     flags = summary_action_flags(action, containers, node);
     g_list_free(containers);
     return flags;
 }
 
 pe_resource_t *
 find_compatible_child_by_node(const pe_resource_t *local_child,
                               const pe_node_t *local_node,
                               const pe_resource_t *rsc, enum rsc_role_e filter,
                               gboolean current)
 {
     GList *gIter = NULL;
     GList *children = NULL;
 
     if (local_node == NULL) {
         crm_err("Can't colocate unrunnable child %s with %s", local_child->id, rsc->id);
         return NULL;
     }
 
     crm_trace("Looking for compatible child from %s for %s on %s",
               local_child->id, rsc->id, pe__node_name(local_node));
 
     children = get_containers_or_children(rsc);
     for (gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if(is_child_compatible(child_rsc, local_node, filter, current)) {
             crm_trace("Pairing %s with %s on %s",
                       local_child->id, child_rsc->id, pe__node_name(local_node));
             return child_rsc;
         }
     }
 
     crm_trace("Can't pair %s with %s", local_child->id, rsc->id);
     if(children != rsc->children) {
         g_list_free(children);
     }
     return NULL;
 }
 
 static pe__bundle_replica_t *
 replica_for_container(pe_resource_t *rsc, pe_resource_t *container,
                       pe_node_t *node)
 {
     if (rsc->variant == pe_container) {
         pe__bundle_variant_data_t *data = NULL;
 
         get_bundle_variant_data(data, rsc);
         for (GList *gIter = data->replicas; gIter != NULL;
              gIter = gIter->next) {
             pe__bundle_replica_t *replica = gIter->data;
 
             if (replica->child
                 && (container == replica->container)
                 && (node->details == replica->node->details)) {
                 return replica;
             }
         }
     }
     return NULL;
 }
 
 static uint32_t
 multi_update_interleave_actions(pe_action_t *first, pe_action_t *then,
                                 pe_node_t *node, uint32_t filter, uint32_t type,
                                 pe_working_set_t *data_set)
 {
     GList *gIter = NULL;
     GList *children = NULL;
     gboolean current = FALSE;
     uint32_t changed = pcmk__updated_none;
 
     /* @TODO This is a lazy check: we infer from the action name whether the
      * ordering applies to current (stopping/demoting) instances, rather than
      * examining the action itself
      */
     if (pcmk__ends_with(first->uuid, "_stopped_0")
         || pcmk__ends_with(first->uuid, "_demoted_0")) {
         current = TRUE;
     }
 
     children = get_containers_or_children(then->rsc);
     for (gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *then_child = gIter->data;
         pe_resource_t *first_child = find_compatible_child(then_child,
                                                            first->rsc,
                                                            RSC_ROLE_UNKNOWN,
                                                            current);
         if (first_child == NULL && current) {
             crm_trace("Ignore");
 
         } else if (first_child == NULL) {
             crm_debug("No match found for %s (%d / %s / %s)", then_child->id, current, first->uuid, then->uuid);
 
             /* This is an ugly hack, but there is no better option: if
              * nothing is active, or about to be active, on the same node
              * as then_child, then then_child must not be allowed to start.
              */
             if (pcmk_any_flags_set(type, pe_order_runnable_left|pe_order_implies_then) /* Mandatory */ ) {
                 pe_rsc_info(then->rsc, "Inhibiting %s from being active", then_child->id);
                 if (pcmk__assign_resource(then_child, NULL, true)) {
                     pcmk__set_updated_flags(changed, first, pcmk__updated_then);
                 }
             }
 
         } else {
             pe_action_t *first_action = NULL;
             pe_action_t *then_action = NULL;
 
             enum action_tasks task = clone_child_action(first);
             const char *first_task = task2text(task);
 
             pe__bundle_replica_t *first_replica = NULL;
             pe__bundle_replica_t *then_replica = NULL;
 
             first_replica = replica_for_container(first->rsc, first_child,
                                                   node);
             if (strstr(first->task, "stop") && first_replica && first_replica->child) {
                 /* Except for 'stopped', we should be looking at the
                  * in-container resource: actions for the child will
                  * happen later and are therefore more likely to align
                  * with the user's intent.
                  */
                 first_action = find_first_action(first_replica->child->actions,
                                                  NULL, first_task, node);
             } else {
                 first_action = find_first_action(first_child->actions, NULL,
                                                  first_task, node);
             }
 
             then_replica = replica_for_container(then->rsc, then_child, node);
             if (strstr(then->task, "mote")
                 && then_replica && then_replica->child) {
                 /* Promote/demote actions will never be found for the
                  * container resource, look in the child instead
                  *
                  * Alternatively treat:
                  *  'XXXX then promote YYYY' as 'XXXX then start container for YYYY', and
                  *  'demote XXXX then stop YYYY' as 'stop container for XXXX then stop YYYY'
                  */
                 then_action = find_first_action(then_replica->child->actions,
                                                 NULL, then->task, node);
             } else {
                 then_action = find_first_action(then_child->actions, NULL,
                                                 then->task, node);
             }
 
             if (first_action == NULL) {
                 if (!pcmk_is_set(first_child->flags, pe_rsc_orphan)
                     && !pcmk__str_any_of(first_task, RSC_STOP, RSC_DEMOTE, NULL)) {
                     crm_err("Internal error: No action found for %s in %s (first)",
                             first_task, first_child->id);
 
                 } else {
                     crm_trace("No action found for %s in %s%s (first)",
                               first_task, first_child->id,
                               pcmk_is_set(first_child->flags, pe_rsc_orphan)? " (ORPHAN)" : "");
                 }
                 continue;
             }
 
             /* A missing 'then' action is an error only if 'then' is neither
              * stopping nor being demoted
              */
             if (then_action == NULL) {
                 if (!pcmk_is_set(then_child->flags, pe_rsc_orphan)
                     && !pcmk__str_any_of(then->task, RSC_STOP, RSC_DEMOTE, NULL)) {
                     crm_err("Internal error: No action found for %s in %s (then)",
                             then->task, then_child->id);
 
                 } else {
                     crm_trace("No action found for %s in %s%s (then)",
                               then->task, then_child->id,
                               pcmk_is_set(then_child->flags, pe_rsc_orphan)? " (ORPHAN)" : "");
                 }
                 continue;
             }
 
             if (order_actions(first_action, then_action, type)) {
                 crm_debug("Created constraint for %s (%d) -> %s (%d) %.6x",
                           first_action->uuid,
                           pcmk_is_set(first_action->flags, pe_action_optional),
                           then_action->uuid,
                           pcmk_is_set(then_action->flags, pe_action_optional),
                           type);
                 pcmk__set_updated_flags(changed, first,
                                         pcmk__updated_first|pcmk__updated_then);
             }
             /* Both actions are guaranteed non-NULL here by the earlier
              * continue statements
              */
             changed |= then_child->cmds->update_ordered_actions(first_action,
                                                                 then_action,
                                                                 node,
                                                                 first_child->cmds->action_flags(first_action, node),
                                                                 filter,
                                                                 type,
                                                                 data_set);
         }
     }
 
     if(children != then->rsc->children) {
         g_list_free(children);
     }
     return changed;
 }
 
 static bool
 can_interleave_actions(pe_action_t *first, pe_action_t *then)
 {
     bool interleave = FALSE;
     pe_resource_t *rsc = NULL;
     const char *interleave_s = NULL;
 
     if ((first->rsc == NULL) || (then->rsc == NULL)) {
         crm_trace("Not interleaving %s with %s (both must be resources)",
                   first->uuid, then->uuid);
         return FALSE;

     } else if (first->rsc == then->rsc) {
         crm_trace("Not interleaving %s with %s (must belong to different "
                   "resources)", first->uuid, then->uuid);
         return FALSE;

     } else if ((first->rsc->variant < pe_clone)
                || (then->rsc->variant < pe_clone)) {
         crm_trace("Not interleaving %s with %s (both sides must be clones "
                   "or bundles)", first->uuid, then->uuid);
         return FALSE;
     }
 
     if (pcmk__ends_with(then->uuid, "_stop_0")
         || pcmk__ends_with(then->uuid, "_demote_0")) {
         rsc = first->rsc;
     } else {
         rsc = then->rsc;
     }
 
     interleave_s = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERLEAVE);
     interleave = crm_is_true(interleave_s);
     crm_trace("Interleave %s -> %s: %s (based on %s)",
               first->uuid, then->uuid, interleave ? "yes" : "no", rsc->id);
 
     return interleave;
 }
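
 /* Interleaving is controlled by the clone/bundle meta-attribute looked up
  * above (XML_RSC_ATTR_INTERLEAVE), configured roughly as follows in the CIB
  * (IDs illustrative):
  *
  *   <clone id="app-clone">
  *     <meta_attributes id="app-clone-meta">
  *       <nvpair id="app-clone-interleave" name="interleave" value="true"/>
  *     </meta_attributes>
  *   </clone>
  *
  * For stop/demote orderings, the attribute is taken from the 'first'
  * resource; otherwise, from the 'then' resource.
  */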
 
 /*!
  * \internal
  * \brief Update two actions according to an ordering between them
  *
  * Given information about an ordering of two actions, update the actions'
  * flags (and runnable_before members when relevant) as appropriate for the
  * ordering. In some cases, the ordering could be disabled as well.
  *
  * \param[in] first     'First' action in an ordering
  * \param[in] then      'Then' action in an ordering
  * \param[in] node      If not NULL, limit scope of ordering to this node
  *                      (only used when interleaving instances)
  * \param[in] flags     Action flags for \p first for ordering purposes
  * \param[in] filter    Action flags to limit scope of certain updates (may
  *                      include pe_action_optional to affect only mandatory
  *                      actions, and pe_action_runnable to affect only
  *                      runnable actions)
  * \param[in] type      Group of enum pe_ordering flags to apply
  * \param[in] data_set  Cluster working set
  *
  * \return Group of enum pcmk__updated flags indicating what was updated
  */
 uint32_t
 pcmk__multi_update_actions(pe_action_t *first, pe_action_t *then,
                            pe_node_t *node, uint32_t flags, uint32_t filter,
                            uint32_t type, pe_working_set_t *data_set)
 {
     uint32_t changed = pcmk__updated_none;
 
     crm_trace("%s -> %s", first->uuid, then->uuid);
 
     if(can_interleave_actions(first, then)) {
         changed = multi_update_interleave_actions(first, then, node, filter,
                                                   type, data_set);
 
     } else if(then->rsc) {
         GList *gIter = NULL;
         GList *children = NULL;
 
         // Handle the 'primitive' ordering case
         changed |= pcmk__update_ordered_actions(first, then, node, flags,
                                                 filter, type, data_set);
 
         // Now any children (or containers in the case of a bundle)
         children = get_containers_or_children(then->rsc);
         for (gIter = children; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *then_child = (pe_resource_t *) gIter->data;
             uint32_t then_child_changed = pcmk__updated_none;
             pe_action_t *then_child_action = find_first_action(then_child->actions, NULL, then->task, node);
 
             if (then_child_action) {
                 uint32_t then_child_flags = then_child->cmds->action_flags(then_child_action,
                                                                            node);
 
                 if (pcmk_is_set(then_child_flags, pe_action_runnable)) {
                     then_child_changed |= then_child->cmds->update_ordered_actions(first,
                                                                                    then_child_action,
                                                                                    node,
                                                                                    flags,
                                                                                    filter,
                                                                                    type,
                                                                                    data_set);
                 }
                 changed |= then_child_changed;
                 if (pcmk_is_set(then_child_changed, pcmk__updated_then)) {
                     for (GList *lpc = then_child_action->actions_after; lpc != NULL; lpc = lpc->next) {
                         pe_action_wrapper_t *next = (pe_action_wrapper_t *) lpc->data;
 
                         pcmk__update_action_for_orderings(next->action,
                                                           data_set);
                     }
                 }
             }
         }
 
         if(children != then->rsc->children) {
             g_list_free(children);
         }
     }
     return changed;
 }
 
 void
 pcmk__bundle_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     get_bundle_variant_data(bundle_data, rsc);
 
     pcmk__apply_location(rsc, constraint);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (replica->container) {
             replica->container->cmds->apply_location(replica->container,
                                                      constraint);
         }
         if (replica->ip) {
             replica->ip->cmds->apply_location(replica->ip, constraint);
         }
     }
 
     if (bundle_data->child
         && ((constraint->role_filter == RSC_ROLE_UNPROMOTED)
             || (constraint->role_filter == RSC_ROLE_PROMOTED))) {
         bundle_data->child->cmds->apply_location(bundle_data->child,
                                                  constraint);
         bundle_data->child->rsc_location = g_list_prepend(bundle_data->child->rsc_location,
                                                           constraint);
     }
 }
 
 /*!
  * \internal
  * \brief Add a resource's actions to the transition graph
  *
  * \param[in] rsc  Resource whose actions should be added
  */
 void
 pcmk__bundle_expand(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (bundle_data->child) {
         bundle_data->child->cmds->add_actions_to_graph(bundle_data->child);
     }
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (replica->remote && replica->container
             && pe__bundle_needs_remote_name(replica->remote, rsc->cluster)) {
 
             /* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that
              * run pacemaker-remoted inside, without needing a separate IP for
              * the container. This is done by configuring the inner remote's
              * connection host as the magic string "#uname", then
              * replacing it with the underlying host when needed.
              */
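             /* For example, a connection resource configured with (nvpair ID
              * illustrative):
              *
              *   <nvpair id="remote-addr" name="addr" value="#uname"/>
              *
              * has "#uname" replaced below with the name of the host that is
              * actually running the container.
              */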
             xmlNode *nvpair = get_xpath_object("//nvpair[@name='" XML_RSC_ATTR_REMOTE_RA_ADDR "']",
                                                replica->remote->xml, LOG_ERR);
             const char *calculated_addr = NULL;
 
             // Replace the value in replica->remote->xml (if appropriate)
             calculated_addr = pe__add_bundle_remote_name(replica->remote,
                                                          rsc->cluster,
                                                          nvpair, "value");
             if (calculated_addr) {
                 /* Since this is for the bundle as a resource, and not any
                  * particular action, replace the value in the default
                  * parameters (not evaluated for node). create_graph_action()
                  * will grab it from there to replace it in node-evaluated
                  * parameters.
                  */
                 GHashTable *params = pe_rsc_params(replica->remote,
                                                    NULL, rsc->cluster);
 
                 g_hash_table_replace(params,
                                      strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
                                      strdup(calculated_addr));
             } else {
                 /* The only way to get here is if the remote connection is
                  * neither currently running nor scheduled to run. That means we
                  * won't be doing any operations that require addr (only start
                  * requires it; we additionally use it to compare digests when
                  * unpacking status, promote, and migrate_from history, but
                  * that's already happened by this point).
                  */
                 crm_info("Unable to determine address for bundle %s remote connection",
                          rsc->id);
             }
         }
         if (replica->ip) {
             replica->ip->cmds->add_actions_to_graph(replica->ip);
         }
         if (replica->container) {
             replica->container->cmds->add_actions_to_graph(replica->container);
         }
         if (replica->remote) {
             replica->remote->cmds->add_actions_to_graph(replica->remote);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Schedule any probes needed for a resource on a node
  *
  * \param[in] rsc   Resource to create probe for
  * \param[in] node  Node to create probe on
  *
  * \return true if any probe was created, otherwise false
  */
 bool
 pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node)
 {
     bool any_created = false;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return false);
 
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if ((replica->ip != NULL)
             && replica->ip->cmds->create_probe(replica->ip, node)) {
             any_created = true;
         }
         if ((replica->child != NULL) && (node->details == replica->node->details)
             && replica->child->cmds->create_probe(replica->child, node)) {
             any_created = true;
         }
         if ((replica->container != NULL)
             && replica->container->cmds->create_probe(replica->container,
                                                       node)) {
             any_created = true;
 
             /* If we're limited to one replica per host (probably because no
              * IP range was specified), we don't want any of this replica's
              * peer containers starting until we've established that no other
              * copies are already running.
              *
              * Partly this is to ensure that nreplicas_per_host is observed,
              * but also to ensure that the containers don't fail to start
              * because the necessary port mappings (which won't include an IP
              * for uniqueness) are already taken.
              */
 
             for (GList *tIter = bundle_data->replicas;
                  tIter && (bundle_data->nreplicas_per_host == 1);
                  tIter = tIter->next) {
                 pe__bundle_replica_t *other = tIter->data;
 
                 if ((other != NULL) && (other != replica)
                     && (other->container != NULL)) {
 
                     pcmk__new_ordering(replica->container,
                                        pcmk__op_key(replica->container->id, RSC_STATUS, 0),
                                        NULL, other->container,
                                        pcmk__op_key(other->container->id, RSC_START, 0),
                                        NULL,
                                        pe_order_optional|pe_order_same_node,
                                        rsc->cluster);
                 }
             }
         }
         if ((replica->container != NULL) && (replica->remote != NULL)
             && replica->remote->cmds->create_probe(replica->remote, node)) {
 
             /* Do not probe the remote resource until we know where the
              * container is running. This is required for REMOTE_CONTAINER_HACK
              * to correctly probe remote resources.
              */
             char *probe_uuid = pcmk__op_key(replica->remote->id, RSC_STATUS,
                                             0);
             pe_action_t *probe = find_first_action(replica->remote->actions,
                                                    probe_uuid, NULL, node);
 
             free(probe_uuid);
             if (probe != NULL) {
                 any_created = true;
                 crm_trace("Ordering %s probe on %s",
                           replica->remote->id, pe__node_name(node));
                 pcmk__new_ordering(replica->container,
                                    pcmk__op_key(replica->container->id, RSC_START, 0),
                                    NULL, replica->remote, NULL, probe,
                                    pe_order_probe, rsc->cluster);
             }
         }
     }
     return any_created;
 }
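
 /* With nreplicas_per_host == 1, the orderings created above look like
  * (resource names illustrative):
  *
  *   httpd-bundle-docker-0_monitor_0 -> httpd-bundle-docker-1_start_0
  *
  * so no peer container may start until each replica's probe has established
  * where copies are already running.
  */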
 
 void
 pcmk__bundle_append_meta(pe_resource_t *rsc, xmlNode *xml)
 {
     // Nothing to do: bundles have no meta-attributes to append
 }
 
 void
 pcmk__output_bundle_actions(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (replica->ip != NULL) {
             replica->ip->cmds->output_actions(replica->ip);
         }
         if (replica->container != NULL) {
             replica->container->cmds->output_actions(replica->container);
         }
         if (replica->remote != NULL) {
             replica->remote->cmds->output_actions(replica->remote);
         }
         if (replica->child != NULL) {
             replica->child->cmds->output_actions(replica->child);
         }
     }
 }
 
 // Bundle implementation of resource_alloc_functions_t:add_utilization()
 void
-pcmk__bundle_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
-                             GList *all_rscs, GHashTable *utilization)
+pcmk__bundle_add_utilization(const pe_resource_t *rsc,
+                             const pe_resource_t *orig_rsc, GList *all_rscs,
+                             GHashTable *utilization)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     pe__bundle_replica_t *replica = NULL;
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return;
     }
 
     get_bundle_variant_data(bundle_data, rsc);
     if (bundle_data->replicas == NULL) {
         return;
     }
 
     /* All bundle replicas are identical, so using the utilization of the first
      * is sufficient for any. Only the implicit container resource can have
      * utilization values.
      */
     replica = (pe__bundle_replica_t *) bundle_data->replicas->data;
     if (replica->container != NULL) {
         replica->container->cmds->add_utilization(replica->container, orig_rsc,
                                                   all_rscs, utilization);
     }
 }
 
 // Bundle implementation of resource_alloc_functions_t:shutdown_lock()
 void
 pcmk__bundle_shutdown_lock(pe_resource_t *rsc)
 {
     return; // Bundles currently don't support shutdown locks
 }
diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c
index 2b758902a8..9a44b33d34 100644
--- a/lib/pacemaker/pcmk_sched_clone.c
+++ b/lib/pacemaker/pcmk_sched_clone.c
@@ -1,1194 +1,1195 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 #define VARIANT_CLONE 1
 #include <lib/pengine/variant.h>
 
 static void append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean all);
 
 static pe_node_t *
 can_run_instance(pe_resource_t * rsc, pe_node_t * node, int limit)
 {
     pe_node_t *local_node = NULL;
 
     if (node == NULL && rsc->allowed_nodes) {
         GHashTableIter iter;
         g_hash_table_iter_init(&iter, rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&local_node)) {
             can_run_instance(rsc, local_node, limit);
         }
         return NULL;
     }
 
     if (!node) {
         /* make clang analyzer happy */
         goto bail;
 
     } else if (!pcmk__node_available(node, false, false)) {
         goto bail;
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         goto bail;
     }
 
     local_node = pcmk__top_allowed_node(rsc, node);
 
     if (local_node == NULL) {
         crm_warn("%s cannot run on %s: node not allowed",
                  rsc->id, pe__node_name(node));
         goto bail;
 
     } else if (local_node->weight < 0) {
         common_update_score(rsc, node->details->id, local_node->weight);
         pe_rsc_trace(rsc, "%s cannot run on %s: Parent node weight doesn't allow it.",
                      rsc->id, pe__node_name(node));
 
     } else if (local_node->count < limit) {
         pe_rsc_trace(rsc, "%s can run on %s (already running %d)",
                      rsc->id, pe__node_name(node), local_node->count);
         return local_node;
 
     } else {
         pe_rsc_trace(rsc, "%s cannot run on %s: node full (%d >= %d)",
                      rsc->id, pe__node_name(node), local_node->count, limit);
     }
 
   bail:
     if (node) {
         common_update_score(rsc, node->details->id, -INFINITY);
     }
     return NULL;
 }
 
 static pe_node_t *
 allocate_instance(pe_resource_t *rsc, pe_node_t *prefer, gboolean all_coloc,
                   int limit, pe_working_set_t *data_set)
 {
     pe_node_t *chosen = NULL;
     GHashTable *backup = NULL;
 
     CRM_ASSERT(rsc);
     pe_rsc_trace(rsc, "Checking allocation of %s (preferring %s, using %s parent colocations)",
                  rsc->id, (prefer? prefer->details->uname: "none"),
                  (all_coloc? "all" : "some"));
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return rsc->fns->location(rsc, NULL, FALSE);
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
         pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
         return NULL;
     }
 
     /* Only include positive colocation preferences of dependent resources
      * if not every node will get a copy of the clone
      */
     append_parent_colocation(rsc->parent, rsc, all_coloc);
 
     if (prefer) {
         pe_node_t *local_prefer = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
 
         if (local_prefer == NULL || local_prefer->weight < 0) {
             pe_rsc_trace(rsc, "Not pre-allocating %s to %s - unavailable", rsc->id,
                          pe__node_name(prefer));
             return NULL;
         }
     }
 
     can_run_instance(rsc, NULL, limit);
 
     backup = pcmk__copy_node_table(rsc->allowed_nodes);
     pe_rsc_trace(rsc, "Allocating instance %s", rsc->id);
     chosen = rsc->cmds->assign(rsc, prefer);
     if (chosen && prefer && (chosen->details != prefer->details)) {
         crm_info("Not pre-allocating %s to %s because %s is better",
                  rsc->id, pe__node_name(prefer), pe__node_name(chosen));
         g_hash_table_destroy(rsc->allowed_nodes);
         rsc->allowed_nodes = backup;
         pcmk__unassign_resource(rsc);
         chosen = NULL;
         backup = NULL;
     }
     if (chosen) {
         pe_node_t *local_node = pcmk__top_allowed_node(rsc, chosen);
 
         if (local_node) {
             local_node->count++;
 
         } else if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             /* Unclear what to do here: we can't enforce per-node limits in
              * this case
              */
             pcmk__config_err("%s not found in %s (list of %d)",
                              chosen->details->id, rsc->parent->id,
                              g_hash_table_size(rsc->parent->allowed_nodes));
         }
     }
 
     if(backup) {
         g_hash_table_destroy(backup);
     }
     return chosen;
 }
 
 static void
 append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean all)
 {
     GList *gIter = NULL;
 
     gIter = rsc->rsc_cons;
     for (; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data;
 
         if (all || cons->score < 0 || cons->score == INFINITY) {
             child->rsc_cons = g_list_prepend(child->rsc_cons, cons);
         }
     }
 
     gIter = rsc->rsc_cons_lhs;
     for (; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data;
 
         if (!pcmk__colocation_has_influence(cons, child)) {
             continue;
         }
         if (all || cons->score < 0) {
             child->rsc_cons_lhs = g_list_prepend(child->rsc_cons_lhs, cons);
         }
     }
 }
 
 
 void
 distribute_children(pe_resource_t *rsc, GList *children, GList *nodes,
                     int max, int per_host_max, pe_working_set_t * data_set);
 
 void
 distribute_children(pe_resource_t *rsc, GList *children, GList *nodes,
                     int max, int per_host_max, pe_working_set_t * data_set)
 {
     int loop_max = 0;
     int allocated = 0;
     int available_nodes = 0;
     bool all_coloc = false;
 
     /* count now tracks the number of clones currently allocated */
     for(GList *nIter = nodes; nIter != NULL; nIter = nIter->next) {
         pe_node_t *node = nIter->data;
 
         node->count = 0;
         if (pcmk__node_available(node, false, false)) {
             available_nodes++;
         }
     }
 
     all_coloc = (max < available_nodes);
 
     if(available_nodes) {
         loop_max = max / available_nodes;
     }
     if (loop_max < 1) {
         loop_max = 1;
     }
 
     pe_rsc_debug(rsc, "Allocating up to %d %s instances to a possible %d nodes (at most %d per host, %d optimal)",
                  max, rsc->id, available_nodes, per_host_max, loop_max);
 
     /* Pre-allocate as many instances as we can to their current location */
     for (GList *gIter = children; gIter != NULL && allocated < max; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
         pe_node_t *child_node = NULL;
         pe_node_t *local_node = NULL;
 
         if ((child->running_on == NULL)
             || !pcmk_is_set(child->flags, pe_rsc_provisional)
             || pcmk_is_set(child->flags, pe_rsc_failed)) {
 
             continue;
         }
 
         child_node = pe__current_node(child);
         local_node = pcmk__top_allowed_node(child, child_node);
 
         pe_rsc_trace(rsc,
                      "Checking pre-allocation of %s to %s (%d remaining of %d)",
                      child->id, pe__node_name(child_node), max - allocated,
                      max);
 
         if (!pcmk__node_available(child_node, true, false)) {
             pe_rsc_trace(rsc, "Not pre-allocating because %s can not run %s",
                          pe__node_name(child_node), child->id);
             continue;
         }
 
         if ((local_node != NULL) && (local_node->count >= loop_max)) {
             pe_rsc_trace(rsc,
                          "Not pre-allocating because %s already allocated "
                          "optimal instances", pe__node_name(child_node));
             continue;
         }
 
         if (allocate_instance(child, child_node, all_coloc, per_host_max,
                               data_set)) {
             pe_rsc_trace(rsc, "Pre-allocated %s to %s", child->id,
                          pe__node_name(child_node));
             allocated++;
         }
     }
 
     pe_rsc_trace(rsc, "Done pre-allocating (%d of %d)", allocated, max);
 
     for (GList *gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         if (child->running_on != NULL) {
             pe_node_t *child_node = pe__current_node(child);
             pe_node_t *local_node = pcmk__top_allowed_node(child, child_node);
 
             if (local_node == NULL) {
                 crm_err("%s is running on %s which isn't allowed",
                         child->id, pe__node_name(child_node));
             }
         }
 
         if (!pcmk_is_set(child->flags, pe_rsc_provisional)) {
             // Already assigned; nothing to do

         } else if (allocated >= max) {
             pe_rsc_debug(rsc, "Child %s not allocated: limit reached (%d of %d)",
                          child->id, allocated, max);
             resource_location(child, NULL, -INFINITY, "clone:limit_reached",
                               data_set);
         } else {
             if (allocate_instance(child, NULL, all_coloc, per_host_max,
                                   data_set)) {
                 allocated++;
             }
         }
     }
 
     pe_rsc_debug(rsc, "Allocated %d %s instances of a possible %d",
                  allocated, rsc->id, max);
 }
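
 /* A sketch of the arithmetic above (numbers illustrative): with max = 8
  * instances and 3 available nodes, loop_max = 8 / 3 = 2, so the
  * pre-allocation pass leaves at most 2 instances on each node; the
  * remaining 8 - (3 * 2) = 2 instances are placed by the second pass.
  */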
 
 /*!
  * \internal
  * \brief Assign a clone resource to a node
  *
  * \param[in,out] rsc     Resource to assign to a node
  * \param[in]     prefer  Node to prefer, if all else is equal
  *
  * \return Node that \p rsc is assigned to, if assigned entirely to one node
  */
 pe_node_t *
 pcmk__clone_allocate(pe_resource_t *rsc, const pe_node_t *prefer)
 {
     GList *nodes = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return NULL;
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
         pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
         return NULL;
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__add_promotion_scores(rsc);
     }
 
     pe__set_resource_flags(rsc, pe_rsc_allocating);
 
     /* This information is used by pcmk__cmp_instance() when deciding the order
      * in which to assign clone instances to nodes.
      */
     for (GList *gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
 
         pe_rsc_trace(rsc, "%s: Allocating %s first",
                      rsc->id, constraint->primary->id);
         constraint->primary->cmds->assign(constraint->primary, prefer);
     }
 
     for (GList *gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
 
         if (pcmk__colocation_has_influence(constraint, NULL)) {
             pe_resource_t *dependent = constraint->dependent;
             const char *attr = constraint->node_attribute;
             const float factor = constraint->score / (float) INFINITY;
             const uint32_t flags = pcmk__coloc_select_active
                                    |pcmk__coloc_select_nonnegative;
 
             dependent->cmds->add_colocated_node_scores(dependent, rsc->id,
                                                        &rsc->allowed_nodes,
                                                        attr, factor, flags);
         }
     }
 
     pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
                           rsc, __func__, rsc->allowed_nodes, rsc->cluster);
 
     nodes = g_hash_table_get_values(rsc->allowed_nodes);
     nodes = pcmk__sort_nodes(nodes, NULL);
     rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance);
     distribute_children(rsc, rsc->children, nodes, clone_data->clone_max,
                         clone_data->clone_node_max, rsc->cluster);
     g_list_free(nodes);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__set_instance_roles(rsc);
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_provisional|pe_rsc_allocating);
     pe_rsc_trace(rsc, "Done allocating %s", rsc->id);
     return NULL;
 }
 
 static void
 clone_update_pseudo_status(pe_resource_t * rsc, gboolean * stopping, gboolean * starting,
                            gboolean * active)
 {
     GList *gIter = NULL;
 
     if (rsc->children) {
 
         gIter = rsc->children;
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child = (pe_resource_t *) gIter->data;
 
             clone_update_pseudo_status(child, stopping, starting, active);
         }
 
         return;
     }
 
     CRM_ASSERT(active != NULL);
     CRM_ASSERT(starting != NULL);
     CRM_ASSERT(stopping != NULL);
 
     if (rsc->running_on) {
         *active = TRUE;
     }
 
     gIter = rsc->actions;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (*starting && *stopping) {
             return;
 
         } else if (pcmk_is_set(action->flags, pe_action_optional)) {
             pe_rsc_trace(rsc, "Skipping optional: %s", action->uuid);
             continue;
 
         } else if (!pcmk_any_flags_set(action->flags,
                                        pe_action_pseudo|pe_action_runnable)) {
             pe_rsc_trace(rsc, "Skipping unrunnable: %s", action->uuid);
             continue;
 
         } else if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_casei)) {
             pe_rsc_trace(rsc, "Stopping due to: %s", action->uuid);
             *stopping = TRUE;
 
         } else if (pcmk__str_eq(RSC_START, action->task, pcmk__str_casei)) {
             if (!pcmk_is_set(action->flags, pe_action_runnable)) {
                 pe_rsc_trace(rsc, "Skipping pseudo-op: %s run=%d, pseudo=%d",
                              action->uuid,
                              pcmk_is_set(action->flags, pe_action_runnable),
                              pcmk_is_set(action->flags, pe_action_pseudo));
             } else {
                 pe_rsc_trace(rsc, "Starting due to: %s", action->uuid);
                 pe_rsc_trace(rsc, "%s run=%d, pseudo=%d",
                              action->uuid,
                              pcmk_is_set(action->flags, pe_action_runnable),
                              pcmk_is_set(action->flags, pe_action_pseudo));
                 *starting = TRUE;
             }
         }
     }
 }
 
 static pe_action_t *
 find_rsc_action(pe_resource_t *rsc, const char *task)
 {
     pe_action_t *match = NULL;
     GList *actions = pe__resource_actions(rsc, NULL, task, FALSE);
 
     for (GList *item = actions; item != NULL; item = item->next) {
         pe_action_t *op = (pe_action_t *) item->data;
 
         if (!pcmk_is_set(op->flags, pe_action_optional)) {
             if (match != NULL) {
                 // More than one match, don't return any
                 match = NULL;
                 break;
             }
             match = op;
         }
     }
     g_list_free(actions);
     return match;
 }
 
 static void
 child_ordering_constraints(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     pe_action_t *stop = NULL;
     pe_action_t *start = NULL;
     pe_action_t *last_stop = NULL;
     pe_action_t *last_start = NULL;
     GList *gIter = NULL;
 
     if (!pe__clone_is_ordered(rsc)) {
         return;
     }
 
     /* we have to maintain a consistent sorted child list when building order constraints */
     rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
 
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         stop = find_rsc_action(child, RSC_STOP);
         if (stop) {
             if (last_stop) {
                 /* child/child relative stop */
                 order_actions(stop, last_stop, pe_order_optional);
             }
             last_stop = stop;
         }
 
         start = find_rsc_action(child, RSC_START);
         if (start) {
             if (last_start) {
                 /* child/child relative start */
                 order_actions(last_start, start, pe_order_optional);
             }
             last_start = start;
         }
     }
 }
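
 /* The ordered start/stop chain built above is enabled by the clone
  * meta-attribute that pe__clone_is_ordered() checks, configured roughly as
  * (IDs illustrative):
  *
  *   <clone id="app-clone">
  *     <meta_attributes id="app-clone-meta">
  *       <nvpair id="app-clone-ordered" name="ordered" value="true"/>
  *     </meta_attributes>
  *   </clone>
  */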
 
 void
 clone_create_actions(pe_resource_t *rsc)
 {
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     pe_rsc_debug(rsc, "Creating actions for clone %s", rsc->id);
     clone_create_pseudo_actions(rsc, rsc->children, &clone_data->start_notify,
                                 &clone_data->stop_notify);
     child_ordering_constraints(rsc, rsc->cluster);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__create_promotable_actions(rsc);
     }
 }
 
 void
 clone_create_pseudo_actions(pe_resource_t *rsc, GList *children,
                             notify_data_t **start_notify,
                             notify_data_t **stop_notify)
 {
     gboolean child_active = FALSE;
     gboolean child_starting = FALSE;
     gboolean child_stopping = FALSE;
     gboolean allow_dependent_migrations = TRUE;
 
     pe_action_t *stop = NULL;
     pe_action_t *stopped = NULL;
 
     pe_action_t *start = NULL;
     pe_action_t *started = NULL;
 
     pe_rsc_trace(rsc, "Creating actions for %s", rsc->id);
 
     for (GList *gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean starting = FALSE;
         gboolean stopping = FALSE;
 
         child_rsc->cmds->create_actions(child_rsc);
         clone_update_pseudo_status(child_rsc, &stopping, &starting, &child_active);
         if (stopping && starting) {
             allow_dependent_migrations = FALSE;
         }
 
         child_stopping |= stopping;
         child_starting |= starting;
     }
 
     /* start */
     start = pe__new_rsc_pseudo_action(rsc, RSC_START, !child_starting, true);
     started = pe__new_rsc_pseudo_action(rsc, RSC_STARTED, !child_starting,
                                         false);
     started->priority = INFINITY;
 
     if (child_active || child_starting) {
         pe__set_action_flags(started, pe_action_runnable);
     }
 
     if (start_notify != NULL && *start_notify == NULL) {
         *start_notify = pe__clone_notif_pseudo_ops(rsc, RSC_START, start,
                                                    started);
     }
 
     /* stop */
     stop = pe__new_rsc_pseudo_action(rsc, RSC_STOP, !child_stopping, true);
     stopped = pe__new_rsc_pseudo_action(rsc, RSC_STOPPED, !child_stopping,
                                         true);
     stopped->priority = INFINITY;
     if (allow_dependent_migrations) {
         pe__set_action_flags(stop, pe_action_migrate_runnable);
     }
 
     if (stop_notify != NULL && *stop_notify == NULL) {
         *stop_notify = pe__clone_notif_pseudo_ops(rsc, RSC_STOP, stop, stopped);
 
         if (start_notify && *start_notify && *stop_notify) {
             order_actions((*stop_notify)->post_done, (*start_notify)->pre, pe_order_optional);
         }
     }
 }
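
 /* In the transition graph, the pseudo-actions created above show up as,
  * for example (resource name illustrative), app-clone_start_0 and
  * app-clone_running_0 bracketing the instances' start actions, and
  * app-clone_stop_0 and app-clone_stopped_0 bracketing their stops.
  */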
 
 void
 clone_internal_constraints(pe_resource_t *rsc)
 {
     pe_resource_t *last_rsc = NULL;
     GList *gIter;
     bool ordered = pe__clone_is_ordered(rsc);
 
     pe_rsc_trace(rsc, "Internal constraints for %s", rsc->id);
     pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START,
                                  pe_order_optional);
     pcmk__order_resource_actions(rsc, RSC_START, rsc, RSC_STARTED,
                                  pe_order_runnable_left);
     pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_STOPPED,
                                  pe_order_runnable_left);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_STOP,
                                      pe_order_optional);
         pcmk__order_resource_actions(rsc, RSC_STARTED, rsc, RSC_PROMOTE,
                                      pe_order_runnable_left);
     }
 
     if (ordered) {
         /* we have to maintain a consistent sorted child list when building order constraints */
         rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
     }
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->internal_constraints(child_rsc);
 
         pcmk__order_starts(rsc, child_rsc,
                            pe_order_runnable_left|pe_order_implies_first_printed);
         pcmk__order_resource_actions(child_rsc, RSC_START, rsc, RSC_STARTED,
                                      pe_order_implies_then_printed);
         if (ordered && (last_rsc != NULL)) {
             pcmk__order_starts(last_rsc, child_rsc, pe_order_optional);
         }
 
         pcmk__order_stops(rsc, child_rsc, pe_order_implies_first_printed);
         pcmk__order_resource_actions(child_rsc, RSC_STOP, rsc, RSC_STOPPED,
                                      pe_order_implies_then_printed);
         if (ordered && (last_rsc != NULL)) {
             pcmk__order_stops(child_rsc, last_rsc, pe_order_optional);
         }
 
         last_rsc = child_rsc;
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__order_promotable_instances(rsc);
     }
 }
 
 gboolean
 is_child_compatible(const pe_resource_t *child_rsc, const pe_node_t *local_node,
                     enum rsc_role_e filter, gboolean current)
 {
     pe_node_t *node = NULL;
     enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;

     CRM_CHECK(child_rsc && local_node, return FALSE);

     next_role = child_rsc->fns->state(child_rsc, current);

     if (is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) {
         /* We only want instances that haven't failed */
         node = child_rsc->fns->location(child_rsc, NULL, current);
     }
 
     if (filter != RSC_ROLE_UNKNOWN && next_role != filter) {
         crm_trace("Filtered %s", child_rsc->id);
         return FALSE;
     }
 
     if (node && (node->details == local_node->details)) {
         return TRUE;
 
     } else if (node) {
         crm_trace("%s - %s vs %s", child_rsc->id, pe__node_name(node),
                   pe__node_name(local_node));
 
     } else {
         crm_trace("%s - not allocated %d", child_rsc->id, current);
     }
     return FALSE;
 }
 
 pe_resource_t *
 find_compatible_child(const pe_resource_t *local_child,
                       const pe_resource_t *rsc, enum rsc_role_e filter,
                       gboolean current)
 {
     pe_resource_t *pair = NULL;
     GList *gIter = NULL;
     GList *scratch = NULL;
     pe_node_t *local_node = NULL;
 
     local_node = local_child->fns->location(local_child, NULL, current);
     if (local_node) {
         return find_compatible_child_by_node(local_child, local_node, rsc, filter, current);
     }
 
     scratch = g_hash_table_get_values(local_child->allowed_nodes);
     scratch = pcmk__sort_nodes(scratch, NULL);
 
     gIter = scratch;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         pair = find_compatible_child_by_node(local_child, node, rsc, filter, current);
         if (pair) {
             goto done;
         }
     }
 
     pe_rsc_debug(rsc, "Can't pair %s with %s", local_child->id, rsc->id);
   done:
     g_list_free(scratch);
     return pair;
 }
 
 /*!
  * \internal
  * \brief Apply a colocation's score to node weights or resource priority
  *
  * Given a colocation constraint, apply its score to the dependent's
  * allowed node weights (if we are still placing resources) or priority (if
  * we are choosing promotable clone instance roles).
  *
  * \param[in,out] dependent      Dependent resource in colocation
  * \param[in]     primary        Primary resource in colocation
  * \param[in]     colocation     Colocation constraint to apply
  * \param[in]     for_dependent  true if called on behalf of dependent
  */
 void
 pcmk__clone_apply_coloc_score(pe_resource_t *dependent,
                               const pe_resource_t *primary,
                               const pcmk__colocation_t *colocation,
                               bool for_dependent)
 {
     GList *gIter = NULL;
     gboolean do_interleave = FALSE;
     const char *interleave_s = NULL;
 
     /* This should never be called for the clone itself as a dependent.
      * Instead, we add the clone's colocation constraints to its instances
      * and call apply_coloc_score() for the instances as dependents.
      */
     CRM_ASSERT(!for_dependent);
 
     CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
               return);
     CRM_CHECK(dependent->variant == pe_native, return);
 
     pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d",
                  colocation->id, dependent->id, primary->id, colocation->score);
 
     if (pcmk_is_set(primary->flags, pe_rsc_promotable)) {
         if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
             // We haven't placed the primary yet, so we can't apply colocation
             pe_rsc_trace(primary, "%s is still provisional", primary->id);
             return;
 
         } else if (colocation->primary_role == RSC_ROLE_UNKNOWN) {
             // This isn't a role-specific colocation, so handle normally
             pe_rsc_trace(primary, "Handling %s as a clone colocation",
                          colocation->id);
 
         } else if (pcmk_is_set(dependent->flags, pe_rsc_provisional)) {
             // We're placing the dependent
             pcmk__update_dependent_with_promotable(primary, dependent,
                                                    colocation);
             return;
 
         } else if (colocation->dependent_role == RSC_ROLE_PROMOTED) {
             // We're choosing roles for the dependent
             pcmk__update_promotable_dependent_priority(primary, dependent,
                                                        colocation);
             return;
         }
     }
 
     // Only the dependent needs to be marked for interleave
     interleave_s = g_hash_table_lookup(colocation->dependent->meta,
                                        XML_RSC_ATTR_INTERLEAVE);
     if (crm_is_true(interleave_s)
         && (colocation->dependent->variant > pe_group)) {
         /* @TODO Do we actually care about multiple primary copies sharing a
          * dependent copy anymore?
          */
         if (copies_per_node(colocation->dependent) != copies_per_node(colocation->primary)) {
             pcmk__config_err("Cannot interleave %s and %s because they do not "
                              "support the same number of instances per node",
                              colocation->dependent->id,
                              colocation->primary->id);
 
         } else {
             do_interleave = TRUE;
         }
     }
 
     if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
         pe_rsc_trace(primary, "%s is still provisional", primary->id);
         return;
 
     } else if (do_interleave) {
         pe_resource_t *primary_instance = NULL;
 
         primary_instance = find_compatible_child(dependent, primary,
                                                  RSC_ROLE_UNKNOWN, FALSE);
         if (primary_instance != NULL) {
             pe_rsc_debug(primary, "Pairing %s with %s",
                          dependent->id, primary_instance->id);
             dependent->cmds->apply_coloc_score(dependent, primary_instance,
                                                colocation, true);
 
         } else if (colocation->score >= INFINITY) {
             crm_notice("Cannot pair %s with instance of %s",
                        dependent->id, primary->id);
             pcmk__assign_resource(dependent, NULL, true);
 
         } else {
             pe_rsc_debug(primary, "Cannot pair %s with instance of %s",
                          dependent->id, primary->id);
         }
 
         return;
 
     } else if (colocation->score >= INFINITY) {
         GList *affected_nodes = NULL;
 
         gIter = primary->children;
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
             pe_node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE);
 
             if (chosen != NULL && is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) {
                 pe_rsc_trace(primary, "Allowing %s: %s %d",
                              colocation->id, pe__node_name(chosen),
                              chosen->weight);
                 affected_nodes = g_list_prepend(affected_nodes, chosen);
             }
         }
 
         node_list_exclude(dependent->allowed_nodes, affected_nodes, FALSE);
         g_list_free(affected_nodes);
         return;
     }
 
     gIter = primary->children;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->apply_coloc_score(dependent, child_rsc, colocation,
                                            false);
     }
 }
 
 enum action_tasks
 clone_child_action(pe_action_t * action)
 {
     enum action_tasks result = no_action;
     pe_resource_t *child = (pe_resource_t *) action->rsc->children->data;
 
     if (pcmk__strcase_any_of(action->task, "notify", "notified", NULL)) {
 
         /* Find the action we're notifying about instead */
 
         int stop = 0;
         char *key = action->uuid;
         int lpc = strlen(key);
 
         for (; lpc > 0; lpc--) {
             if (key[lpc] == '_' && stop == 0) {
                 stop = lpc;
 
             } else if (key[lpc] == '_') {
                 char *task_mutable = NULL;
 
                 lpc++;
                 task_mutable = strdup(key + lpc);
                 task_mutable[stop - lpc] = 0;
 
                 crm_trace("Extracted action '%s' from '%s'", task_mutable, key);
                 result = get_complex_task(child, task_mutable, TRUE);
                 free(task_mutable);
                 break;
             }
         }
 
     } else {
         result = get_complex_task(child, action->task, TRUE);
     }
     return result;
 }
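
 /* As an example of the parsing above: for a notification action with a
  * UUID such as "app-clone_confirmed-post_notify_start_0" (illustrative),
  * the loop extracts the token between the last two underscores ("start")
  * and maps it to the corresponding enum action_tasks value.
  */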
 
 #define pe__clear_action_summary_flags(flags, action, flag) do {        \
         flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                      "Action summary", action->rsc->id, \
                                      flags, flag, #flag);               \
     } while (0)
 
 enum pe_action_flags
 summary_action_flags(pe_action_t *action, GList *children,
                      const pe_node_t *node)
 {
     GList *gIter = NULL;
     gboolean any_runnable = FALSE;
     enum action_tasks task = clone_child_action(action);
     enum pe_action_flags flags = (pe_action_optional | pe_action_runnable | pe_action_pseudo);
     const char *task_s = task2text(task);
 
     for (gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_action_t *child_action = NULL;
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         child_action = find_first_action(child->actions, NULL, task_s, child->children ? NULL : node);
         pe_rsc_trace(action->rsc, "Checking for %s in %s on %s (%s)", task_s, child->id,
                      pe__node_name(node), child_action?child_action->uuid:"NA");
         if (child_action) {
             enum pe_action_flags child_flags = child->cmds->action_flags(child_action, node);
 
             if (pcmk_is_set(flags, pe_action_optional)
                 && !pcmk_is_set(child_flags, pe_action_optional)) {
                 pe_rsc_trace(child, "%s is mandatory because of %s", action->uuid,
                              child_action->uuid);
                 pe__clear_action_summary_flags(flags, action, pe_action_optional);
                 pe__clear_action_flags(action, pe_action_optional);
             }
             if (pcmk_is_set(child_flags, pe_action_runnable)) {
                 any_runnable = TRUE;
             }
         }
     }
 
     if (check_runnable && any_runnable == FALSE) {
         pe_rsc_trace(action->rsc, "%s is not runnable because no children are", action->uuid);
         pe__clear_action_summary_flags(flags, action, pe_action_runnable);
         if (node == NULL) {
             pe__clear_action_flags(action, pe_action_runnable);
         }
     }
 
     return flags;
 }
 
 enum pe_action_flags
 clone_action_flags(pe_action_t *action, const pe_node_t *node)
 {
     return summary_action_flags(action, action->rsc->children, node);
 }
 
 void
 clone_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
 {
     GList *gIter = rsc->children;
 
     pe_rsc_trace(rsc, "Processing location constraint %s for %s", constraint->id, rsc->id);
 
     pcmk__apply_location(rsc, constraint);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->apply_location(child_rsc, constraint);
     }
 }
 
 /*!
  * \internal
  * \brief Add a resource's actions to the transition graph
  *
  * \param[in] rsc  Resource whose actions should be added
  */
 void
 clone_expand(pe_resource_t *rsc)
 {
     GList *gIter = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     g_list_foreach(rsc->actions, (GFunc) rsc->cmds->action_flags, NULL);
 
     pe__create_notifications(rsc, clone_data->start_notify);
     pe__create_notifications(rsc, clone_data->stop_notify);
     pe__create_notifications(rsc, clone_data->promote_notify);
     pe__create_notifications(rsc, clone_data->demote_notify);
 
     /* Now that the notifications have been created, we can expand the children */
 
     gIter = rsc->children;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->add_actions_to_graph(child_rsc);
     }
 
     pcmk__add_rsc_actions_to_graph(rsc);
 
     /* The notifications are in the graph now, we can destroy the notify_data */
     pe__free_notification_data(clone_data->demote_notify);
     clone_data->demote_notify = NULL;
     pe__free_notification_data(clone_data->stop_notify);
     clone_data->stop_notify = NULL;
     pe__free_notification_data(clone_data->start_notify);
     clone_data->start_notify = NULL;
     pe__free_notification_data(clone_data->promote_notify);
     clone_data->promote_notify = NULL;
 }
 
 // Check whether a resource or any of its children is known on node
 static bool
 rsc_known_on(const pe_resource_t *rsc, const pe_node_t *node)
 {
     if (rsc->children) {
         for (GList *child_iter = rsc->children; child_iter != NULL;
              child_iter = child_iter->next) {
 
             pe_resource_t *child = (pe_resource_t *) child_iter->data;
 
             if (rsc_known_on(child, node)) {
                 return TRUE;
             }
         }
 
     } else if (rsc->known_on) {
         GHashTableIter iter;
         pe_node_t *known_node = NULL;
 
         g_hash_table_iter_init(&iter, rsc->known_on);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &known_node)) {
             if (node->details == known_node->details) {
                 return TRUE;
             }
         }
     }
     return FALSE;
 }
 
 // Look for an instance of clone that is known on node
 static pe_resource_t *
 find_instance_on(const pe_resource_t *clone, const pe_node_t *node)
 {
     for (GList *gIter = clone->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         if (rsc_known_on(child, node)) {
             return child;
         }
     }
     return NULL;
 }
 
 // For anonymous clones, only a single instance needs to be probed
 static bool
 probe_anonymous_clone(pe_resource_t *rsc, pe_node_t *node,
                       pe_working_set_t *data_set)
 {
     // First, check if we probed an instance on this node last time
     pe_resource_t *child = find_instance_on(rsc, node);
 
     // Otherwise, check if we plan to start an instance on this node
     if (child == NULL) {
         for (GList *child_iter = rsc->children; child_iter && !child;
              child_iter = child_iter->next) {
 
             pe_node_t *local_node = NULL;
             pe_resource_t *child_rsc = (pe_resource_t *) child_iter->data;
 
             if (child_rsc) { /* make clang analyzer happy */
                 local_node = child_rsc->fns->location(child_rsc, NULL, FALSE);
                 if (local_node && (local_node->details == node->details)) {
                     child = child_rsc;
                 }
             }
         }
     }
 
     // Otherwise, use the first clone instance
     if (child == NULL) {
         child = rsc->children->data;
     }
     CRM_ASSERT(child);
     return child->cmds->create_probe(child, node);
 }
 
 /*!
  * \internal
  * \brief Schedule any probes needed for a resource on a node
  *
  * \param[in] rsc   Resource to create probe for
  * \param[in] node  Node to create probe on
  *
  * \return true if any probe was created, otherwise false
  */
 bool
 clone_create_probe(pe_resource_t *rsc, pe_node_t *node)
 {
     CRM_ASSERT(rsc);
 
     rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
     if (rsc->children == NULL) {
         pe_warn("Clone %s has no children", rsc->id);
         return false;
     }
 
     if (rsc->exclusive_discover) {
         pe_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
         if (allowed && allowed->rsc_discover_mode != pe_discover_exclusive) {
             /* exclusive discover is enabled and this node is not marked
              * as a node this resource should be discovered on
              *
              * remove the node from allowed_nodes so that the
              * notification contains only nodes that we might ever run
              * on
              */
             g_hash_table_remove(rsc->allowed_nodes, node->details->id);
 
             /* Bit of a shortcut - might as well take it */
             return false;
         }
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
         return pcmk__probe_resource_list(rsc->children, node);
     } else {
         return probe_anonymous_clone(rsc, node, rsc->cluster);
     }
 }
 
 void
 clone_append_meta(pe_resource_t * rsc, xmlNode * xml)
 {
     char *name = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     name = crm_meta_name(XML_RSC_ATTR_UNIQUE);
     crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_unique));
     free(name);
 
     name = crm_meta_name(XML_RSC_ATTR_NOTIFY);
     crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_notify));
     free(name);
 
     name = crm_meta_name(XML_RSC_ATTR_INCARNATION_MAX);
     crm_xml_add_int(xml, name, clone_data->clone_max);
     free(name);
 
     name = crm_meta_name(XML_RSC_ATTR_INCARNATION_NODEMAX);
     crm_xml_add_int(xml, name, clone_data->clone_node_max);
     free(name);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         int promoted_max = pe__clone_promoted_max(rsc);
         int promoted_node_max = pe__clone_promoted_node_max(rsc);
 
         name = crm_meta_name(XML_RSC_ATTR_PROMOTED_MAX);
         crm_xml_add_int(xml, name, promoted_max);
         free(name);
 
         name = crm_meta_name(XML_RSC_ATTR_PROMOTED_NODEMAX);
         crm_xml_add_int(xml, name, promoted_node_max);
         free(name);
 
         /* @COMPAT Maintain backward compatibility with resource agents that
          * expect the old names (deprecated since 2.0.0).
          */
         name = crm_meta_name(PCMK_XA_PROMOTED_MAX_LEGACY);
         crm_xml_add_int(xml, name, promoted_max);
         free(name);
 
         name = crm_meta_name(PCMK_XA_PROMOTED_NODE_MAX_LEGACY);
         crm_xml_add_int(xml, name, promoted_node_max);
         free(name);
     }
 }
 
 // Clone implementation of resource_alloc_functions_t:add_utilization()
 void
-pcmk__clone_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
-                            GList *all_rscs, GHashTable *utilization)
+pcmk__clone_add_utilization(const pe_resource_t *rsc,
+                            const pe_resource_t *orig_rsc, GList *all_rscs,
+                            GHashTable *utilization)
 {
     bool existing = false;
     pe_resource_t *child = NULL;
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return;
     }
 
     // Look for any child already existing in the list
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         child = (pe_resource_t *) iter->data;
         if (g_list_find(all_rscs, child)) {
             existing = true; // Keep checking remaining children
         } else {
             // If this is a clone of a group, look for group's members
             for (GList *member_iter = child->children; member_iter != NULL;
                  member_iter = member_iter->next) {
 
                 pe_resource_t *member = (pe_resource_t *) member_iter->data;
 
                 if (g_list_find(all_rscs, member) != NULL) {
                     // Add *child's* utilization, not group member's
                     child->cmds->add_utilization(child, orig_rsc, all_rscs,
                                                  utilization);
                     existing = true;
                     break;
                 }
             }
         }
     }
 
     if (!existing && (rsc->children != NULL)) {
         // If nothing was found, still add first child's utilization
         child = (pe_resource_t *) rsc->children->data;
 
         child->cmds->add_utilization(child, orig_rsc, all_rscs, utilization);
     }
 }
 
 // Clone implementation of resource_alloc_functions_t:shutdown_lock()
 void
 pcmk__clone_shutdown_lock(pe_resource_t *rsc)
 {
     return; // Clones currently don't support shutdown locks
 }
diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c
index d9e4d0a3c6..4e8c53a830 100644
--- a/lib/pacemaker/pcmk_sched_group.c
+++ b/lib/pacemaker/pcmk_sched_group.c
@@ -1,738 +1,739 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdbool.h>
 
 #include <crm/msg_xml.h>
 
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 #define VARIANT_GROUP 1
 #include <lib/pengine/variant.h>
 
 /*!
  * \internal
  * \brief Expand a group's colocations to its members
  *
  * \param[in,out] rsc  Group resource
  */
 static void
 expand_group_colocations(pe_resource_t *rsc)
 {
     group_variant_data_t *group_data = NULL;
     pe_resource_t *member = NULL;
     bool any_unmanaged = false;
 
     get_group_variant_data(group_data, rsc);
 
     // Treat "group with R" colocations as "first member with R"
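     // (e.g. for a hypothetical group G = (A, B, C) colocated with R,
     //  "G with R" is handled as "A with R")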
     member = group_data->first_child;
     member->rsc_cons = g_list_concat(member->rsc_cons, rsc->rsc_cons);
 
     /* The above works for the whole group because each group member is
      * colocated with the previous one.
      *
      * However, there is a special case when a group has a mandatory colocation
      * with a resource that can't start. In that case,
      * pcmk__block_colocated_starts() will ensure that dependent resources in
      * mandatory colocations (i.e. the first member for groups) can't start
      * either. But if any group member is unmanaged and already started, the
      * internal group colocations are no longer sufficient to make that apply to
      * later members.
      *
      * To handle that case, add mandatory colocations to each member after the
      * first.
      */
     any_unmanaged = !pcmk_is_set(member->flags, pe_rsc_managed);
     for (GList *item = rsc->children->next; item != NULL; item = item->next) {
         member = item->data;
         if (any_unmanaged) {
             for (GList *cons_iter = rsc->rsc_cons; cons_iter != NULL;
                  cons_iter = cons_iter->next) {
 
                 pcmk__colocation_t *constraint = (pcmk__colocation_t *) cons_iter->data;
 
                 if (constraint->score == INFINITY) {
                     member->rsc_cons = g_list_prepend(member->rsc_cons, constraint);
                 }
             }
         } else if (!pcmk_is_set(member->flags, pe_rsc_managed)) {
             any_unmanaged = true;
         }
     }
 
     rsc->rsc_cons = NULL;
 
     // Treat "R with group" colocations as "R with last member"
     member = group_data->last_child;
     member->rsc_cons_lhs = g_list_concat(member->rsc_cons_lhs,
                                          rsc->rsc_cons_lhs);
     rsc->rsc_cons_lhs = NULL;
 }
 
 /*!
  * \internal
  * \brief Assign a group resource to a node
  *
  * \param[in,out] rsc     Resource to assign to a node
  * \param[in]     prefer  Node to prefer, if all else is equal
  *
  * \return Node that \p rsc is assigned to, if assigned entirely to one node
  */
 pe_node_t *
 pcmk__group_allocate(pe_resource_t *rsc, const pe_node_t *prefer)
 {
     pe_node_t *node = NULL;
     pe_node_t *group_node = NULL;
     GList *gIter = NULL;
     group_variant_data_t *group_data = NULL;
 
     get_group_variant_data(group_data, rsc);
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return rsc->allocated_to;
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
         pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
         return NULL;
     }
 
     if (group_data->first_child == NULL) {
         // Nothing to allocate
         pe__clear_resource_flags(rsc, pe_rsc_provisional);
         return NULL;
     }
 
     pe__set_resource_flags(rsc, pe_rsc_allocating);
     rsc->role = group_data->first_child->role;
 
     expand_group_colocations(rsc);
 
     pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
                           rsc, __func__, rsc->allowed_nodes, rsc->cluster);
 
     gIter = rsc->children;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         pe_rsc_trace(rsc, "Allocating group %s member %s",
                      rsc->id, child_rsc->id);
         node = child_rsc->cmds->assign(child_rsc, prefer);
         if (group_node == NULL) {
             group_node = node;
         }
     }
 
     pe__set_next_role(rsc, group_data->first_child->next_role,
                       "first group member");
     pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
 
     if (group_data->colocated) {
         return group_node;
     }
     return NULL;
 }
 
 void group_update_pseudo_status(pe_resource_t * parent, pe_resource_t * child);
 
 void
 group_create_actions(pe_resource_t *rsc)
 {
     pe_action_t *op = NULL;
     const char *value = NULL;
     GList *gIter = rsc->children;
 
     pe_rsc_trace(rsc, "Creating actions for %s", rsc->id);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->create_actions(child_rsc);
         group_update_pseudo_status(rsc, child_rsc);
     }
 
     op = start_action(rsc, NULL, TRUE /* !group_data->child_starting */ );
     pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
 
     op = custom_action(rsc, started_key(rsc),
                        RSC_STARTED, NULL,
                        TRUE /* !group_data->child_starting */ ,
                        TRUE, rsc->cluster);
     pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
 
     op = stop_action(rsc, NULL, TRUE /* !group_data->child_stopping */ );
     pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
 
     op = custom_action(rsc, stopped_key(rsc),
                        RSC_STOPPED, NULL,
                        TRUE /* !group_data->child_stopping */ ,
                        TRUE, rsc->cluster);
     pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
 
     value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROMOTABLE);
     if (crm_is_true(value)) {
         op = custom_action(rsc, demote_key(rsc), RSC_DEMOTE, NULL, TRUE, TRUE,
                            rsc->cluster);
         pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
 
         op = custom_action(rsc, demoted_key(rsc), RSC_DEMOTED, NULL, TRUE, TRUE,
                            rsc->cluster);
         pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
 
         op = custom_action(rsc, promote_key(rsc), RSC_PROMOTE, NULL, TRUE, TRUE,
                            rsc->cluster);
         pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
 
         op = custom_action(rsc, promoted_key(rsc), RSC_PROMOTED, NULL, TRUE,
                            TRUE, rsc->cluster);
         pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
     }
 }
 
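 /*!
  * \internal
  * \brief Detect whether an ordered group is starting or stopping
  *
  * Scan one member's actions for mandatory, runnable starts and stops, and
  * record the result in the group's child_starting/child_stopping flags
  * (no-op for unordered groups).
  */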
 void
 group_update_pseudo_status(pe_resource_t * parent, pe_resource_t * child)
 {
     GList *gIter = child->actions;
     group_variant_data_t *group_data = NULL;
 
     get_group_variant_data(group_data, parent);
 
     if (group_data->ordered == FALSE) {
         /* If this group is not ordered, then leave the meta-actions as optional */
         return;
     }
 
     if (group_data->child_stopping && group_data->child_starting) {
         return;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (pcmk_is_set(action->flags, pe_action_optional)) {
             continue;
         }
         if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_casei)
             && pcmk_is_set(action->flags, pe_action_runnable)) {
 
             group_data->child_stopping = TRUE;
             pe_rsc_trace(action->rsc, "Based on %s the group is stopping", action->uuid);
 
         } else if (pcmk__str_eq(RSC_START, action->task, pcmk__str_casei)
                    && pcmk_is_set(action->flags, pe_action_runnable)) {
             group_data->child_starting = TRUE;
             pe_rsc_trace(action->rsc, "Based on %s the group is starting", action->uuid);
         }
     }
 }
 
 void
 group_internal_constraints(pe_resource_t *rsc)
 {
     GList *gIter = rsc->children;
     pe_resource_t *last_rsc = NULL;
     pe_resource_t *last_active = NULL;
     pe_resource_t *top = uber_parent(rsc);
     group_variant_data_t *group_data = NULL;
 
     get_group_variant_data(group_data, rsc);
 
     pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START,
                                  pe_order_optional);
     pcmk__order_resource_actions(rsc, RSC_START, rsc, RSC_STARTED,
                                  pe_order_runnable_left);
     pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_STOPPED,
                                  pe_order_runnable_left);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
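         /* Default flags for ordering this member's actions relative to the
          * group's start/started and stop/stopped pseudo-actions (adjusted
          * below for the first member and for unordered groups)
          */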
         int stop = pe_order_none;
         int stopped = pe_order_implies_then_printed;
         int start = pe_order_implies_then | pe_order_runnable_left;
         int started =
             pe_order_runnable_left | pe_order_implies_then | pe_order_implies_then_printed;
 
         child_rsc->cmds->internal_constraints(child_rsc);
 
         if (last_rsc == NULL) {
             if (group_data->ordered) {
                 pe__set_order_flags(stop, pe_order_optional);
                 stopped = pe_order_implies_then;
             }
 
         } else if (group_data->colocated) {
             pcmk__new_colocation("group:internal_colocation", NULL, INFINITY,
                                  child_rsc, last_rsc, NULL, NULL,
                                  pcmk_is_set(child_rsc->flags, pe_rsc_critical),
                                  rsc->cluster);
         }
 
         if (pcmk_is_set(top->flags, pe_rsc_promotable)) {
             pcmk__order_resource_actions(rsc, RSC_DEMOTE, child_rsc, RSC_DEMOTE,
                                          stop|pe_order_implies_first_printed);
 
             pcmk__order_resource_actions(child_rsc, RSC_DEMOTE, rsc,
                                          RSC_DEMOTED, stopped);
 
             pcmk__order_resource_actions(child_rsc, RSC_PROMOTE, rsc,
                                          RSC_PROMOTED, started);
 
             pcmk__order_resource_actions(rsc, RSC_PROMOTE, child_rsc,
                                          RSC_PROMOTE,
                                          pe_order_implies_first_printed);
 
         }
 
         pcmk__order_starts(rsc, child_rsc, pe_order_implies_first_printed);
         pcmk__order_stops(rsc, child_rsc,
                           stop|pe_order_implies_first_printed);
 
         pcmk__order_resource_actions(child_rsc, RSC_STOP, rsc, RSC_STOPPED,
                                      stopped);
         pcmk__order_resource_actions(child_rsc, RSC_START, rsc, RSC_STARTED,
                                      started);
 
         if (group_data->ordered == FALSE) {
             pcmk__order_starts(rsc, child_rsc,
                                start|pe_order_implies_first_printed);
             if (pcmk_is_set(top->flags, pe_rsc_promotable)) {
                 pcmk__order_resource_actions(rsc, RSC_PROMOTE, child_rsc,
                                              RSC_PROMOTE,
                                              start|pe_order_implies_first_printed);
             }
 
         } else if (last_rsc != NULL) {
             pcmk__order_starts(last_rsc, child_rsc, start);
             pcmk__order_stops(child_rsc, last_rsc,
                               pe_order_optional|pe_order_restart);
 
             if (pcmk_is_set(top->flags, pe_rsc_promotable)) {
                 pcmk__order_resource_actions(last_rsc, RSC_PROMOTE, child_rsc,
                                              RSC_PROMOTE, start);
                 pcmk__order_resource_actions(child_rsc, RSC_DEMOTE, last_rsc,
                                              RSC_DEMOTE, pe_order_optional);
             }
 
         } else {
             pcmk__order_starts(rsc, child_rsc, pe_order_none);
             if (pcmk_is_set(top->flags, pe_rsc_promotable)) {
                 pcmk__order_resource_actions(rsc, RSC_PROMOTE, child_rsc,
                                              RSC_PROMOTE, pe_order_none);
             }
         }
 
         /* Look for partially active groups, and make sure they still shut
          * down in sequence
          */
         if (child_rsc->running_on) {
             if (group_data->ordered
                 && last_rsc
                 && last_rsc->running_on == NULL && last_active && last_active->running_on) {
                 pcmk__order_stops(child_rsc, last_active, pe_order_optional);
             }
             last_active = child_rsc;
         }
 
         last_rsc = child_rsc;
     }
 
     if (group_data->ordered && last_rsc != NULL) {
         int stop_stop_flags = pe_order_implies_then;
         int stop_stopped_flags = pe_order_optional;
 
         pcmk__order_stops(rsc, last_rsc, stop_stop_flags);
         pcmk__order_resource_actions(last_rsc, RSC_STOP, rsc, RSC_STOPPED,
                                      stop_stopped_flags);
 
         if (pcmk_is_set(top->flags, pe_rsc_promotable)) {
             pcmk__order_resource_actions(rsc, RSC_DEMOTE, last_rsc, RSC_DEMOTE,
                                          stop_stop_flags);
             pcmk__order_resource_actions(last_rsc, RSC_DEMOTE, rsc, RSC_DEMOTED,
                                          stop_stopped_flags);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Apply a colocation's score to node weights or resource priority
  *
  * Given a colocation constraint, apply its score to the dependent's
  * allowed node weights (if we are still placing resources) or priority (if
  * we are choosing promotable clone instance roles).
  *
  * \param[in,out] dependent      Dependent resource in colocation
  * \param[in]     primary        Primary resource in colocation
  * \param[in]     colocation     Colocation constraint to apply
  * \param[in]     for_dependent  true if called on behalf of dependent
  */
 void
 pcmk__group_apply_coloc_score(pe_resource_t *dependent,
                               const pe_resource_t *primary,
                               const pcmk__colocation_t *colocation,
                               bool for_dependent)
 {
     GList *gIter = NULL;
     group_variant_data_t *group_data = NULL;
 
     CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
               return);
 
     if (!for_dependent) {
         goto for_primary;
     }
 
     gIter = dependent->children;
     pe_rsc_trace(dependent, "Processing constraints from %s", dependent->id);
 
     get_group_variant_data(group_data, dependent);
 
     if (group_data->colocated) {
         group_data->first_child->cmds->apply_coloc_score(group_data->first_child,
                                                          primary, colocation,
                                                          true);
         return;
 
     } else if (colocation->score >= INFINITY) {
         pcmk__config_err("%s: Cannot perform mandatory colocation "
                          "between non-colocated group and %s",
                          dependent->id, primary->id);
         return;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->apply_coloc_score(child_rsc, primary, colocation,
                                            true);
     }
     return;
 
 for_primary:
     gIter = primary->children;
     get_group_variant_data(group_data, primary);
     CRM_CHECK(dependent->variant == pe_native, return);
 
     pe_rsc_trace(primary,
                  "Processing colocation %s (%s with group %s) for primary",
                  colocation->id, dependent->id, primary->id);
 
     if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
         return;
 
     } else if (group_data->colocated && group_data->first_child) {
         if (colocation->score >= INFINITY) {
             // Dependent can't start until group is fully up
             group_data->last_child->cmds->apply_coloc_score(dependent,
                                                             group_data->last_child,
                                                             colocation, false);
         } else {
             // Dependent can start as long as group is partially up
             group_data->first_child->cmds->apply_coloc_score(dependent,
                                                              group_data->first_child,
                                                              colocation, false);
         }
 
         return;
 
     } else if (colocation->score >= INFINITY) {
         pcmk__config_err("%s: Cannot perform mandatory colocation with"
                          " non-colocated group %s", dependent->id, primary->id);
         return;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->apply_coloc_score(dependent, child_rsc, colocation,
                                            false);
     }
 }
 
 enum pe_action_flags
 group_action_flags(pe_action_t *action, const pe_node_t *node)
 {
     GList *gIter = NULL;
     enum pe_action_flags flags = (pe_action_optional | pe_action_runnable | pe_action_pseudo);
 
     for (gIter = action->rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
         enum action_tasks task = get_complex_task(child, action->task, TRUE);
         const char *task_s = task2text(task);
         pe_action_t *child_action = find_first_action(child->actions, NULL, task_s, node);
 
         if (child_action) {
             enum pe_action_flags child_flags = child->cmds->action_flags(child_action, node);
 
             if (pcmk_is_set(flags, pe_action_optional)
                 && !pcmk_is_set(child_flags, pe_action_optional)) {
                 pe_rsc_trace(action->rsc, "%s is mandatory because of %s", action->uuid,
                              child_action->uuid);
                 pe__clear_raw_action_flags(flags, "group action",
                                            pe_action_optional);
                 pe__clear_action_flags(action, pe_action_optional);
             }
             if (!pcmk__str_eq(task_s, action->task, pcmk__str_casei)
                 && pcmk_is_set(flags, pe_action_runnable)
                 && !pcmk_is_set(child_flags, pe_action_runnable)) {
 
                 pe_rsc_trace(action->rsc, "%s is not runnable because of %s", action->uuid,
                              child_action->uuid);
                 pe__clear_raw_action_flags(flags, "group action",
                                            pe_action_runnable);
                 pe__clear_action_flags(action, pe_action_runnable);
             }
 
         } else if (task != stop_rsc && task != action_demote) {
             pe_rsc_trace(action->rsc, "%s is not runnable because of %s (not found in %s)",
                          action->uuid, task_s, child->id);
             pe__clear_raw_action_flags(flags, "group action",
                                        pe_action_runnable);
         }
     }
 
     return flags;
 }
 
 /*!
  * \internal
  * \brief Update two actions according to an ordering between them
  *
  * Given information about an ordering of two actions, update the actions'
  * flags (and runnable_before members if appropriate) as needed for the
  * ordering. In some cases, the ordering could be disabled as well.
  *
  * \param[in] first     'First' action in an ordering
  * \param[in] then      'Then' action in an ordering
  * \param[in] node      If not NULL, limit scope of ordering to this node
  *                      (only used when interleaving instances)
  * \param[in] flags     Action flags for \p first for ordering purposes
  * \param[in] filter    Action flags to limit scope of certain updates (may
  *                      include pe_action_optional to affect only mandatory
  *                      actions, and pe_action_runnable to affect only
  *                      runnable actions)
  * \param[in] type      Group of enum pe_ordering flags to apply
  * \param[in] data_set  Cluster working set
  *
  * \return Group of enum pcmk__updated flags indicating what was updated
  */
 uint32_t
 group_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node,
                      uint32_t flags, uint32_t filter, uint32_t type,
                      pe_working_set_t *data_set)
 {
     GList *gIter = then->rsc->children;
     uint32_t changed = pcmk__updated_none;
 
     CRM_ASSERT(then->rsc != NULL);
     changed |= pcmk__update_ordered_actions(first, then, node, flags, filter,
                                             type, data_set);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
         pe_action_t *child_action = find_first_action(child->actions, NULL, then->task, node);
 
         if (child_action) {
             changed |= child->cmds->update_ordered_actions(first, child_action,
                                                            node, flags, filter,
                                                            type, data_set);
         }
     }
 
     return changed;
 }
 
 void
 group_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
 {
     GList *gIter = rsc->children;
     GList *saved = constraint->node_list_rh;
     GList *zero = pcmk__copy_node_list(constraint->node_list_rh, true);
     gboolean reset_scores = TRUE;
     group_variant_data_t *group_data = NULL;
 
     get_group_variant_data(group_data, rsc);
 
     pe_rsc_debug(rsc, "Processing rsc_location %s for %s", constraint->id, rsc->id);
 
     pcmk__apply_location(rsc, constraint);
 
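     /* Apply the constraint's real node scores only to the first member of a
      * colocated group; later members see the zeroed copy of the node list,
      * because their placement already follows the previous member via the
      * group's internal colocations.
      */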
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->apply_location(child_rsc, constraint);
         if (group_data->colocated && reset_scores) {
             reset_scores = FALSE;
             constraint->node_list_rh = zero;
         }
     }
 
     constraint->node_list_rh = saved;
     g_list_free_full(zero, free);
 }
 
 /*!
  * \internal
  * \brief Update nodes with scores of colocated resources' nodes
  *
  * Given a table of nodes and a resource, update the nodes' scores with the
  * scores of the best nodes matching the attribute used for each of the
  * resource's relevant colocations.
  *
  * \param[in,out] rsc      Resource to check colocations for
  * \param[in]     log_id   Resource ID to use in log messages
  * \param[in,out] nodes    Nodes to update
  * \param[in]     attr     Colocation attribute (NULL to use default)
  * \param[in]     factor   Incorporate scores multiplied by this factor
  * \param[in]     flags    Bitmask of enum pcmk__coloc_select values
  *
  * \note The caller remains responsible for freeing \p *nodes.
  */
 void
 pcmk__group_add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
                                       GHashTable **nodes, const char *attr,
                                       float factor, uint32_t flags)
 {
     GList *gIter = rsc->rsc_cons_lhs;
     pe_resource_t *member = NULL;
     group_variant_data_t *group_data = NULL;
 
     CRM_CHECK((rsc != NULL) && (nodes != NULL), return);
 
     if (log_id == NULL) {
         log_id = rsc->id;
     }
 
     get_group_variant_data(group_data, rsc);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_merging)) {
         pe_rsc_info(rsc, "Breaking dependency loop with %s at %s",
                     rsc->id, log_id);
         return;
     }
 
     pe__set_resource_flags(rsc, pe_rsc_merging);
 
     member = group_data->first_child;
     member->cmds->add_colocated_node_scores(member, log_id, nodes, attr,
                                             factor, flags);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
 
         pcmk__add_colocated_node_scores(constraint->dependent, rsc->id, nodes,
                                         constraint->node_attribute,
                                         constraint->score / (float) INFINITY,
                                         flags);
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_merging);
 }
 
 void
 group_append_meta(pe_resource_t * rsc, xmlNode * xml)
 {
 }
 
 // Group implementation of resource_alloc_functions_t:colocated_resources()
 GList *
 pcmk__group_colocated_resources(pe_resource_t *rsc, pe_resource_t *orig_rsc,
                                 GList *colocated_rscs)
 {
     pe_resource_t *child_rsc = NULL;
     group_variant_data_t *group_data = NULL;
 
     get_group_variant_data(group_data, rsc);
 
     if (orig_rsc == NULL) {
         orig_rsc = rsc;
     }
 
     if (group_data->colocated || pe_rsc_is_clone(rsc->parent)) {
         /* This group has colocated members and/or is cloned -- either way,
          * add every child's colocated resources to the list.
          */
         for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
             child_rsc = (pe_resource_t *) gIter->data;
             colocated_rscs = child_rsc->cmds->colocated_resources(child_rsc,
                                                                   orig_rsc,
                                                                   colocated_rscs);
         }
 
     } else if (group_data->first_child != NULL) {
         /* This group's members are not colocated, and the group is not cloned,
          * so just add the first child's colocations to the list.
          */
         child_rsc = group_data->first_child;
         colocated_rscs = child_rsc->cmds->colocated_resources(child_rsc,
                                                               orig_rsc,
                                                               colocated_rscs);
     }
 
     // Now consider colocations where the group itself is specified
     colocated_rscs = pcmk__colocated_resources(rsc, orig_rsc, colocated_rscs);
 
     return colocated_rscs;
 }
 
 // Group implementation of resource_alloc_functions_t:add_utilization()
 void
-pcmk__group_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
-                            GList *all_rscs, GHashTable *utilization)
+pcmk__group_add_utilization(const pe_resource_t *rsc,
+                            const pe_resource_t *orig_rsc, GList *all_rscs,
+                            GHashTable *utilization)
 {
     group_variant_data_t *group_data = NULL;
     pe_resource_t *child = NULL;
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return;
     }
 
     pe_rsc_trace(orig_rsc, "%s: Adding group %s as colocated utilization",
                  orig_rsc->id, rsc->id);
     get_group_variant_data(group_data, rsc);
     if (group_data->colocated || pe_rsc_is_clone(rsc->parent)) {
         // Every group member will be on same node, so sum all members
         for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
             child = (pe_resource_t *) iter->data;
 
             if (pcmk_is_set(child->flags, pe_rsc_provisional)
                 && (g_list_find(all_rscs, child) == NULL)) {
                 child->cmds->add_utilization(child, orig_rsc, all_rscs,
                                              utilization);
             }
         }
 
     } else {
         // Just add first child's utilization
         child = group_data->first_child;
         if ((child != NULL)
             && pcmk_is_set(child->flags, pe_rsc_provisional)
             && (g_list_find(all_rscs, child) == NULL)) {
 
             child->cmds->add_utilization(child, orig_rsc, all_rscs,
                                          utilization);
         }
     }
 }
 
 // Group implementation of resource_alloc_functions_t:shutdown_lock()
 void
 pcmk__group_shutdown_lock(pe_resource_t *rsc)
 {
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         pe_resource_t *child = (pe_resource_t *) iter->data;
 
         child->cmds->shutdown_lock(child);
     }
 }
diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c
index 8227f07362..09c617e96e 100644
--- a/lib/pacemaker/pcmk_sched_primitive.c
+++ b/lib/pacemaker/pcmk_sched_primitive.c
@@ -1,1544 +1,1545 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdbool.h>
 
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 static void stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
 static void start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
 static void demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional);
 static void promote_resource(pe_resource_t *rsc, pe_node_t *node,
                              bool optional);
 static void assert_role_error(pe_resource_t *rsc, pe_node_t *node,
                               bool optional);
 
 static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
     /* This array lists the immediate next role when transitioning from one role
      * to a target role. For example, when going from Stopped to Promoted, the
      * next role is Unpromoted, because the resource must be started before it
      * can be promoted. The current role then becomes Unpromoted, which is fed
      * into this array again, giving a next role of Promoted.
      *
      * Current role       Immediate next role   Final target role
      * ------------       -------------------   -----------------
      */
     /* Unknown */       { RSC_ROLE_UNKNOWN,     /* Unknown */
                           RSC_ROLE_STOPPED,     /* Stopped */
                           RSC_ROLE_STOPPED,     /* Started */
                           RSC_ROLE_STOPPED,     /* Unpromoted */
                           RSC_ROLE_STOPPED,     /* Promoted */
                         },
     /* Stopped */       { RSC_ROLE_STOPPED,     /* Unknown */
                           RSC_ROLE_STOPPED,     /* Stopped */
                           RSC_ROLE_STARTED,     /* Started */
                           RSC_ROLE_UNPROMOTED,  /* Unpromoted */
                           RSC_ROLE_UNPROMOTED,  /* Promoted */
                         },
     /* Started */       { RSC_ROLE_STOPPED,     /* Unknown */
                           RSC_ROLE_STOPPED,     /* Stopped */
                           RSC_ROLE_STARTED,     /* Started */
                           RSC_ROLE_UNPROMOTED,  /* Unpromoted */
                           RSC_ROLE_PROMOTED,    /* Promoted */
                         },
     /* Unpromoted */    { RSC_ROLE_STOPPED,     /* Unknown */
                           RSC_ROLE_STOPPED,     /* Stopped */
                           RSC_ROLE_STOPPED,     /* Started */
                           RSC_ROLE_UNPROMOTED,  /* Unpromoted */
                           RSC_ROLE_PROMOTED,    /* Promoted */
                         },
     /* Promoted  */     { RSC_ROLE_STOPPED,     /* Unknown */
                           RSC_ROLE_UNPROMOTED,  /* Stopped */
                           RSC_ROLE_UNPROMOTED,  /* Started */
                           RSC_ROLE_UNPROMOTED,  /* Unpromoted */
                           RSC_ROLE_PROMOTED,    /* Promoted */
                         },
 };
 
 /*!
  * \internal
  * \brief Function to schedule actions needed for a role change
  *
  * \param[in,out] rsc       Resource whose role is changing
  * \param[in]     node      Node where resource will be in its next role
  * \param[in]     optional  Whether scheduled actions should be optional
  */
 typedef void (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *node,
                                   bool optional);
 
 static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
     /* This array lists the function needed to transition directly from one role
      * to another. NULL indicates that nothing is needed.
      *
      * Current role         Transition function             Next role
      * ------------         -------------------             ----------
      */
     /* Unknown */       {   assert_role_error,              /* Unknown */
                             stop_resource,                  /* Stopped */
                             assert_role_error,              /* Started */
                             assert_role_error,              /* Unpromoted */
                             assert_role_error,              /* Promoted */
                         },
     /* Stopped */       {   assert_role_error,              /* Unknown */
                             NULL,                           /* Stopped */
                             start_resource,                 /* Started */
                             start_resource,                 /* Unpromoted */
                             assert_role_error,              /* Promoted */
                         },
     /* Started */       {   assert_role_error,              /* Unknown */
                             stop_resource,                  /* Stopped */
                             NULL,                           /* Started */
                             NULL,                           /* Unpromoted */
                             promote_resource,               /* Promoted */
                         },
     /* Unpromoted */    {   assert_role_error,              /* Unknown */
                             stop_resource,                  /* Stopped */
                             stop_resource,                  /* Started */
                             NULL,                           /* Unpromoted */
                             promote_resource,               /* Promoted */
                         },
     /* Promoted  */     {   assert_role_error,              /* Unknown */
                             demote_resource,                /* Stopped */
                             demote_resource,                /* Started */
                             demote_resource,                /* Unpromoted */
                             NULL,                           /* Promoted */
                         },
 };
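 
 /* A minimal sketch (illustrative only -- nothing in the scheduler calls
  * this) of how the two matrices above combine: repeatedly look up the
  * immediate next role in rsc_state_matrix, run the transition function for
  * that hop from rsc_action_matrix if there is one, and continue until the
  * target role is reached. The real code additionally decides per-hop
  * optionality and which node each action runs on.
  */
 static inline void
 example_walk_role_transitions(pe_resource_t *rsc, pe_node_t *node,
                               bool optional)
 {
     enum rsc_role_e role = rsc->role;
 
     while (role != rsc->next_role) {
         enum rsc_role_e next = rsc_state_matrix[role][rsc->next_role];
         rsc_transition_fn fn = rsc_action_matrix[role][next];
 
         if (next == role) {
             break; // no defined path to the target (e.g. role Unknown)
         }
         if (fn != NULL) {
             fn(rsc, node, optional); // schedule the action for this hop
         }
         role = next; // feed the intermediate role back into the matrices
     }
 }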
 
 /*!
  * \internal
  * \brief Get a list of a resource's allowed nodes sorted by node weight
  *
  * \param[in] rsc  Resource to check
  *
  * \return List of allowed nodes sorted by node weight
  */
 static GList *
 sorted_allowed_nodes(const pe_resource_t *rsc)
 {
     if (rsc->allowed_nodes != NULL) {
         GList *nodes = g_hash_table_get_values(rsc->allowed_nodes);
 
         if (nodes != NULL) {
             return pcmk__sort_nodes(nodes, pe__current_node(rsc));
         }
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Assign a resource to its best allowed node, if possible
  *
  * \param[in,out] rsc     Resource to choose a node for
  * \param[in]     prefer  If not NULL, prefer this node when all else equal
  *
  * \return true if \p rsc could be assigned to a node, otherwise false
  */
 static bool
 assign_best_node(pe_resource_t *rsc, const pe_node_t *prefer)
 {
     GList *nodes = NULL;
     pe_node_t *chosen = NULL;
     pe_node_t *best = NULL;
     bool result = false;
     const pe_node_t *most_free_node = pcmk__ban_insufficient_capacity(rsc);
 
     if (prefer == NULL) {
         prefer = most_free_node;
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         // We've already finished assignment of resources to nodes
         return rsc->allocated_to != NULL;
     }
 
     // Sort allowed nodes by weight
     nodes = sorted_allowed_nodes(rsc);
     if (nodes != NULL) {
         best = (pe_node_t *) nodes->data; // First node has best score
     }
 
     if ((prefer != NULL) && (nodes != NULL)) {
         // Get the allowed node version of prefer
         chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
 
         if (chosen == NULL) {
             pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
                          pe__node_name(prefer), rsc->id);
 
         /* Favor the preferred node as long as its weight is at least as good as
          * the best allowed node's.
          *
          * An alternative would be to favor the preferred node even if the best
          * node is better, when the best node's weight is less than INFINITY.
          */
         } else if (chosen->weight < best->weight) {
             pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
                          pe__node_name(chosen), rsc->id);
             chosen = NULL;
 
         } else if (!pcmk__node_available(chosen, true, false)) {
             pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
                          pe__node_name(chosen), rsc->id);
             chosen = NULL;
 
         } else {
             pe_rsc_trace(rsc,
                          "Chose preferred node %s for %s (ignoring %d candidates)",
                          pe__node_name(chosen), rsc->id, g_list_length(nodes));
         }
     }
 
     if ((chosen == NULL) && (best != NULL)) {
         /* Either there is no preferred node, or the preferred node is not
          * suitable, but another node is allowed to run the resource.
          */
 
         chosen = best;
 
         if (!pe_rsc_is_unique_clone(rsc->parent)
             && (chosen->weight > 0) // Zero not acceptable
             && pcmk__node_available(chosen, false, false)) {
             /* If the resource is already running on a node, prefer that node if
              * it is just as good as the chosen node.
              *
              * We don't do this for unique clone instances, because
              * distribute_children() has already assigned instances to their
              * running nodes when appropriate, and if we get here, we don't want
              * remaining unassigned instances to prefer a node that's already
              * running another instance.
              */
             pe_node_t *running = pe__current_node(rsc);
 
             if (running == NULL) {
                 // Nothing to do
 
             } else if (!pcmk__node_available(running, true, false)) {
                 pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
                              rsc->id, pe__node_name(running));
 
             } else {
                 int nodes_with_best_score = 1;
 
                 for (GList *iter = nodes->next; iter; iter = iter->next) {
                     pe_node_t *allowed = (pe_node_t *) iter->data;
 
                     if (allowed->weight != chosen->weight) {
                         // The nodes are sorted by weight, so no more are equal
                         break;
                     }
                     if (pe__same_node(allowed, running)) {
                         // Scores are equal, so prefer the current node
                         chosen = allowed;
                     }
                     nodes_with_best_score++;
                 }
 
                 if (nodes_with_best_score > 1) {
                     do_crm_log(((chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO),
                                "Chose %s for %s from %d nodes with score %s",
                                pe__node_name(chosen), rsc->id,
                                nodes_with_best_score,
                                pcmk_readable_score(chosen->weight));
                 }
             }
         }
 
         pe_rsc_trace(rsc, "Chose %s for %s from %d candidates",
                      pe__node_name(chosen), rsc->id, g_list_length(nodes));
     }
 
     result = pcmk__finalize_assignment(rsc, chosen, false);
     g_list_free(nodes);
     return result;
 }
 
 /*!
  * \internal
  * \brief Apply a "this with" colocation to a node's allowed node scores
  *
  * \param[in] data       Colocation to apply
  * \param[in] user_data  Resource being assigned
  */
 static void
 apply_this_with(void *data, void *user_data)
 {
     pcmk__colocation_t *colocation = (pcmk__colocation_t *) data;
     pe_resource_t *rsc = (pe_resource_t *) user_data;
 
     GHashTable *archive = NULL;
     pe_resource_t *other = colocation->primary;
 
     // In certain cases, we will need to revert the node scores
     if ((colocation->dependent_role >= RSC_ROLE_PROMOTED)
         || ((colocation->score < 0) && (colocation->score > -INFINITY))) {
         archive = pcmk__copy_node_table(rsc->allowed_nodes);
     }
 
     pe_rsc_trace(rsc,
                  "%s: Assigning colocation %s primary %s first "
                  "(score=%d role=%s)",
                  rsc->id, colocation->id, other->id,
                  colocation->score, role2text(colocation->dependent_role));
     other->cmds->assign(other, NULL);
 
     // Apply the colocation score to this resource's allowed node scores
     rsc->cmds->apply_coloc_score(rsc, other, colocation, true);
     if ((archive != NULL)
         && !pcmk__any_node_available(rsc->allowed_nodes)) {
         pe_rsc_info(rsc,
                     "%s: Reverting scores from colocation with %s "
                     "because no nodes allowed",
                     rsc->id, other->id);
         g_hash_table_destroy(rsc->allowed_nodes);
         rsc->allowed_nodes = archive;
         archive = NULL;
     }
     if (archive != NULL) {
         g_hash_table_destroy(archive);
     }
 }
 
 /*!
  * \internal
  * \brief Apply a "with this" colocation to a node's allowed node scores
  *
  * \param[in] data       Colocation to apply
  * \param[in] user_data  Resource being assigned
  */
 static void
 apply_with_this(void *data, void *user_data)
 {
     pcmk__colocation_t *colocation = (pcmk__colocation_t *) data;
     pe_resource_t *rsc = (pe_resource_t *) user_data;
 
     pe_resource_t *other = colocation->dependent;
     const float factor = colocation->score / (float) INFINITY;
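     // (a weight in [-1.0, 1.0], proportional to the colocation score)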
 
     if (!pcmk__colocation_has_influence(colocation, NULL)) {
         return;
     }
     pe_rsc_trace(rsc,
                  "%s: Incorporating attenuated %s assignment scores due "
                  "to colocation %s", rsc->id, other->id, colocation->id);
     other->cmds->add_colocated_node_scores(other, rsc->id,
                                            &rsc->allowed_nodes,
                                            colocation->node_attribute,
                                            factor, pcmk__coloc_select_active);
 }
 
 /*!
  * \internal
  * \brief Update a Pacemaker Remote node once its connection has been assigned
  *
  * \param[in] connection  Connection resource that has been assigned
  */
 static void
 remote_connection_assigned(pe_resource_t *connection)
 {
     pe_node_t *remote_node = pe_find_node(connection->cluster->nodes,
                                           connection->id);
 
     CRM_CHECK(remote_node != NULL, return);
 
     if ((connection->allocated_to != NULL)
         && (connection->next_role != RSC_ROLE_STOPPED)) {
 
         crm_trace("Pacemaker Remote node %s will be online",
                   remote_node->details->id);
         remote_node->details->online = TRUE;
         if (remote_node->details->unseen) {
             // Avoid unnecessary fence, since we will attempt connection
             remote_node->details->unclean = FALSE;
         }
 
     } else {
         crm_trace("Pacemaker Remote node %s will be shut down "
                   "(%sassigned connection's next role is %s)",
                   remote_node->details->id,
                   ((connection->allocated_to == NULL)? "un" : ""),
                   role2text(connection->next_role));
         remote_node->details->shutdown = TRUE;
     }
 }
 
 /*!
  * \internal
  * \brief Assign a primitive resource to a node
  *
  * \param[in,out] rsc     Resource to assign to a node
  * \param[in]     prefer  Node to prefer, if all else is equal
  *
  * \return Node that \p rsc is assigned to, if assigned entirely to one node
  */
 pe_node_t *
 pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer)
 {
     CRM_ASSERT(rsc != NULL);
 
     // Never assign a child without parent being assigned first
     if ((rsc->parent != NULL)
         && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) {
         pe_rsc_debug(rsc, "%s: Assigning parent %s first",
                      rsc->id, rsc->parent->id);
         rsc->parent->cmds->assign(rsc->parent, prefer);
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return rsc->allocated_to; // Assignment has already been done
     }
 
     // Ensure we detect assignment loops
     if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
         pe_rsc_debug(rsc, "Breaking assignment loop involving %s", rsc->id);
         return NULL;
     }
     pe__set_resource_flags(rsc, pe_rsc_allocating);
 
     pe__show_node_weights(true, rsc, "Pre-assignment", rsc->allowed_nodes,
                           rsc->cluster);
 
     g_list_foreach(rsc->rsc_cons, apply_this_with, rsc);
     pe__show_node_weights(true, rsc, "Post-this-with", rsc->allowed_nodes,
                           rsc->cluster);
 
     g_list_foreach(rsc->rsc_cons_lhs, apply_with_this, rsc);
 
     if (rsc->next_role == RSC_ROLE_STOPPED) {
         pe_rsc_trace(rsc,
                      "Banning %s from all nodes because it will be stopped",
                      rsc->id);
         resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE,
                           rsc->cluster);
 
     } else if ((rsc->next_role > rsc->role)
                && !pcmk_is_set(rsc->cluster->flags, pe_flag_have_quorum)
                && (rsc->cluster->no_quorum_policy == no_quorum_freeze)) {
         crm_notice("Resource %s cannot be elevated from %s to %s due to "
                    "no-quorum-policy=freeze",
                    rsc->id, role2text(rsc->role), role2text(rsc->next_role));
         pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze");
     }
 
     pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
                           rsc, __func__, rsc->allowed_nodes, rsc->cluster);
 
     // Unmanage resource if fencing is enabled but no device is configured
     if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)
         && !pcmk_is_set(rsc->cluster->flags, pe_flag_have_stonith_resource)) {
         pe__clear_resource_flags(rsc, pe_rsc_managed);
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         // Unmanaged resources stay on their current node
         const char *reason = NULL;
         pe_node_t *assign_to = NULL;
 
         pe__set_next_role(rsc, rsc->role, "unmanaged");
         assign_to = pe__current_node(rsc);
         if (assign_to == NULL) {
             reason = "inactive";
         } else if (rsc->role == RSC_ROLE_PROMOTED) {
             reason = "promoted";
         } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
             reason = "failed";
         } else {
             reason = "active";
         }
         pe_rsc_info(rsc, "Unmanaged resource %s assigned to %s: %s", rsc->id,
                     (assign_to? assign_to->details->uname : "no node"), reason);
         pcmk__finalize_assignment(rsc, assign_to, true);
 
     } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stop_everything)) {
         pe_rsc_debug(rsc, "Forcing %s to stop: stop-all-resources", rsc->id);
         pcmk__finalize_assignment(rsc, NULL, true);
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional)
                && assign_best_node(rsc, prefer)) {
         // Assignment successful
 
     } else if (rsc->allocated_to == NULL) {
         if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
             pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
         } else if (rsc->running_on != NULL) {
             pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
         }
 
     } else {
         pe_rsc_debug(rsc, "%s: pre-assigned to %s", rsc->id,
                      pe__node_name(rsc->allocated_to));
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_allocating);
 
     if (rsc->is_remote_node) {
         remote_connection_assigned(rsc);
     }
 
     return rsc->allocated_to;
 }
 
 /*!
  * \internal
  * \brief Schedule actions to bring resource down and back to current role
  *
  * \param[in,out] rsc           Resource to restart
  * \param[in]     current       Node that resource should be brought down on
  * \param[in]     need_stop     Whether the resource must be stopped
  * \param[in]     need_promote  Whether the resource must be promoted
 */
 static void
 schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
                          bool need_stop, bool need_promote)
 {
     enum rsc_role_e role = rsc->role;
     enum rsc_role_e next_role;
     rsc_transition_fn fn = NULL;
 
     pe__set_resource_flags(rsc, pe_rsc_restarting);
 
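    /* Both loops below step through rsc_state_matrix, which maps (current
     * role, target role) to the next intermediate role, and rsc_action_matrix,
     * which maps each single-step transition to the function that schedules
     * it. For example, restarting a promoted resource schedules a demote
     * (promoted -> unpromoted) and a stop (unpromoted -> stopped) on the way
     * down, then a start (stopped -> unpromoted) and a promote (unpromoted ->
     * promoted) on the way back up.
     */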
     // Bring resource down to a stop on its current node
     while (role != RSC_ROLE_STOPPED) {
         next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
         pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
                      (need_stop? "required" : "optional"), rsc->id,
                      role2text(role), role2text(next_role));
         fn = rsc_action_matrix[role][next_role];
         if (fn == NULL) {
             break;
         }
         fn(rsc, current, !need_stop);
         role = next_role;
     }
 
     // Bring resource up to its next role on its next node
     while ((rsc->role <= rsc->next_role) && (role != rsc->role)
            && !pcmk_is_set(rsc->flags, pe_rsc_block)) {
         bool required = need_stop;
 
         next_role = rsc_state_matrix[role][rsc->role];
         if ((next_role == RSC_ROLE_PROMOTED) && need_promote) {
             required = true;
         }
         pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
                      (required? "required" : "optional"), rsc->id,
                      role2text(role), role2text(next_role));
         fn = rsc_action_matrix[role][next_role];
         if (fn == NULL) {
             break;
         }
         fn(rsc, rsc->allocated_to, !required);
         role = next_role;
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_restarting);
 }
 
 /*!
  * \internal
  * \brief If a resource's next role is not explicitly specified, set a default
  *
  * \param[in,out] rsc  Resource to set next role for
  *
  * \return "explicit" if next role was explicitly set, otherwise "implicit"
  */
 static const char *
 set_default_next_role(pe_resource_t *rsc)
 {
     if (rsc->next_role != RSC_ROLE_UNKNOWN) {
         return "explicit";
     }
 
     if (rsc->allocated_to == NULL) {
         pe__set_next_role(rsc, RSC_ROLE_STOPPED, "assignment");
     } else {
         pe__set_next_role(rsc, RSC_ROLE_STARTED, "assignment");
     }
     return "implicit";
 }
 
 /*!
  * \internal
  * \brief Create an action to represent an already pending start
  *
  * \param[in,out] rsc  Resource to create start action for
  */
 static void
 create_pending_start(pe_resource_t *rsc)
 {
     pe_action_t *start = NULL;
 
     pe_rsc_trace(rsc,
                  "Creating action for %s to represent already pending start",
                  rsc->id);
     start = start_action(rsc, rsc->allocated_to, TRUE);
     pe__set_action_flags(start, pe_action_print_always);
 }
 
 /*!
  * \internal
  * \brief Schedule actions needed to take a resource to its next role
  *
  * \param[in,out] rsc  Resource to schedule actions for
  */
 static void
 schedule_role_transition_actions(pe_resource_t *rsc)
 {
     enum rsc_role_e role = rsc->role;
 
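    /* For example, if a stopped resource's next role is promoted, this walks
     * stopped -> unpromoted -> promoted per rsc_state_matrix, scheduling a
     * start and then a promote.
     */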
     while (role != rsc->next_role) {
         enum rsc_role_e next_role = rsc_state_matrix[role][rsc->next_role];
         rsc_transition_fn fn = NULL;
 
         pe_rsc_trace(rsc,
                      "Creating action to take %s from %s to %s (ending at %s)",
                      rsc->id, role2text(role), role2text(next_role),
                      role2text(rsc->next_role));
         fn = rsc_action_matrix[role][next_role];
         if (fn == NULL) {
             break;
         }
         fn(rsc, rsc->allocated_to, false);
         role = next_role;
     }
 }
 
 /*!
  * \internal
  * \brief Create all actions needed for a given primitive resource
  *
  * \param[in,out] rsc  Primitive resource to create actions for
  */
 void
 pcmk__primitive_create_actions(pe_resource_t *rsc)
 {
     bool need_stop = false;
     bool need_promote = false;
     bool is_moving = false;
     bool allow_migrate = false;
     bool multiply_active = false;
 
     pe_node_t *current = NULL;
     unsigned int num_all_active = 0;
     unsigned int num_clean_active = 0;
     const char *next_role_source = NULL;
 
     CRM_ASSERT(rsc != NULL);
 
     next_role_source = set_default_next_role(rsc);
     pe_rsc_trace(rsc,
                  "Creating all actions for %s transition from %s to %s "
                  "(%s) on %s",
                  rsc->id, role2text(rsc->role), role2text(rsc->next_role),
                  next_role_source, pe__node_name(rsc->allocated_to));
 
     current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);
 
     g_list_foreach(rsc->dangling_migrations, pcmk__abort_dangling_migration,
                    rsc);
 
     if ((current != NULL) && (rsc->allocated_to != NULL)
         && (current->details != rsc->allocated_to->details)
         && (rsc->next_role >= RSC_ROLE_STARTED)) {
 
         pe_rsc_trace(rsc, "Moving %s from %s to %s",
                      rsc->id, pe__node_name(current),
                      pe__node_name(rsc->allocated_to));
         is_moving = true;
         allow_migrate = pcmk__rsc_can_migrate(rsc, current);
 
         // This is needed even if migrating (though I'm not sure why ...)
         need_stop = true;
     }
 
     // Check whether resource is partially migrated and/or multiply active
     if ((rsc->partial_migration_source != NULL)
         && (rsc->partial_migration_target != NULL)
         && allow_migrate && (num_all_active == 2)
         && pe__same_node(current, rsc->partial_migration_source)
         && pe__same_node(rsc->allocated_to, rsc->partial_migration_target)) {
         /* A partial migration is in progress, and the migration target remains
          * the same as when the migration began.
          */
         pe_rsc_trace(rsc, "Partial migration of %s from %s to %s will continue",
                      rsc->id, pe__node_name(rsc->partial_migration_source),
                      pe__node_name(rsc->partial_migration_target));
 
     } else if ((rsc->partial_migration_source != NULL)
                || (rsc->partial_migration_target != NULL)) {
         // A partial migration is in progress but can't be continued
 
         if (num_all_active > 2) {
             // The resource is migrating *and* multiply active!
             crm_notice("Forcing recovery of %s because it is migrating "
                        "from %s to %s and possibly active elsewhere",
                        rsc->id, pe__node_name(rsc->partial_migration_source),
                        pe__node_name(rsc->partial_migration_target));
         } else {
             // The migration source or target isn't available
             crm_notice("Forcing recovery of %s because it can no longer "
                        "migrate from %s to %s",
                        rsc->id, pe__node_name(rsc->partial_migration_source),
                        pe__node_name(rsc->partial_migration_target));
         }
         need_stop = true;
         rsc->partial_migration_source = rsc->partial_migration_target = NULL;
         allow_migrate = false;
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
         multiply_active = (num_all_active > 1);
     } else {
         /* If a resource has "requires" set to nothing or quorum, don't consider
          * it active on unclean nodes (similar to how all resources behave when
          * stonith-enabled is false). We can start such resources elsewhere
          * before fencing completes, and if we considered the resource active on
          * the failed node, we would attempt recovery for being active on
          * multiple nodes.
          */
         multiply_active = (num_clean_active > 1);
     }
 
     if (multiply_active) {
         const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
 
         // Resource was (possibly) incorrectly multiply active
         pe_proc_err("%s resource %s might be active on %u nodes (%s)",
                     pcmk__s(class, "Untyped"), rsc->id, num_all_active,
                     recovery2text(rsc->recovery_type));
         crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ"
                    "#Resource_is_Too_Active for more information");
 
         switch (rsc->recovery_type) {
             case recovery_stop_start:
                 need_stop = true;
                 break;
             case recovery_stop_unexpected:
                 need_stop = true; // stop_resource() will skip expected node
                 pe__set_resource_flags(rsc, pe_rsc_stop_unexpected);
                 break;
             default:
                 break;
         }
 
     } else {
         pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected);
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
         create_pending_start(rsc);
     }
 
     if (is_moving) {
         // Remaining tests are only for resources staying where they are
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         if (pcmk_is_set(rsc->flags, pe_rsc_stop)) {
             need_stop = true;
             pe_rsc_trace(rsc, "Recovering %s", rsc->id);
         } else {
             pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
             if (rsc->next_role == RSC_ROLE_PROMOTED) {
                 need_promote = true;
             }
         }
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
         pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
         need_stop = true;
 
     } else if ((rsc->role > RSC_ROLE_STARTED) && (current != NULL)
                && (rsc->allocated_to != NULL)) {
         pe_action_t *start = NULL;
 
         pe_rsc_trace(rsc, "Creating start action for promoted resource %s",
                      rsc->id);
         start = start_action(rsc, rsc->allocated_to, TRUE);
         if (!pcmk_is_set(start->flags, pe_action_optional)) {
             // Recovery of a promoted resource
             pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
             need_stop = true;
         }
     }
 
     // Create any actions needed to bring resource down and back up to same role
     schedule_restart_actions(rsc, current, need_stop, need_promote);
 
     // Create any actions needed to take resource from this role to the next
     schedule_role_transition_actions(rsc);
 
     pcmk__create_recurring_actions(rsc);
 
     if (allow_migrate) {
         pcmk__create_migration_actions(rsc, current);
     }
 }
 
 /*!
  * \internal
  * \brief Ban a resource from any allowed nodes that are Pacemaker Remote nodes
  *
  * \param[in] rsc  Resource to check
  */
 static void
 rsc_avoids_remote_nodes(const pe_resource_t *rsc)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
 
     g_hash_table_iter_init(&iter, rsc->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
         if (node->details->remote_rsc != NULL) {
             node->weight = -INFINITY;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Return allowed nodes as (possibly sorted) list
  *
  * Convert a resource's hash table of allowed nodes to a list. If printing to
  * stdout, sort the list, to keep action ID numbers consistent for regression
  * test output (while avoiding the performance hit on a live cluster).
  *
  * \param[in] rsc       Resource to check for allowed nodes
  * \param[in] data_set  Cluster working set
  *
  * \return List of resource's allowed nodes
  * \note Callers should take care not to rely on the list being sorted.
  */
 static GList *
 allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     GList *allowed_nodes = NULL;
 
    if (rsc->allowed_nodes != NULL) {
         allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
     }
 
     if (!pcmk__is_daemon) {
         allowed_nodes = g_list_sort(allowed_nodes, pe__cmp_node_name);
     }
 
     return allowed_nodes;
 }
 
 /*!
  * \internal
  * \brief Create implicit constraints needed for a primitive resource
  *
  * \param[in,out] rsc  Primitive resource to create implicit constraints for
  */
 void
 pcmk__primitive_internal_constraints(pe_resource_t *rsc)
 {
     pe_resource_t *top = NULL;
     GList *allowed_nodes = NULL;
     bool check_unfencing = false;
     bool check_utilization = false;
 
     CRM_ASSERT(rsc != NULL);
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         pe_rsc_trace(rsc,
                      "Skipping implicit constraints for unmanaged resource %s",
                      rsc->id);
         return;
     }
 
     top = uber_parent(rsc);
 
     // Whether resource requires unfencing
     check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device)
                       && pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing)
                       && pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing);
 
     // Whether a non-default placement strategy is used
     check_utilization = (g_hash_table_size(rsc->utilization) > 0)
                          && !pcmk__str_eq(rsc->cluster->placement_strategy,
                                           "default", pcmk__str_casei);
 
     // Order stops before starts (i.e. restart)
     pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                        rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                        pe_order_optional|pe_order_implies_then|pe_order_restart,
                        rsc->cluster);
 
     // Promotable ordering: demote before stop, start before promote
     if (pcmk_is_set(top->flags, pe_rsc_promotable)
         || (rsc->role > RSC_ROLE_UNPROMOTED)) {
 
         pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
                            rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                            pe_order_promoted_implies_first, rsc->cluster);
 
         pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                            rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
                            pe_order_runnable_left, rsc->cluster);
     }
 
     // Don't clear resource history if probing on same node
     pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0),
                        NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
                        NULL, pe_order_same_node|pe_order_then_cancels_first,
                        rsc->cluster);
 
     // Certain checks need allowed nodes
     if (check_unfencing || check_utilization || (rsc->container != NULL)) {
         allowed_nodes = allowed_nodes_as_list(rsc, rsc->cluster);
     }
 
     if (check_unfencing) {
         // Check whether the node needs to be unfenced
 
         for (GList *item = allowed_nodes; item; item = item->next) {
             pe_node_t *node = item->data;
             pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE,
                                                rsc->cluster);
 
             crm_debug("Ordering any stops of %s before %s, and any starts after",
                       rsc->id, unfence->uuid);
 
             /*
              * It would be more efficient to order clone resources once,
              * rather than order each instance, but ordering the instance
              * allows us to avoid unnecessary dependencies that might conflict
              * with user constraints.
              *
              * @TODO: This constraint can still produce a transition loop if the
              * resource has a stop scheduled on the node being unfenced, and
              * there is a user ordering constraint to start some other resource
              * (which will be ordered after the unfence) before stopping this
              * resource. An example is "start some slow-starting cloned service
              * before stopping an associated virtual IP that may be moving to
              * it":
              *       stop this -> unfencing -> start that -> stop this
              */
             pcmk__new_ordering(rsc, stop_key(rsc), NULL,
                                NULL, strdup(unfence->uuid), unfence,
                                pe_order_optional|pe_order_same_node,
                                rsc->cluster);
 
             pcmk__new_ordering(NULL, strdup(unfence->uuid), unfence,
                                rsc, start_key(rsc), NULL,
                                pe_order_implies_then_on_node|pe_order_same_node,
                                rsc->cluster);
         }
     }
 
     if (check_utilization) {
         pcmk__create_utilization_constraints(rsc, allowed_nodes);
     }
 
     if (rsc->container != NULL) {
         pe_resource_t *remote_rsc = NULL;
 
         if (rsc->is_remote_node) {
             // rsc is the implicit remote connection for a guest or bundle node
 
             /* Guest resources are not allowed to run on Pacemaker Remote nodes,
              * to avoid nesting remotes. However, bundles are allowed.
              */
             if (!pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
                 rsc_avoids_remote_nodes(rsc->container);
             }
 
             /* If someone cleans up a guest or bundle node's container, we will
              * likely schedule a (re-)probe of the container and recovery of the
              * connection. Order the connection stop after the container probe,
              * so that if we detect the container running, we will trigger a new
              * transition and avoid the unnecessary recovery.
              */
             pcmk__order_resource_actions(rsc->container, RSC_STATUS, rsc,
                                          RSC_STOP, pe_order_optional);
 
         /* A user can specify that a resource must start on a Pacemaker Remote
          * node by explicitly configuring it with the container=NODENAME
          * meta-attribute. This is of questionable merit, since location
          * constraints can accomplish the same thing. But we support it, so here
          * we check whether a resource (that is not itself a remote connection)
          * has container set to a remote node or guest node resource.
          */
         } else if (rsc->container->is_remote_node) {
             remote_rsc = rsc->container;
        } else {
             remote_rsc = pe__resource_contains_guest_node(rsc->cluster,
                                                           rsc->container);
         }
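        /* An illustrative configuration (hypothetical IDs) that forces a
         * resource onto remote node "remote1" this way:
         *   <primitive id="db" class="ocf" provider="heartbeat" type="pgsql">
         *     <meta_attributes id="db-meta">
         *       <nvpair id="db-container" name="container" value="remote1"/>
         *     </meta_attributes>
         *   </primitive>
         */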
 
         if (remote_rsc != NULL) {
             /* Force the resource on the Pacemaker Remote node instead of
              * colocating the resource with the container resource.
              */
             for (GList *item = allowed_nodes; item; item = item->next) {
                 pe_node_t *node = item->data;
 
                 if (node->details->remote_rsc != remote_rsc) {
                     node->weight = -INFINITY;
                 }
             }
 
         } else {
             /* This resource is either a filler for a container that does NOT
              * represent a Pacemaker Remote node, or a Pacemaker Remote
              * connection resource for a guest node or bundle.
              */
             int score;
 
             crm_trace("Order and colocate %s relative to its container %s",
                       rsc->id, rsc->container->id);
 
             pcmk__new_ordering(rsc->container,
                                pcmk__op_key(rsc->container->id, RSC_START, 0),
                                NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
                                NULL,
                                pe_order_implies_then|pe_order_runnable_left,
                                rsc->cluster);
 
             pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                                rsc->container,
                                pcmk__op_key(rsc->container->id, RSC_STOP, 0),
                                NULL, pe_order_implies_first, rsc->cluster);
 
             if (pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
                 score = 10000;    /* Highly preferred but not essential */
             } else {
                 score = INFINITY; /* Force them to run on the same host */
             }
             pcmk__new_colocation("resource-with-container", NULL, score, rsc,
                                  rsc->container, NULL, NULL, true,
                                  rsc->cluster);
         }
     }
 
     if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
         /* Remote connections and fencing devices are not allowed to run on
          * Pacemaker Remote nodes
          */
         rsc_avoids_remote_nodes(rsc);
     }
     g_list_free(allowed_nodes);
 }
 
 /*!
  * \internal
  * \brief Apply a colocation's score to node weights or resource priority
  *
  * Given a colocation constraint, apply its score to the dependent's
  * allowed node weights (if we are still placing resources) or priority (if
  * we are choosing promotable clone instance roles).
  *
  * \param[in,out] dependent      Dependent resource in colocation
  * \param[in]     primary        Primary resource in colocation
  * \param[in]     colocation     Colocation constraint to apply
 * \param[in]     for_dependent  true if called on behalf of dependent
  */
 void
 pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
                                   const pe_resource_t *primary,
                                   const pcmk__colocation_t *colocation,
                                   bool for_dependent)
 {
     enum pcmk__coloc_affects filter_results;
 
     CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
               return);
 
     if (for_dependent) {
         // Always process on behalf of primary resource
         primary->cmds->apply_coloc_score(dependent, primary, colocation, false);
         return;
     }
 
     filter_results = pcmk__colocation_affects(dependent, primary, colocation,
                                               false);
     pe_rsc_trace(dependent, "%s %s with %s (%s, score=%d, filter=%d)",
                  ((colocation->score > 0)? "Colocating" : "Anti-colocating"),
                  dependent->id, primary->id, colocation->id, colocation->score,
                  filter_results);
 
     switch (filter_results) {
         case pcmk__coloc_affects_role:
             pcmk__apply_coloc_to_priority(dependent, primary, colocation);
             break;
         case pcmk__coloc_affects_location:
             pcmk__apply_coloc_to_weights(dependent, primary, colocation);
             break;
         default: // pcmk__coloc_affects_nothing
             return;
     }
 }
 
 /*!
  * \internal
  * \brief Return action flags for a given primitive resource action
  *
  * \param[in,out] action  Action to get flags for
  * \param[in]     node    If not NULL, limit effects to this node (ignored)
  *
  * \return Flags appropriate to \p action on \p node
  */
 enum pe_action_flags
 pcmk__primitive_action_flags(pe_action_t *action, const pe_node_t *node)
 {
     CRM_ASSERT(action != NULL);
     return action->flags;
 }
 
 /*!
  * \internal
  * \brief Check whether a node is a multiply active resource's expected node
  *
 * \param[in] rsc   Resource to check
 * \param[in] node  Node to check
  *
  * \return true if \p rsc is multiply active with multiple-active set to
  *         stop_unexpected, and \p node is the node where it will remain active
  * \note This assumes that the resource's next role cannot be changed to stopped
  *       after this is called, which should be reasonable if status has already
  *       been unpacked and resources have been assigned to nodes.
  */
 static bool
 is_expected_node(const pe_resource_t *rsc, const pe_node_t *node)
 {
     return pcmk_all_flags_set(rsc->flags,
                               pe_rsc_stop_unexpected|pe_rsc_restarting)
            && (rsc->next_role > RSC_ROLE_STOPPED)
            && pe__same_node(rsc->allocated_to, node);
 }
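
/* For example, suppose a resource with multiple-active=stop_unexpected is
 * found active on nodes A and B but has been assigned to node A. While
 * restart actions are being scheduled, is_expected_node() returns true only
 * for A, so stop_resource() skips A and stops only the unexpected instance
 * on B, while the start and promote on A are downgraded to pseudo-actions.
 */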
 
 /*!
  * \internal
  * \brief Schedule actions needed to stop a resource wherever it is active
  *
  * \param[in,out] rsc       Resource being stopped
  * \param[in]     node      Node where resource is being stopped (ignored)
  * \param[in]     optional  Whether actions should be optional
  */
 static void
 stop_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
 {
     for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
         pe_node_t *current = (pe_node_t *) iter->data;
         pe_action_t *stop = NULL;
 
         if (is_expected_node(rsc, current)) {
             /* We are scheduling restart actions for a multiply active resource
              * with multiple-active=stop_unexpected, and this is where it should
              * not be stopped.
              */
             pe_rsc_trace(rsc,
                          "Skipping stop of multiply active resource %s "
                          "on expected node %s",
                          rsc->id, pe__node_name(current));
             continue;
         }
 
         if (rsc->partial_migration_target != NULL) {
             // Continue migration if node originally was and remains target
             if (pe__same_node(current, rsc->partial_migration_target)
                 && pe__same_node(current, rsc->allocated_to)) {
                 pe_rsc_trace(rsc,
                              "Skipping stop of %s on %s "
                              "because partial migration there will continue",
                              rsc->id, pe__node_name(current));
                 continue;
             } else {
                 pe_rsc_trace(rsc,
                              "Forcing stop of %s on %s "
                              "because migration target changed",
                              rsc->id, pe__node_name(current));
                 optional = false;
             }
         }
 
         pe_rsc_trace(rsc, "Scheduling stop of %s on %s",
                      rsc->id, pe__node_name(current));
         stop = stop_action(rsc, current, optional);
 
         if (rsc->allocated_to == NULL) {
             pe_action_set_reason(stop, "node availability", true);
         } else if (pcmk_all_flags_set(rsc->flags, pe_rsc_restarting
                                                   |pe_rsc_stop_unexpected)) {
             /* We are stopping a multiply active resource on a node that is
              * not its expected node, and we are still scheduling restart
              * actions, so the stop is for being multiply active.
              */
             pe_action_set_reason(stop, "being multiply active", true);
         }
 
         if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             pe__clear_action_flags(stop, pe_action_runnable);
         }
 
         if (pcmk_is_set(rsc->cluster->flags, pe_flag_remove_after_stop)) {
             pcmk__schedule_cleanup(rsc, current, optional);
         }
 
         if (pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
             pe_action_t *unfence = pe_fence_op(current, "on", true, NULL, false,
                                                rsc->cluster);
 
             order_actions(stop, unfence, pe_order_implies_first);
             if (!pcmk__node_unfenced(current)) {
                 pe_proc_err("Stopping %s until %s can be unfenced",
                             rsc->id, pe__node_name(current));
             }
         }
     }
 }
 
 /*!
  * \internal
  * \brief Schedule actions needed to start a resource on a node
  *
  * \param[in,out] rsc       Resource being started
  * \param[in]     node      Node where resource should be started
  * \param[in]     optional  Whether actions should be optional
  */
 static void
 start_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
 {
     pe_action_t *start = NULL;
 
     CRM_ASSERT(node != NULL);
 
     pe_rsc_trace(rsc, "Scheduling %s start of %s on %s (score %d)",
                  (optional? "optional" : "required"), rsc->id,
                  pe__node_name(node), node->weight);
     start = start_action(rsc, node, TRUE);
 
     pcmk__order_vs_unfence(rsc, node, start, pe_order_implies_then);
 
     if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
         pe__clear_action_flags(start, pe_action_optional);
     }
 
     if (is_expected_node(rsc, node)) {
         /* This could be a problem if the start becomes necessary for other
          * reasons later.
          */
         pe_rsc_trace(rsc,
                      "Start of multiply active resouce %s "
                      "on expected node %s will be a pseudo-action",
                      rsc->id, pe__node_name(node));
         pe__set_action_flags(start, pe_action_pseudo);
     }
 }
 
 /*!
  * \internal
  * \brief Schedule actions needed to promote a resource on a node
  *
  * \param[in,out] rsc       Resource being promoted
  * \param[in]     node      Node where resource should be promoted
  * \param[in]     optional  Whether actions should be optional
  */
 static void
 promote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
 {
     GList *iter = NULL;
     GList *action_list = NULL;
     bool runnable = true;
 
     CRM_ASSERT(node != NULL);
 
     // Any start must be runnable for promotion to be runnable
     action_list = pe__resource_actions(rsc, node, RSC_START, true);
     for (iter = action_list; iter != NULL; iter = iter->next) {
         pe_action_t *start = (pe_action_t *) iter->data;
 
         if (!pcmk_is_set(start->flags, pe_action_runnable)) {
             runnable = false;
         }
     }
     g_list_free(action_list);
 
     if (runnable) {
         pe_action_t *promote = promote_action(rsc, node, optional);
 
         pe_rsc_trace(rsc, "Scheduling %s promotion of %s on %s",
                      (optional? "optional" : "required"), rsc->id,
                      pe__node_name(node));
 
         if (is_expected_node(rsc, node)) {
             /* This could be a problem if the promote becomes necessary for
              * other reasons later.
              */
             pe_rsc_trace(rsc,
                          "Promotion of multiply active resouce %s "
                          "on expected node %s will be a pseudo-action",
                          rsc->id, pe__node_name(node));
             pe__set_action_flags(promote, pe_action_pseudo);
         }
     } else {
         pe_rsc_trace(rsc, "Not promoting %s on %s: start unrunnable",
                      rsc->id, pe__node_name(node));
         action_list = pe__resource_actions(rsc, node, RSC_PROMOTE, true);
         for (iter = action_list; iter != NULL; iter = iter->next) {
             pe_action_t *promote = (pe_action_t *) iter->data;
 
             pe__clear_action_flags(promote, pe_action_runnable);
         }
         g_list_free(action_list);
     }
 }
 
 /*!
  * \internal
  * \brief Schedule actions needed to demote a resource wherever it is active
  *
  * \param[in,out] rsc       Resource being demoted
  * \param[in]     node      Node where resource should be demoted (ignored)
  * \param[in]     optional  Whether actions should be optional
  */
 static void
 demote_resource(pe_resource_t *rsc, pe_node_t *node, bool optional)
 {
     /* Since this will only be called for a primitive (possibly as an instance
      * of a collective resource), the resource is multiply active if it is
      * running on more than one node, so we want to demote on all of them as
      * part of recovery, regardless of which one is the desired node.
      */
     for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
         pe_node_t *current = (pe_node_t *) iter->data;
 
         if (is_expected_node(rsc, current)) {
             pe_rsc_trace(rsc,
                          "Skipping demote of multiply active resource %s "
                          "on expected node %s",
                          rsc->id, pe__node_name(current));
         } else {
             pe_rsc_trace(rsc, "Scheduling %s demotion of %s on %s",
                          (optional? "optional" : "required"), rsc->id,
                          pe__node_name(current));
             demote_action(rsc, current, optional);
         }
     }
 }
 
 static void
 assert_role_error(pe_resource_t *rsc, pe_node_t *node, bool optional)
 {
     CRM_ASSERT(false);
 }
 
 /*!
  * \internal
  * \brief Schedule cleanup of a resource
  *
  * \param[in,out] rsc       Resource to clean up
  * \param[in]     node      Node to clean up on
  * \param[in]     optional  Whether clean-up should be optional
  */
 void
 pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node, bool optional)
 {
     /* If the cleanup is required, its orderings are optional, because they're
      * relevant only if both actions are required. Conversely, if the cleanup is
      * optional, the orderings make the then action required if the first action
      * becomes required.
      */
     enum pe_ordering flag = optional? pe_order_implies_then : pe_order_optional;
 
     CRM_CHECK((rsc != NULL) && (node != NULL), return);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: resource failed",
                      rsc->id, pe__node_name(node));
         return;
     }
 
     if (node->details->unclean || !node->details->online) {
         pe_rsc_trace(rsc, "Skipping clean-up of %s on %s: node unavailable",
                      rsc->id, pe__node_name(node));
         return;
     }
 
     crm_notice("Scheduling clean-up of %s on %s", rsc->id, pe__node_name(node));
     delete_action(rsc, node, optional);
 
     // stop -> clean-up -> start
     pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_DELETE, flag);
     pcmk__order_resource_actions(rsc, RSC_DELETE, rsc, RSC_START, flag);
 }
 
 /*!
  * \internal
  * \brief Add primitive meta-attributes relevant to graph actions to XML
  *
  * \param[in]     rsc  Primitive resource whose meta-attributes should be added
  * \param[in,out] xml  Transition graph action attributes XML to add to
  */
 void
 pcmk__primitive_add_graph_meta(pe_resource_t *rsc, xmlNode *xml)
 {
     char *name = NULL;
     char *value = NULL;
     pe_resource_t *parent = NULL;
 
     CRM_ASSERT((rsc != NULL) && (xml != NULL));
 
     /* Clone instance numbers get set internally as meta-attributes, and are
      * needed in the transition graph (for example, to tell unique clone
      * instances apart).
      */
     value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
     if (value != NULL) {
         name = crm_meta_name(XML_RSC_ATTR_INCARNATION);
         crm_xml_add(xml, name, value);
         free(name);
     }
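    // crm_meta_name() adds the CRM_meta_ prefix (e.g. yielding CRM_meta_clone)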
 
     // Not sure if this one is really needed ...
     value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
     if (value != NULL) {
         name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);
         crm_xml_add(xml, name, value);
         free(name);
     }
 
     /* The container meta-attribute can be set on the primitive itself or one of
      * its parents (for example, a group inside a container resource), so check
      * them all, and keep the highest one found.
      */
     for (parent = rsc; parent != NULL; parent = parent->parent) {
         if (parent->container != NULL) {
             crm_xml_add(xml, CRM_META "_" XML_RSC_ATTR_CONTAINER,
                         parent->container->id);
         }
     }
 
     /* Bundle replica children will get their external-ip set internally as a
      * meta-attribute. The graph action needs it, but under a different naming
      * convention than other meta-attributes.
      */
     value = g_hash_table_lookup(rsc->meta, "external-ip");
     if (value != NULL) {
         crm_xml_add(xml, "pcmk_external_ip", value);
     }
 }
 
 // Primitive implementation of resource_alloc_functions_t:add_utilization()
 void
-pcmk__primitive_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
-                                GList *all_rscs, GHashTable *utilization)
+pcmk__primitive_add_utilization(const pe_resource_t *rsc,
+                                const pe_resource_t *orig_rsc, GList *all_rscs,
+                                GHashTable *utilization)
 {
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return;
     }
 
     pe_rsc_trace(orig_rsc, "%s: Adding primitive %s as colocated utilization",
                  orig_rsc->id, rsc->id);
     pcmk__release_node_capacity(utilization, rsc);
 }
 
 /*!
  * \internal
  * \brief Get epoch time of node's shutdown attribute (or now if none)
  *
  * \param[in] node      Node to check
  * \param[in] data_set  Cluster working set
  *
  * \return Epoch time corresponding to shutdown attribute if set or now if not
  */
 static time_t
 shutdown_time(pe_node_t *node, pe_working_set_t *data_set)
 {
     const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
     time_t result = 0;
 
     if (shutdown != NULL) {
         long long result_ll;
 
         if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) {
             result = (time_t) result_ll;
         }
     }
     return (result == 0)? get_effective_time(data_set) : result;
 }
 
 // Primitive implementation of resource_alloc_functions_t:shutdown_lock()
 void
 pcmk__primitive_shutdown_lock(pe_resource_t *rsc)
 {
     const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
 
     // Fence devices and remote connections can't be locked
     if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
         || pe__resource_is_remote_conn(rsc, rsc->cluster)) {
         return;
     }
 
     if (rsc->lock_node != NULL) {
         // The lock was obtained from resource history
 
         if (rsc->running_on != NULL) {
             /* The resource was started elsewhere even though it is now
              * considered locked. This shouldn't be possible, but as a
              * failsafe, we don't want to disturb the resource now.
              */
             pe_rsc_info(rsc,
                         "Cancelling shutdown lock because %s is already active",
                         rsc->id);
             pe__clear_resource_history(rsc, rsc->lock_node, rsc->cluster);
             rsc->lock_node = NULL;
             rsc->lock_time = 0;
         }
 
     // Only a resource active on exactly one node can be locked
     } else if (pcmk__list_of_1(rsc->running_on)) {
         pe_node_t *node = rsc->running_on->data;
 
         if (node->details->shutdown) {
             if (node->details->unclean) {
                 pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown",
                              rsc->id, pe__node_name(node));
             } else {
                 rsc->lock_node = node;
                 rsc->lock_time = shutdown_time(node, rsc->cluster);
             }
         }
     }
 
     if (rsc->lock_node == NULL) {
         // No lock needed
         return;
     }
 
     if (rsc->cluster->shutdown_lock > 0) {
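        /* shutdown_lock holds the configured shutdown-lock-limit in seconds,
         * so for example a limit of PT10M expires 600 seconds after the
         * node's shutdown began; updating the recheck time to just past
         * expiration ensures the bans below are lifted promptly.
         */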
         time_t lock_expiration = rsc->lock_time + rsc->cluster->shutdown_lock;
 
         pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
                     rsc->id, pe__node_name(rsc->lock_node),
                     (long long) lock_expiration);
         pe__update_recheck_time(++lock_expiration, rsc->cluster);
     } else {
         pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
                     rsc->id, pe__node_name(rsc->lock_node));
     }
 
     // If resource is locked to one node, ban it from all other nodes
     for (GList *item = rsc->cluster->nodes; item != NULL; item = item->next) {
         pe_node_t *node = item->data;
 
        if (strcmp(node->details->uname,
                   rsc->lock_node->details->uname) != 0) {
             resource_location(rsc, node, -CRM_SCORE_INFINITY,
                               XML_CONFIG_ATTR_SHUTDOWN_LOCK, rsc->cluster);
         }
     }
 }