diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h
index 9460a9bf63..e3b947d869 100644
--- a/lib/pacemaker/libpacemaker_private.h
+++ b/lib/pacemaker/libpacemaker_private.h
@@ -1,688 +1,698 @@
 /*
  * Copyright 2021-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__LIBPACEMAKER_PRIVATE__H
 #  define PCMK__LIBPACEMAKER_PRIVATE__H
 
 /* This header is for the sole use of libpacemaker, so that functions can be
  * declared with G_GNUC_INTERNAL for efficiency.
  */
 
 #include <crm/pengine/pe_types.h> // pe_action_t, pe_node_t, pe_working_set_t
 
 // Flags to modify the behavior of the add_colocated_node_scores() method
 enum pcmk__coloc_select {
     // With no other flags, apply all "with this" colocations
     pcmk__coloc_select_default      = 0,
 
     // Apply "this with" colocations instead of "with this" colocations
     pcmk__coloc_select_this_with    = (1 << 0),
 
     // Apply only colocations with non-negative scores
     pcmk__coloc_select_nonnegative  = (1 << 1),
 
     // Apply only colocations with at least one matching node
     pcmk__coloc_select_active       = (1 << 2),
 };
 
 // Flags the update_ordered_actions() method can return
 enum pcmk__updated {
     pcmk__updated_none      = 0,        // Nothing changed
     pcmk__updated_first     = (1 << 0), // First action was updated
     pcmk__updated_then      = (1 << 1), // Then action was updated
 };
 
 #define pcmk__set_updated_flags(au_flags, action, flags_to_set) do {        \
         au_flags = pcmk__set_flags_as(__func__, __LINE__,                   \
                                       LOG_TRACE, "Action update",           \
                                       (action)->uuid, au_flags,             \
                                       (flags_to_set), #flags_to_set);       \
     } while (0)
 
 #define pcmk__clear_updated_flags(au_flags, action, flags_to_clear) do {    \
         au_flags = pcmk__clear_flags_as(__func__, __LINE__,                 \
                                         LOG_TRACE, "Action update",         \
                                         (action)->uuid, au_flags,           \
                                         (flags_to_clear), #flags_to_clear); \
     } while (0)
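 
 /* A hypothetical usage sketch: combining these macros with pcmk_is_set()
  * to track what an ordering update changed:
  *
  *     uint32_t changed = pcmk__updated_none;
  *
  *     pcmk__set_updated_flags(changed, first, pcmk__updated_first);
  *     if (pcmk_is_set(changed, pcmk__updated_first)) {
  *         // reconsider orderings involving the updated "first" action
  *     }
  */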
 
 // Resource allocation methods
 struct resource_alloc_functions_s {
-    pe_node_t *(*allocate)(pe_resource_t *rsc, pe_node_t *prefer);
+    /*!
+     * \internal
+     * \brief Assign a resource to a node
+     *
+     * \param[in] rsc     Resource to assign to a node
+     * \param[in] prefer  Node to prefer, if all else is equal
+     *
+     * \return Node that \p rsc is assigned to, if assigned entirely to one node
+     */
+    pe_node_t *(*assign)(pe_resource_t *rsc, pe_node_t *prefer);
+
     void (*create_actions)(pe_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Schedule any probes needed for a resource on a node
      *
      * \param[in] rsc   Resource to create probe for
      * \param[in] node  Node to create probe on
      *
      * \return true if any probe was created, otherwise false
      */
     bool (*create_probe)(pe_resource_t *rsc, pe_node_t *node);
 
     void (*internal_constraints)(pe_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Apply a colocation's score to node weights or resource priority
      *
      * Given a colocation constraint, apply its score to the dependent's
      * allowed node weights (if we are still placing resources) or priority (if
      * we are choosing promotable clone instance roles).
      *
      * \param[in] dependent      Dependent resource in colocation
      * \param[in] primary        Primary resource in colocation
      * \param[in] colocation     Colocation constraint to apply
      * \param[in] for_dependent  true if called on behalf of dependent
      */
     void (*apply_coloc_score) (pe_resource_t *dependent, pe_resource_t *primary,
                                pcmk__colocation_t *colocation,
                                bool for_dependent);
 
     /*!
      * \internal
      * \brief Update nodes with scores of colocated resources' nodes
      *
      * Given a table of nodes and a resource, update the nodes' scores with the
      * scores of the best nodes matching the attribute used for each of the
      * resource's relevant colocations.
      *
      * \param[in,out] rsc      Resource to check colocations for
      * \param[in]     log_id   Resource ID to use in logs (if NULL, use rsc ID)
      * \param[in,out] nodes    Nodes to update
      * \param[in]     attr     Colocation attribute (NULL to use default)
      * \param[in]     factor   Incorporate scores multiplied by this factor
      * \param[in]     flags    Bitmask of enum pcmk__coloc_select values
      *
      * \note The caller remains responsible for freeing \p *nodes.
      */
     void (*add_colocated_node_scores)(pe_resource_t *rsc, const char *log_id,
                                       GHashTable **nodes, const char *attr,
                                       float factor,
                                       enum pcmk__coloc_select flags);
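 
     /* A hypothetical usage sketch (the caller owns and frees the table):
      *
      *     GHashTable *nodes = pcmk__copy_node_table(rsc->allowed_nodes);
      *
      *     rsc->cmds->add_colocated_node_scores(rsc, NULL, &nodes, NULL, 1.0,
      *                                          pcmk__coloc_select_default);
      *     g_hash_table_destroy(nodes);
      */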
 
     /*!
      * \internal
      * \brief Create list of all resources in colocations with a given resource
      *
      * Given a resource, create a list of all resources involved in mandatory
      * colocations with it, whether directly or indirectly via chained colocations.
      *
      * \param[in] rsc             Resource to add to colocated list
      * \param[in] orig_rsc        Resource originally requested
      * \param[in] colocated_rscs  Existing list
      *
      * \return List of given resource and all resources involved in colocations
      *
      * \note This function is recursive; top-level callers should pass NULL as
      *       \p colocated_rscs and \p orig_rsc, and the desired resource as
      *       \p rsc. The recursive calls will use other values.
      */
     GList *(*colocated_resources)(pe_resource_t *rsc, pe_resource_t *orig_rsc,
                                   GList *colocated_rscs);
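 
     /* A hypothetical usage sketch: per the note above, top-level callers pass
      * NULL for both orig_rsc and colocated_rscs:
      *
      *     GList *colocated = rsc->cmds->colocated_resources(rsc, NULL, NULL);
      *
      *     // ... examine the list ...
      *     g_list_free(colocated);
      */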
 
     void (*rsc_location) (pe_resource_t *, pe__location_t *);
 
     enum pe_action_flags (*action_flags) (pe_action_t *, pe_node_t *);
 
     /*!
      * \internal
      * \brief Update two actions according to an ordering between them
      *
     * Given information about an ordering of two actions, update the actions'
     * flags (and runnable_before members if appropriate) to reflect the
     * ordering. In some cases, the ordering may also be disabled.
      *
      * \param[in] first     'First' action in an ordering
      * \param[in] then      'Then' action in an ordering
      * \param[in] node      If not NULL, limit scope of ordering to this node
      *                      (only used when interleaving instances)
      * \param[in] flags     Action flags for \p first for ordering purposes
      * \param[in] filter    Action flags to limit scope of certain updates (may
      *                      include pe_action_optional to affect only mandatory
      *                      actions, and pe_action_runnable to affect only
      *                      runnable actions)
      * \param[in] type      Group of enum pe_ordering flags to apply
      * \param[in] data_set  Cluster working set
      *
      * \return Group of enum pcmk__updated flags indicating what was updated
      */
     uint32_t (*update_ordered_actions)(pe_action_t *first, pe_action_t *then,
                                        pe_node_t *node, uint32_t flags,
                                        uint32_t filter, uint32_t type,
                                        pe_working_set_t *data_set);
 
     void (*output_actions)(pe_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Add a resource's actions to the transition graph
      *
      * \param[in] rsc  Resource whose actions should be added
      */
     void (*add_actions_to_graph)(pe_resource_t *rsc);
 
     void (*append_meta) (pe_resource_t * rsc, xmlNode * xml);
 
     /*!
      * \internal
      * \brief Add a resource's utilization to a table of utilization values
      *
      * This function is used when summing the utilization of a resource and all
      * resources colocated with it, to determine whether a node has sufficient
      * capacity. Given a resource and a table of utilization values, it will add
      * the resource's utilization to the existing values, if the resource has
      * not yet been allocated to a node.
      *
      * \param[in] rsc          Resource with utilization to add
      * \param[in] orig_rsc     Resource being allocated (for logging only)
      * \param[in] all_rscs     List of all resources that will be summed
      * \param[in] utilization  Table of utilization values to add to
      */
     void (*add_utilization)(pe_resource_t *rsc, pe_resource_t *orig_rsc,
                             GList *all_rscs, GHashTable *utilization);
 
     /*!
      * \internal
      * \brief Apply a shutdown lock for a resource, if appropriate
      *
      * \param[in] rsc       Resource to check for shutdown lock
      */
     void (*shutdown_lock)(pe_resource_t *rsc);
 };
 
 // Actions (pcmk_sched_actions.c)
 
 G_GNUC_INTERNAL
 void pcmk__update_action_for_orderings(pe_action_t *action,
                                        pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then,
                                       pe_node_t *node, uint32_t flags,
                                       uint32_t filter, uint32_t type,
                                       pe_working_set_t *data_set);
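 
 /* A hypothetical usage sketch: interpreting the enum pcmk__updated flags
  * returned by pcmk__update_ordered_actions():
  *
  *     uint32_t changed = pcmk__update_ordered_actions(first, then, node,
  *                                                     flags, filter, type,
  *                                                     data_set);
  *
  *     if (pcmk_is_set(changed, pcmk__updated_then)) {
  *         pcmk__update_action_for_orderings(then, data_set);
  *     }
  */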
 
 G_GNUC_INTERNAL
 void pcmk__log_action(const char *pre_text, pe_action_t *action, bool details);
 
 G_GNUC_INTERNAL
 pe_action_t *pcmk__new_cancel_action(pe_resource_t *rsc, const char *name,
                                      guint interval_ms, pe_node_t *node);
 
 G_GNUC_INTERNAL
 pe_action_t *pcmk__new_shutdown_action(pe_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__action_locks_rsc_to_node(const pe_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__deduplicate_action_inputs(pe_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__output_actions(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
                                xmlNode *xml_op);
 
 G_GNUC_INTERNAL
 void pcmk__handle_rsc_config_changes(pe_working_set_t *data_set);
 
 // Producing transition graphs (pcmk_graph_producer.c)
 
 G_GNUC_INTERNAL
 bool pcmk__graph_has_loop(pe_action_t *init_action, pe_action_t *action,
                           pe_action_wrapper_t *input);
 
 G_GNUC_INTERNAL
 void pcmk__add_rsc_actions_to_graph(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__create_graph(pe_working_set_t *data_set);
 
 
 // Fencing (pcmk_sched_fencing.c)
 
 G_GNUC_INTERNAL
 void pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__order_vs_unfence(pe_resource_t *rsc, pe_node_t *node,
                             pe_action_t *action, enum pe_ordering order);
 
 G_GNUC_INTERNAL
 void pcmk__fence_guest(pe_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__node_unfenced(pe_node_t *node);
 
 
 // Injected scheduler inputs (pcmk_sched_injections.c)
 
 void pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
                                   pcmk_injections_t *injections);
 
 
 // Constraints of any type (pcmk_sched_constraints.c)
 
 G_GNUC_INTERNAL
 pe_resource_t *pcmk__find_constraint_resource(GList *rsc_list, const char *id);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__expand_tags_in_sets(xmlNode *xml_obj,
                                    pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__valid_resource_or_tag(pe_working_set_t *data_set, const char *id,
                                  pe_resource_t **rsc, pe_tag_t **tag);
 
 G_GNUC_INTERNAL
 bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
                       bool convert_rsc, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__create_internal_constraints(pe_working_set_t *data_set);
 
 
 // Location constraints
 
 G_GNUC_INTERNAL
 void pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 pe__location_t *pcmk__new_location(const char *id, pe_resource_t *rsc,
                                    int node_weight, const char *discover_mode,
                                    pe_node_t *foo_node,
                                    pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__apply_locations(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__apply_location(pe__location_t *constraint, pe_resource_t *rsc);
 
 
 // Colocation constraints (pcmk_sched_colocation.c)
 
 enum pcmk__coloc_affects {
     pcmk__coloc_affects_nothing = 0,
     pcmk__coloc_affects_location,
     pcmk__coloc_affects_role,
 };
 
 G_GNUC_INTERNAL
 enum pcmk__coloc_affects pcmk__colocation_affects(pe_resource_t *dependent,
                                                   pe_resource_t *primary,
                                                   pcmk__colocation_t *constraint,
                                                   bool preview);
 
 G_GNUC_INTERNAL
 void pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
                                   pe_resource_t *primary,
                                   pcmk__colocation_t *constraint);
 
 G_GNUC_INTERNAL
 void pcmk__apply_coloc_to_priority(pe_resource_t *dependent,
                                    pe_resource_t *primary,
                                    pcmk__colocation_t *constraint);
 
 G_GNUC_INTERNAL
 void pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
                                      GHashTable **nodes, const char *attr,
                                      float factor, uint32_t flags);
 
 G_GNUC_INTERNAL
 void pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__new_colocation(const char *id, const char *node_attr, int score,
                           pe_resource_t *dependent, pe_resource_t *primary,
                           const char *dependent_role, const char *primary_role,
                           bool influence, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__block_colocated_starts(pe_action_t *action,
                                   pe_working_set_t *data_set);
 
 /*!
  * \internal
  * \brief Check whether colocation's dependent preferences should be considered
  *
  * \param[in] colocation  Colocation constraint
  * \param[in] rsc         Primary instance (NULL is treated as
  *                        colocation->primary, the usual value; for clones
  *                        or bundles with multiple instances, this can be a
  *                        particular instance)
  *
  * \return true if colocation influence should be effective, otherwise false
  */
 static inline bool
 pcmk__colocation_has_influence(const pcmk__colocation_t *colocation,
                                const pe_resource_t *rsc)
 {
     if (rsc == NULL) {
         rsc = colocation->primary;
     }
 
     /* A bundle replica colocates its remote connection with its container,
      * using a finite score so that the container can run on Pacemaker Remote
      * nodes.
      *
      * Moving a connection is lightweight and does not interrupt the service,
      * while moving a container is heavyweight and does interrupt the service,
      * so don't move a clean, active container based solely on the preferences
      * of its connection.
      *
      * This also avoids problematic scenarios where two containers want to
      * perpetually swap places.
      */
     if (pcmk_is_set(colocation->dependent->flags, pe_rsc_allow_remote_remotes)
         && !pcmk_is_set(rsc->flags, pe_rsc_failed)
         && pcmk__list_of_1(rsc->running_on)) {
         return false;
     }
 
     /* The dependent in a colocation influences the primary's location
      * if the influence option is true or the primary is not yet active.
      */
     return colocation->influence || (rsc->running_on == NULL);
 }
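 
 /* A hypothetical usage sketch: to check a colocation against its usual
  * primary, pass NULL as the instance:
  *
  *     if (pcmk__colocation_has_influence(colocation, NULL)) {
  *         // apply the dependent's preferences to the primary
  *     }
  */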
 
 
 // Ordering constraints (pcmk_sched_ordering.c)
 
 G_GNUC_INTERNAL
 void pcmk__new_ordering(pe_resource_t *first_rsc, char *first_task,
                         pe_action_t *first_action, pe_resource_t *then_rsc,
                         char *then_task, pe_action_t *then_action,
                         enum pe_ordering type, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__disable_invalid_orderings(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__order_stops_before_shutdown(pe_node_t *node,
                                        pe_action_t *shutdown_op);
 
 G_GNUC_INTERNAL
 void pcmk__apply_orderings(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__order_after_each(pe_action_t *after, GList *list);
 
 
 /*!
  * \internal
  * \brief Create a new ordering between two resource actions
  *
  * \param[in] first_rsc   Resource for 'first' action
  * \param[in] then_rsc    Resource for 'then' action
  * \param[in] first_task  Action key for 'first' action
  * \param[in] then_task   Action key for 'then' action
  * \param[in] flags       Bitmask of enum pe_ordering flags
  * \param[in] data_set    Cluster working set to add ordering to
  */
 #define pcmk__order_resource_actions(first_rsc, first_task,                 \
                                      then_rsc, then_task, flags)            \
     pcmk__new_ordering((first_rsc),                                         \
                        pcmk__op_key((first_rsc)->id, (first_task), 0),      \
                        NULL,                                                \
                        (then_rsc),                                          \
                        pcmk__op_key((then_rsc)->id, (then_task), 0),        \
                        NULL, (flags), (first_rsc)->cluster)
 
 #define pcmk__order_starts(rsc1, rsc2, type)                 \
     pcmk__order_resource_actions((rsc1), CRMD_ACTION_START,  \
                                  (rsc2), CRMD_ACTION_START, (type))
 
 #define pcmk__order_stops(rsc1, rsc2, type)                  \
     pcmk__order_resource_actions((rsc1), CRMD_ACTION_STOP,   \
                                  (rsc2), CRMD_ACTION_STOP, (type))
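 
 /* A hypothetical usage sketch: the typical pattern for "rsc2 depends on
  * rsc1" orders the starts one way and the stops the other:
  *
  *     pcmk__order_starts(rsc1, rsc2, pe_order_optional);
  *     pcmk__order_stops(rsc2, rsc1, pe_order_optional);
  */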
 
 
 // Ticket constraints (pcmk_sched_tickets.c)
 
 G_GNUC_INTERNAL
 void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 
 // Promotable clone resources (pcmk_sched_promotable.c)
 
 G_GNUC_INTERNAL
 void pcmk__require_promotion_tickets(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__set_instance_roles(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__create_promotable_actions(pe_resource_t *clone);
 
 G_GNUC_INTERNAL
 void pcmk__promotable_restart_ordering(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__order_promotable_instances(pe_resource_t *clone);
 
 G_GNUC_INTERNAL
 void pcmk__update_dependent_with_promotable(pe_resource_t *primary,
                                             pe_resource_t *dependent,
                                             pcmk__colocation_t *colocation);
 
 G_GNUC_INTERNAL
 void pcmk__update_promotable_dependent_priority(pe_resource_t *primary,
                                                 pe_resource_t *dependent,
                                                 pcmk__colocation_t *colocation);
 
 
 // Pacemaker Remote nodes (pcmk_sched_remote.c)
 
 G_GNUC_INTERNAL
 bool pcmk__is_failed_remote_node(pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__order_remote_connection_actions(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__rsc_corresponds_to_guest(pe_resource_t *rsc, pe_node_t *node);
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__connection_host_for_action(pe_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params);
 
 G_GNUC_INTERNAL
 void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, pe_action_t *action);
 
 
 // Primitives (pcmk_sched_primitive.c)
 
 G_GNUC_INTERNAL
 void pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
                                        pe_resource_t *primary,
                                        pcmk__colocation_t *colocation,
                                        bool for_dependent);
 
 // Groups (pcmk_sched_group.c)
 
 G_GNUC_INTERNAL
 void pcmk__group_apply_coloc_score(pe_resource_t *dependent,
                                    pe_resource_t *primary,
                                    pcmk__colocation_t *colocation,
                                    bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__group_add_colocated_node_scores(pe_resource_t *rsc,
                                            const char *log_id,
                                            GHashTable **nodes, const char *attr,
                                            float factor, uint32_t flags);
 
 G_GNUC_INTERNAL
 GList *pcmk__group_colocated_resources(pe_resource_t *rsc,
                                        pe_resource_t *orig_rsc,
                                        GList *colocated_rscs);
 
 // Clones (pcmk_sched_clone.c)
 
 G_GNUC_INTERNAL
 void pcmk__clone_apply_coloc_score(pe_resource_t *dependent,
                                    pe_resource_t *primary,
                                    pcmk__colocation_t *colocation,
                                    bool for_dependent);
 
 // Bundles (pcmk_sched_bundle.c)
 
 G_GNUC_INTERNAL
 void pcmk__bundle_apply_coloc_score(pe_resource_t *dependent,
                                     pe_resource_t *primary,
                                     pcmk__colocation_t *colocation,
                                     bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__output_bundle_actions(pe_resource_t *rsc);
 
 
 // Injections (pcmk_injections.c)
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_node_state_change(cib_t *cib_conn, const char *node,
                                         bool up);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node,
                                        const char *resource,
                                        const char *lrm_name,
                                        const char *rclass,
                                        const char *rtype,
                                        const char *rprovider);
 
 G_GNUC_INTERNAL
 void pcmk__inject_failcount(pcmk__output_t *out, xmlNode *cib_node,
                             const char *resource, const char *task,
                             guint interval_ms, int rc);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_action_result(xmlNode *cib_resource,
                                     lrmd_event_data_t *op, int target_rc);
 
 
 // Nodes (pcmk_sched_nodes.c)
 
 G_GNUC_INTERNAL
 bool pcmk__node_available(const pe_node_t *node, bool consider_score,
                           bool consider_guest);
 
 G_GNUC_INTERNAL
 bool pcmk__any_node_available(GHashTable *nodes);
 
 G_GNUC_INTERNAL
 GHashTable *pcmk__copy_node_table(GHashTable *nodes);
 
 G_GNUC_INTERNAL
 GList *pcmk__sort_nodes(GList *nodes, pe_node_t *active_node);
 
 G_GNUC_INTERNAL
 void pcmk__apply_node_health(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__top_allowed_node(const pe_resource_t *rsc,
                                   const pe_node_t *node);
 
 
 // Functions applying to more than one variant (pcmk_sched_resource.c)
 
 G_GNUC_INTERNAL
 void pcmk__set_allocation_methods(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
                              const xmlNode *rsc_entry, bool active_on_node);
 
 G_GNUC_INTERNAL
 GList *pcmk__rscs_matching_id(const char *id, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 GList *pcmk__colocated_resources(pe_resource_t *rsc, pe_resource_t *orig_rsc,
                                  GList *colocated_rscs);
 
 G_GNUC_INTERNAL
 void pcmk__output_resource_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__assign_primitive(pe_resource_t *rsc, pe_node_t *chosen, bool force);
 
 G_GNUC_INTERNAL
 bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force);
 
 G_GNUC_INTERNAL
 void pcmk__unassign_resource(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__threshold_reached(pe_resource_t *rsc, pe_node_t *node,
                              pe_resource_t **failed);
 
 G_GNUC_INTERNAL
 void pcmk__sort_resources(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 gint pcmk__cmp_instance(gconstpointer a, gconstpointer b);
 
 G_GNUC_INTERNAL
 gint pcmk__cmp_instance_number(gconstpointer a, gconstpointer b);
 
 
 // Functions related to probes (pcmk_sched_probes.c)
 
 G_GNUC_INTERNAL
 bool pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__order_probes(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__probe_resource_list(GList *rscs, pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__schedule_probes(pe_working_set_t *data_set);
 
 
 // Functions related to node utilization (pcmk_sched_utilization.c)
 
 G_GNUC_INTERNAL
 int pcmk__compare_node_capacities(const pe_node_t *node1,
                                   const pe_node_t *node2);
 
 G_GNUC_INTERNAL
 void pcmk__consume_node_capacity(GHashTable *current_utilization,
                                  pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__release_node_capacity(GHashTable *current_utilization,
                                  pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__ban_insufficient_capacity(pe_resource_t *rsc, pe_node_t **prefer);
 
 G_GNUC_INTERNAL
 void pcmk__create_utilization_constraints(pe_resource_t *rsc,
                                           GList *allowed_nodes);
 
 G_GNUC_INTERNAL
 void pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set);
 
 #endif // PCMK__LIBPACEMAKER_PRIVATE__H
diff --git a/lib/pacemaker/pcmk_sched_allocate.c b/lib/pacemaker/pcmk_sched_allocate.c
index f163a49e25..e94a48b1b9 100644
--- a/lib/pacemaker/pcmk_sched_allocate.c
+++ b/lib/pacemaker/pcmk_sched_allocate.c
@@ -1,806 +1,806 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 
 #include <glib.h>
 
 #include <crm/pengine/status.h>
 #include <pacemaker-internal.h>
 #include "libpacemaker_private.h"
 
 CRM_TRACE_INIT_DATA(pacemaker);
 
 /*!
  * \internal
  * \brief Do deferred action checks after allocation
  *
  * When unpacking the resource history, the scheduler checks for resource
  * configurations that have changed since an action was run. However, at that
  * time, bundles using the REMOTE_CONTAINER_HACK don't have their final
  * parameter information, so instead they add a deferred check to a list. This
  * function processes one entry in that list.
  *
  * \param[in] rsc       Resource that action history is for
  * \param[in] node      Node that action history is for
  * \param[in] rsc_op    Action history entry
  * \param[in] check     Type of deferred check to do
  * \param[in] data_set  Working set for cluster
  */
 static void
 check_params(pe_resource_t *rsc, pe_node_t *node, xmlNode *rsc_op,
              enum pe_check_parameters check, pe_working_set_t *data_set)
 {
     const char *reason = NULL;
     op_digest_cache_t *digest_data = NULL;
 
     switch (check) {
         case pe_check_active:
             if (pcmk__check_action_config(rsc, node, rsc_op)
                 && pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                                     data_set)) {
                 reason = "action definition changed";
             }
             break;
 
         case pe_check_last_failure:
             digest_data = rsc_action_digest_cmp(rsc, rsc_op, node, data_set);
             switch (digest_data->rc) {
                 case RSC_DIGEST_UNKNOWN:
                     crm_trace("Resource %s history entry %s on %s has "
                               "no digest to compare",
                               rsc->id, ID(rsc_op), node->details->id);
                     break;
                 case RSC_DIGEST_MATCH:
                     break;
                 default:
                     reason = "resource parameters have changed";
                     break;
             }
             break;
     }
     if (reason != NULL) {
         pe__clear_failcount(rsc, node, reason, data_set);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a resource has failcount clearing scheduled on a node
  *
  * \param[in] node  Node to check
  * \param[in] rsc   Resource to check
  *
  * \return true if \p rsc has failcount clearing scheduled on \p node,
  *         otherwise false
  */
 static bool
 failcount_clear_action_exists(pe_node_t *node, pe_resource_t *rsc)
 {
     GList *list = pe__resource_actions(rsc, node, CRM_OP_CLEAR_FAILCOUNT, TRUE);
 
     if (list != NULL) {
         g_list_free(list);
         return true;
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Ban a resource from a node if it reached its failure threshold there
  *
  * \param[in] rsc       Resource to check failure threshold for
  * \param[in] node      Node to check \p rsc on
  */
 static void
 check_failure_threshold(pe_resource_t *rsc, pe_node_t *node)
 {
     // If this is a collective resource, apply recursively to children instead
     if (rsc->children != NULL) {
         g_list_foreach(rsc->children, (GFunc) check_failure_threshold,
                        node);
         return;
 
     } else if (failcount_clear_action_exists(node, rsc)) {
         /* Don't force the resource away from this node due to a failcount
          * that's going to be cleared.
          *
          * @TODO Failcount clearing can be scheduled in
          * pcmk__handle_rsc_config_changes() via process_rsc_history(), or in
          * schedule_resource_actions() via check_params(). This runs well before
          * then, so it cannot detect those, meaning we might check the migration
          * threshold when we shouldn't. Worst case, we stop or move the
          * resource, then move it back in the next transition.
          */
         return;
 
     } else {
         pe_resource_t *failed = NULL;
 
         if (pcmk__threshold_reached(rsc, node, &failed)) {
             resource_location(failed, node, -INFINITY, "__fail_limit__",
                               rsc->cluster);
         }
     }
 }
 
 /*!
  * \internal
  * \brief If resource has exclusive discovery, ban node if not allowed
  *
  * Location constraints have a resource-discovery option that allows users to
  * specify where probes are done for the affected resource. If this is set to
  * exclusive, probes will only be done on nodes listed in exclusive constraints.
  * This function bans the resource from the node if the node is not listed.
  *
  * \param[in] rsc   Resource to check
  * \param[in] node  Node to check \p rsc on
  */
 static void
 apply_exclusive_discovery(pe_resource_t *rsc, pe_node_t *node)
 {
     if (rsc->exclusive_discover || uber_parent(rsc)->exclusive_discover) {
         pe_node_t *match = NULL;
 
         // If this is a collective resource, apply recursively to children
         g_list_foreach(rsc->children, (GFunc) apply_exclusive_discovery, node);
 
         match = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
         if ((match != NULL)
             && (match->rsc_discover_mode != pe_discover_exclusive)) {
             match->weight = -INFINITY;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Apply stickiness to a resource if appropriate
  *
  * \param[in] rsc       Resource to check for stickiness
  * \param[in] data_set  Cluster working set
  */
 static void
 apply_stickiness(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     pe_node_t *node = NULL;
 
     // If this is a collective resource, apply recursively to children instead
     if (rsc->children != NULL) {
         g_list_foreach(rsc->children, (GFunc) apply_stickiness, data_set);
         return;
     }
 
     /* A resource is sticky if it is managed, has stickiness configured, and is
      * active on a single node.
      */
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)
         || (rsc->stickiness < 1) || !pcmk__list_of_1(rsc->running_on)) {
         return;
     }
 
     node = rsc->running_on->data;
 
     /* In a symmetric cluster, stickiness can always be used. In an
      * asymmetric cluster, we have to check whether the resource is still
      * allowed on the node, so we don't keep the resource somewhere it is no
      * longer explicitly enabled.
      */
     if (!pcmk_is_set(rsc->cluster->flags, pe_flag_symmetric_cluster)
         && (pe_hash_table_lookup(rsc->allowed_nodes,
                                  node->details->id) == NULL)) {
         pe_rsc_debug(rsc,
                      "Ignoring %s stickiness because the cluster is "
                      "asymmetric and node %s is not explicitly allowed",
                      rsc->id, node->details->uname);
         return;
     }
 
     pe_rsc_debug(rsc, "Resource %s has %d stickiness on node %s",
                  rsc->id, rsc->stickiness, node->details->uname);
     resource_location(rsc, node, rsc->stickiness, "stickiness",
                       rsc->cluster);
 }
 
 /*!
  * \internal
  * \brief Apply shutdown locks for all resources as appropriate
  *
  * \param[in] data_set  Cluster working set
  */
 static void
 apply_shutdown_locks(pe_working_set_t *data_set)
 {
     if (!pcmk_is_set(data_set->flags, pe_flag_shutdown_lock)) {
         return;
     }
     for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
         pe_resource_t *rsc = (pe_resource_t *) iter->data;
 
         rsc->cmds->shutdown_lock(rsc);
     }
 }
 
 /*!
  * \internal
  * \brief Calculate the number of available nodes in the cluster
  *
  * \param[in] data_set  Cluster working set
  */
 static void
 count_available_nodes(pe_working_set_t *data_set)
 {
     if (pcmk_is_set(data_set->flags, pe_flag_no_compat)) {
         return;
     }
 
     // @COMPAT for API backward compatibility only (cluster does not use value)
     for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
         pe_node_t *node = (pe_node_t *) iter->data;
 
         if ((node != NULL) && (node->weight >= 0) && node->details->online
             && (node->details->type != node_ping)) {
             data_set->max_valid_nodes++;
         }
     }
     crm_trace("Online node count: %d", data_set->max_valid_nodes);
 }
 
 /*!
  * \internal
  * \brief Apply node-specific scheduling criteria
  *
  * After the CIB has been unpacked, process node-specific scheduling criteria
  * including shutdown locks, location constraints, resource stickiness,
  * migration thresholds, and exclusive resource discovery.
  *
  * \param[in] data_set  Cluster working set
  */
 static void
 apply_node_criteria(pe_working_set_t *data_set)
 {
     crm_trace("Applying node-specific scheduling criteria");
     apply_shutdown_locks(data_set);
     count_available_nodes(data_set);
     pcmk__apply_locations(data_set);
     g_list_foreach(data_set->resources, (GFunc) apply_stickiness, data_set);
 
     for (GList *node_iter = data_set->nodes; node_iter != NULL;
          node_iter = node_iter->next) {
         for (GList *rsc_iter = data_set->resources; rsc_iter != NULL;
              rsc_iter = rsc_iter->next) {
             pe_node_t *node = (pe_node_t *) node_iter->data;
             pe_resource_t *rsc = (pe_resource_t *) rsc_iter->data;
 
             check_failure_threshold(rsc, node);
             apply_exclusive_discovery(rsc, node);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Allocate resources to nodes
  *
  * \param[in] data_set  Cluster working set
  */
 static void
 allocate_resources(pe_working_set_t *data_set)
 {
     GList *iter = NULL;
 
     crm_trace("Allocating resources to nodes");
 
     if (!pcmk__str_eq(data_set->placement_strategy, "default", pcmk__str_casei)) {
         pcmk__sort_resources(data_set);
     }
     pcmk__show_node_capacities("Original", data_set);
 
     if (pcmk_is_set(data_set->flags, pe_flag_have_remote_nodes)) {
         /* Allocate remote connection resources first (which will also allocate
          * any colocation dependencies). If the connection is migrating, always
          * prefer the partial migration target.
          */
         for (iter = data_set->resources; iter != NULL; iter = iter->next) {
             pe_resource_t *rsc = (pe_resource_t *) iter->data;
 
             if (rsc->is_remote_node) {
                 pe_rsc_trace(rsc, "Allocating remote connection resource '%s'",
                              rsc->id);
-                rsc->cmds->allocate(rsc, rsc->partial_migration_target);
+                rsc->cmds->assign(rsc, rsc->partial_migration_target);
             }
         }
     }
 
     /* Now allocate the rest of the resources */
     for (iter = data_set->resources; iter != NULL; iter = iter->next) {
         pe_resource_t *rsc = (pe_resource_t *) iter->data;
 
         if (!rsc->is_remote_node) {
             pe_rsc_trace(rsc, "Allocating %s resource '%s'",
                          crm_element_name(rsc->xml), rsc->id);
-            rsc->cmds->allocate(rsc, NULL);
+            rsc->cmds->assign(rsc, NULL);
         }
     }
 
     pcmk__show_node_capacities("Remaining", data_set);
 }
 
 /*!
  * \internal
  * \brief Schedule fail count clearing on online nodes if resource is orphaned
  *
  * \param[in] rsc       Resource to check
  * \param[in] data_set  Cluster working set
  */
 static void
 clear_failcounts_if_orphaned(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         return;
     }
     crm_trace("Clear fail counts for orphaned resource %s", rsc->id);
 
     /* There's no need to recurse into rsc->children because those
      * should just be unallocated clone instances.
      */
 
     for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
         pe_node_t *node = (pe_node_t *) iter->data;
         pe_action_t *clear_op = NULL;
 
         if (!node->details->online) {
             continue;
         }
         if (pe_get_failcount(node, rsc, NULL, pe_fc_effective, NULL,
                              data_set) == 0) {
             continue;
         }
 
         clear_op = pe__clear_failcount(rsc, node, "it is orphaned", data_set);
 
         /* We can't use order_action_then_stop() here because its
          * pe_order_preserve breaks things
          */
         pcmk__new_ordering(clear_op->rsc, NULL, clear_op, rsc, stop_key(rsc),
                            NULL, pe_order_optional, data_set);
     }
 }
 
 /*!
  * \internal
  * \brief Schedule any resource actions needed
  *
  * \param[in] data_set  Cluster working set
  */
 static void
 schedule_resource_actions(pe_working_set_t *data_set)
 {
     // Process deferred action checks
     pe__foreach_param_check(data_set, check_params);
     pe__free_param_checks(data_set);
 
     if (pcmk_is_set(data_set->flags, pe_flag_startup_probes)) {
         crm_trace("Scheduling probes");
         pcmk__schedule_probes(data_set);
     }
 
     if (pcmk_is_set(data_set->flags, pe_flag_stop_rsc_orphans)) {
         g_list_foreach(data_set->resources,
                        (GFunc) clear_failcounts_if_orphaned, data_set);
     }
 
     crm_trace("Scheduling resource actions");
     for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
         pe_resource_t *rsc = (pe_resource_t *) iter->data;
 
         rsc->cmds->create_actions(rsc);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a resource or any of its descendants are managed
  *
  * \param[in] rsc  Resource to check
  *
  * \return true if resource or any descendant is managed, otherwise false
  */
 static bool
 is_managed(const pe_resource_t *rsc)
 {
     if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         return true;
     }
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         if (is_managed((pe_resource_t *) iter->data)) {
             return true;
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Check whether any resources in the cluster are managed
  *
  * \param[in] data_set  Cluster working set
  *
  * \return true if any resource is managed, otherwise false
  */
 static bool
 any_managed_resources(pe_working_set_t *data_set)
 {
     for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
         if (is_managed((pe_resource_t *) iter->data)) {
             return true;
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Check whether a node requires fencing
  *
  * \param[in] node          Node to check
  * \param[in] have_managed  Whether any resource in cluster is managed
  * \param[in] data_set      Cluster working set
  *
  * \return true if \p node should be fenced, otherwise false
  */
 static bool
 needs_fencing(pe_node_t *node, bool have_managed, pe_working_set_t *data_set)
 {
     return have_managed && node->details->unclean
            && pe_can_fence(data_set, node);
 }
 
 /*!
  * \internal
  * \brief Check whether a node requires shutdown
  *
  * \param[in] node          Node to check
  *
  * \return true if \p node should be shut down, otherwise false
  */
 static bool
 needs_shutdown(pe_node_t *node)
 {
     if (pe__is_guest_or_remote_node(node)) {
        /* Do not send shutdown actions for Pacemaker Remote nodes.
         * @TODO We might come up with a good use for this in the future.
         */
         return false;
     }
     return node->details->online && node->details->shutdown;
 }
 
 /*!
  * \internal
  * \brief Track and order non-DC fencing
  *
  * \param[in] list      List of existing non-DC fencing actions
  * \param[in] action    Fencing action to prepend to \p list
  * \param[in] data_set  Cluster working set
  *
  * \return (Possibly new) head of \p list
  */
 static GList *
 add_nondc_fencing(GList *list, pe_action_t *action, pe_working_set_t *data_set)
 {
     if (!pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)
         && (list != NULL)) {
         /* Concurrent fencing is disabled, so order each non-DC
          * fencing in a chain. If there is any DC fencing or
          * shutdown, it will be ordered after the last action in the
          * chain later.
          */
         order_actions((pe_action_t *) list->data, action, pe_order_optional);
     }
     return g_list_prepend(list, action);
 }
 
 /*!
  * \internal
  * \brief Schedule a node for fencing
  *
  * \param[in] node      Node that requires fencing
  * \param[in] data_set  Cluster working set
  *
  * \return Newly created fencing action
  */
 static pe_action_t *
 schedule_fencing(pe_node_t *node, pe_working_set_t *data_set)
 {
     pe_action_t *fencing = pe_fence_op(node, NULL, FALSE, "node is unclean",
                                        FALSE, data_set);
 
     pe_warn("Scheduling node %s for fencing", node->details->uname);
     pcmk__order_vs_fence(fencing, data_set);
     return fencing;
 }
 
 /*!
  * \internal
  * \brief Create and order node fencing and shutdown actions
  *
  * \param[in] data_set  Cluster working set
  */
 static void
 schedule_fencing_and_shutdowns(pe_working_set_t *data_set)
 {
     pe_action_t *dc_down = NULL;
     bool integrity_lost = false;
     bool have_managed = any_managed_resources(data_set);
     GList *fencing_ops = NULL;
     GList *shutdown_ops = NULL;
 
     crm_trace("Scheduling fencing and shutdowns as needed");
     if (!have_managed) {
         crm_notice("No fencing will be done until there are resources to manage");
     }
 
     // Check each node for whether it needs fencing or shutdown
     for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
         pe_node_t *node = (pe_node_t *) iter->data;
         pe_action_t *fencing = NULL;
 
         /* Guest nodes are "fenced" by recovering their container resource,
          * so handle them separately.
          */
         if (pe__is_guest_node(node)) {
             if (node->details->remote_requires_reset && have_managed
                 && pe_can_fence(data_set, node)) {
                 pcmk__fence_guest(node);
             }
             continue;
         }
 
         if (needs_fencing(node, have_managed, data_set)) {
             fencing = schedule_fencing(node, data_set);
 
             // Track DC and non-DC fence actions separately
             if (node->details->is_dc) {
                 dc_down = fencing;
             } else {
                 fencing_ops = add_nondc_fencing(fencing_ops, fencing, data_set);
             }
 
         } else if (needs_shutdown(node)) {
             pe_action_t *down_op = pcmk__new_shutdown_action(node);
 
             // Track DC and non-DC shutdown actions separately
             if (node->details->is_dc) {
                 dc_down = down_op;
             } else {
                 shutdown_ops = g_list_prepend(shutdown_ops, down_op);
             }
         }
 
         if ((fencing == NULL) && node->details->unclean) {
             integrity_lost = true;
             pe_warn("Node %s is unclean but cannot be fenced",
                     node->details->uname);
         }
     }
 
     if (integrity_lost) {
         if (!pcmk_is_set(data_set->flags, pe_flag_stonith_enabled)) {
             pe_warn("Resource functionality and data integrity cannot be "
                     "guaranteed (configure, enable, and test fencing to "
                     "correct this)");
 
         } else if (!pcmk_is_set(data_set->flags, pe_flag_have_quorum)) {
             crm_notice("Unclean nodes will not be fenced until quorum is "
                        "attained or no-quorum-policy is set to ignore");
         }
     }
 
     if (dc_down != NULL) {
         /* Order any non-DC shutdowns before any DC shutdown, to avoid repeated
          * DC elections. However, we don't want to order non-DC shutdowns before
          * a DC *fencing*, because even though we don't want a node that's
          * shutting down to become DC, the DC fencing could be ordered before a
          * clone stop that's also ordered before the shutdowns, thus leading to
          * a graph loop.
          */
         if (pcmk__str_eq(dc_down->task, CRM_OP_SHUTDOWN, pcmk__str_none)) {
             pcmk__order_after_each(dc_down, shutdown_ops);
         }
 
         // Order any non-DC fencing before any DC fencing or shutdown
 
         if (pcmk_is_set(data_set->flags, pe_flag_concurrent_fencing)) {
             /* With concurrent fencing, order each non-DC fencing action
              * separately before any DC fencing or shutdown.
              */
             pcmk__order_after_each(dc_down, fencing_ops);
         } else if (fencing_ops != NULL) {
             /* Without concurrent fencing, the non-DC fencing actions are
              * already ordered relative to each other, so we just need to order
              * the DC fencing after the last action in the chain (which is the
              * first item in the list).
              */
             order_actions((pe_action_t *) fencing_ops->data, dc_down,
                           pe_order_optional);
         }
     }
     g_list_free(fencing_ops);
     g_list_free(shutdown_ops);
 }
 
 static void
 log_resource_details(pe_working_set_t *data_set)
 {
     pcmk__output_t *out = data_set->priv;
     GList *all = NULL;
 
     /* We need a list of nodes that we are allowed to output information for.
      * This is necessary because out->message for all the resource-related
      * messages expects such a list, due to the `crm_mon --node=` feature.
      * Here, we just make it a list of all the nodes.
      */
     all = g_list_prepend(all, (gpointer) "*");
 
     for (GList *item = data_set->resources; item != NULL; item = item->next) {
         pe_resource_t *rsc = (pe_resource_t *) item->data;
 
         // Log all resources except inactive orphans
         if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
             || (rsc->role != RSC_ROLE_STOPPED)) {
             out->message(out, crm_map_element_name(rsc->xml), 0, rsc, all, all);
         }
     }
 
     g_list_free(all);
 }
 
 static void
 log_all_actions(pe_working_set_t *data_set)
 {
     /* This only ever outputs to the log, so ignore whatever output object was
      * previously set and just log instead.
      */
     pcmk__output_t *prev_out = data_set->priv;
     pcmk__output_t *out = NULL;
 
     if (pcmk__log_output_new(&out) != pcmk_rc_ok) {
         return;
     }
 
     pe__register_messages(out);
     pcmk__register_lib_messages(out);
     pcmk__output_set_log_level(out, LOG_NOTICE);
     data_set->priv = out;
 
     out->begin_list(out, NULL, NULL, "Actions");
     pcmk__output_actions(data_set);
     out->end_list(out);
     out->finish(out, CRM_EX_OK, true, NULL);
     pcmk__output_free(out);
 
     data_set->priv = prev_out;
 }
 
 /*!
  * \internal
  * \brief Log all required but unrunnable actions at trace level
  *
  * \param[in] data_set  Cluster working set
  */
 static void
 log_unrunnable_actions(pe_working_set_t *data_set)
 {
     const uint64_t flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;
 
     crm_trace("Required but unrunnable actions:");
     for (GList *iter = data_set->actions; iter != NULL; iter = iter->next) {
         pe_action_t *action = (pe_action_t *) iter->data;
 
         if (!pcmk_any_flags_set(action->flags, flags)) {
             pcmk__log_action("\t", action, true);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Unpack the CIB for scheduling
  *
  * \param[in] cib       CIB XML to unpack (may be NULL if previously unpacked)
  * \param[in] flags     Working set flags to set in addition to defaults
  * \param[in] data_set  Cluster working set
  */
 static void
 unpack_cib(xmlNode *cib, unsigned long long flags, pe_working_set_t *data_set)
 {
     const char* localhost_save = NULL;
 
     if (pcmk_is_set(data_set->flags, pe_flag_have_status)) {
         crm_trace("Reusing previously calculated cluster status");
         pe__set_working_set_flags(data_set, flags);
         return;
     }
 
     if (data_set->localhost) {
         localhost_save = data_set->localhost;
     }
 
     CRM_ASSERT(cib != NULL);
     crm_trace("Calculating cluster status");
 
     /* This will zero the entire struct without freeing anything first, so
      * callers should never call pcmk__schedule_actions() with a populated data
      * set unless pe_flag_have_status is set (i.e. cluster_status() was
      * previously called, whether directly or via pcmk__schedule_actions()).
      */
     set_working_set_defaults(data_set);
 
     if (localhost_save) {
         data_set->localhost = localhost_save;
     }
 
     pe__set_working_set_flags(data_set, flags);
     data_set->input = cib;
     cluster_status(data_set); // Sets pe_flag_have_status
 }
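 
 /* A hypothetical usage sketch: to force a fresh calculation with the same
  * working set, reset it first so that pe_flag_have_status is cleared:
  *
  *     pe_reset_working_set(data_set);
  *     unpack_cib(cib, flags, data_set);
  */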
 
 /*!
  * \internal
  * \brief Run the scheduler for a given CIB
  *
  * \param[in]     cib       CIB XML to use as scheduler input
  * \param[in]     flags     Working set flags to set in addition to defaults
  * \param[in,out] data_set  Cluster working set
  */
 void
 pcmk__schedule_actions(xmlNode *cib, unsigned long long flags,
                        pe_working_set_t *data_set)
 {
     unpack_cib(cib, flags, data_set);
     pcmk__set_allocation_methods(data_set);
     pcmk__apply_node_health(data_set);
     pcmk__unpack_constraints(data_set);
     if (pcmk_is_set(data_set->flags, pe_flag_check_config)) {
         return;
     }
 
     if (!pcmk_is_set(data_set->flags, pe_flag_quick_location) &&
          pcmk__is_daemon) {
         log_resource_details(data_set);
     }
 
     apply_node_criteria(data_set);
 
     if (pcmk_is_set(data_set->flags, pe_flag_quick_location)) {
         return;
     }
 
     pcmk__create_internal_constraints(data_set);
     pcmk__handle_rsc_config_changes(data_set);
     allocate_resources(data_set);
     schedule_resource_actions(data_set);
 
     /* Remote ordering constraints need to happen prior to calculating fencing
      * because it is one more place we can mark nodes as needing fencing.
      */
     pcmk__order_remote_connection_actions(data_set);
 
     schedule_fencing_and_shutdowns(data_set);
     pcmk__apply_orderings(data_set);
     log_all_actions(data_set);
     pcmk__create_graph(data_set);
 
     if (get_crm_log_level() == LOG_TRACE) {
         log_unrunnable_actions(data_set);
     }
 }
diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c
index 55bb6929a5..3053c3e1e0 100644
--- a/lib/pacemaker/pcmk_sched_bundle.c
+++ b/lib/pacemaker/pcmk_sched_bundle.c
@@ -1,1146 +1,1155 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdbool.h>
 
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 #define PE__VARIANT_BUNDLE 1
 #include <lib/pengine/variant.h>
 
 static bool
 is_bundle_node(pe__bundle_variant_data_t *data, pe_node_t *node)
 {
     for (GList *gIter = data->replicas; gIter != NULL; gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (node->details == replica->node->details) {
             return TRUE;
         }
     }
     return FALSE;
 }
 
 void distribute_children(pe_resource_t *rsc, GList *children, GList *nodes,
                          int max, int per_host_max, pe_working_set_t * data_set);
 
 static GList *
 get_container_list(pe_resource_t *rsc)
 {
     GList *containers = NULL;
 
     if (rsc->variant == pe_container) {
         pe__bundle_variant_data_t *data = NULL;
 
         get_bundle_variant_data(data, rsc);
         for (GList *gIter = data->replicas; gIter != NULL;
              gIter = gIter->next) {
             pe__bundle_replica_t *replica = gIter->data;
 
             containers = g_list_append(containers, replica->container);
         }
     }
     return containers;
 }
 
 static inline GList *
 get_containers_or_children(pe_resource_t *rsc)
 {
     return (rsc->variant == pe_container)?
            get_container_list(rsc) : rsc->children;
 }
 
+/*!
+ * \internal
+ * \brief Assign a bundle resource to a node
+ *
+ * \param[in] rsc     Resource to assign to a node
+ * \param[in] prefer  Node to prefer, if all else is equal
+ *
+ * \return Node that \p rsc is assigned to, if assigned entirely to one node
+ */
 pe_node_t *
 pcmk__bundle_allocate(pe_resource_t *rsc, pe_node_t *prefer)
 {
     GList *containers = NULL;
     GList *nodes = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return NULL);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     pe__set_resource_flags(rsc, pe_rsc_allocating);
     containers = get_container_list(rsc);
 
     pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
                           rsc, __func__, rsc->allowed_nodes, rsc->cluster);
 
     nodes = g_hash_table_get_values(rsc->allowed_nodes);
     nodes = pcmk__sort_nodes(nodes, NULL);
     containers = g_list_sort(containers, pcmk__cmp_instance);
     distribute_children(rsc, containers, nodes, bundle_data->nreplicas,
                         bundle_data->nreplicas_per_host, rsc->cluster);
     g_list_free(nodes);
     g_list_free(containers);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         pe_node_t *container_host = NULL;
 
         CRM_ASSERT(replica);
         if (replica->ip) {
             pe_rsc_trace(rsc, "Allocating bundle %s IP %s",
                          rsc->id, replica->ip->id);
-            replica->ip->cmds->allocate(replica->ip, prefer);
+            replica->ip->cmds->assign(replica->ip, prefer);
         }
 
         container_host = replica->container->allocated_to;
         if (replica->remote && pe__is_guest_or_remote_node(container_host)) {
             /* We need 'nested' connection resources to be on the same
              * host because pacemaker-remoted only supports a single
              * active connection
              */
             pcmk__new_colocation("child-remote-with-docker-remote", NULL,
                                  INFINITY, replica->remote,
                                  container_host->details->remote_rsc, NULL,
                                  NULL, true, rsc->cluster);
         }
 
         if (replica->remote) {
             pe_rsc_trace(rsc, "Allocating bundle %s connection %s",
                          rsc->id, replica->remote->id);
-            replica->remote->cmds->allocate(replica->remote, prefer);
+            replica->remote->cmds->assign(replica->remote, prefer);
         }
 
         // Explicitly allocate replicas' children before bundle child
         if (replica->child) {
             pe_node_t *node = NULL;
             GHashTableIter iter;
 
             g_hash_table_iter_init(&iter, replica->child->allowed_nodes);
             while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
                 if (node->details != replica->node->details) {
                     node->weight = -INFINITY;
                 } else if (!pcmk__threshold_reached(replica->child, node,
                                                     NULL)) {
                     node->weight = INFINITY;
                 }
             }
 
             pe__set_resource_flags(replica->child->parent, pe_rsc_allocating);
             pe_rsc_trace(rsc, "Allocating bundle %s replica child %s",
                          rsc->id, replica->child->id);
-            replica->child->cmds->allocate(replica->child, replica->node);
+            replica->child->cmds->assign(replica->child, replica->node);
             pe__clear_resource_flags(replica->child->parent,
                                        pe_rsc_allocating);
         }
     }
 
     if (bundle_data->child) {
         pe_node_t *node = NULL;
         GHashTableIter iter;
         g_hash_table_iter_init(&iter, bundle_data->child->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
             if (is_bundle_node(bundle_data, node)) {
                 node->weight = 0;
             } else {
                 node->weight = -INFINITY;
             }
         }
         pe_rsc_trace(rsc, "Allocating bundle %s child %s",
                      rsc->id, bundle_data->child->id);
-        bundle_data->child->cmds->allocate(bundle_data->child, prefer);
+        bundle_data->child->cmds->assign(bundle_data->child, prefer);
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
     return NULL;
 }
 
 
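+/*!
+ * \internal
+ * \brief Create all actions needed for a given bundle resource
+ *
+ * \param[in] rsc  Bundle resource to create actions for
+ */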
 void
 pcmk__bundle_create_actions(pe_resource_t *rsc)
 {
     pe_action_t *action = NULL;
     GList *containers = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     containers = get_container_list(rsc);
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (replica->ip) {
             replica->ip->cmds->create_actions(replica->ip);
         }
         if (replica->container) {
             replica->container->cmds->create_actions(replica->container);
         }
         if (replica->remote) {
             replica->remote->cmds->create_actions(replica->remote);
         }
     }
 
     clone_create_pseudo_actions(rsc, containers, NULL, NULL);
 
     if (bundle_data->child) {
         bundle_data->child->cmds->create_actions(bundle_data->child);
 
         if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) {
             /* promote */
             pe__new_rsc_pseudo_action(rsc, RSC_PROMOTE, true, true);
             action = pe__new_rsc_pseudo_action(rsc, RSC_PROMOTED, true, true);
             action->priority = INFINITY;
 
             /* demote */
             pe__new_rsc_pseudo_action(rsc, RSC_DEMOTE, true, true);
             action = pe__new_rsc_pseudo_action(rsc, RSC_DEMOTED, true, true);
             action->priority = INFINITY;
         }
     }
 
     g_list_free(containers);
 }
 
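+/*!
+ * \internal
+ * \brief Create implicit constraints needed for a bundle resource
+ *
+ * \param[in] rsc  Bundle resource to create implicit constraints for
+ */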
 void
 pcmk__bundle_internal_constraints(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (bundle_data->child) {
         pcmk__order_resource_actions(rsc, RSC_START, bundle_data->child,
                                      RSC_START, pe_order_implies_first_printed);
         pcmk__order_resource_actions(rsc, RSC_STOP, bundle_data->child,
                                      RSC_STOP, pe_order_implies_first_printed);
 
         if (bundle_data->child->children) {
             pcmk__order_resource_actions(bundle_data->child, RSC_STARTED, rsc,
                                          RSC_STARTED,
                                          pe_order_implies_then_printed);
             pcmk__order_resource_actions(bundle_data->child, RSC_STOPPED, rsc,
                                          RSC_STOPPED,
                                          pe_order_implies_then_printed);
         } else {
             pcmk__order_resource_actions(bundle_data->child, RSC_START, rsc,
                                          RSC_STARTED,
                                          pe_order_implies_then_printed);
             pcmk__order_resource_actions(bundle_data->child, RSC_STOP, rsc,
                                          RSC_STOPPED,
                                          pe_order_implies_then_printed);
         }
     }
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         CRM_ASSERT(replica->container);
 
         replica->container->cmds->internal_constraints(replica->container);
 
         pcmk__order_starts(rsc, replica->container,
                            pe_order_runnable_left|pe_order_implies_first_printed);
 
         if (replica->child) {
             pcmk__order_stops(rsc, replica->child,
                               pe_order_implies_first_printed);
         }
         pcmk__order_stops(rsc, replica->container,
                           pe_order_implies_first_printed);
         pcmk__order_resource_actions(replica->container, RSC_START, rsc,
                                      RSC_STARTED,
                                      pe_order_implies_then_printed);
         pcmk__order_resource_actions(replica->container, RSC_STOP, rsc,
                                      RSC_STOPPED,
                                      pe_order_implies_then_printed);
 
         if (replica->ip) {
             replica->ip->cmds->internal_constraints(replica->ip);
 
             // Start IP then container
             pcmk__order_starts(replica->ip, replica->container,
                                pe_order_runnable_left|pe_order_preserve);
             pcmk__order_stops(replica->container, replica->ip,
                               pe_order_implies_first|pe_order_preserve);
 
             pcmk__new_colocation("ip-with-docker", NULL, INFINITY, replica->ip,
                                  replica->container, NULL, NULL, true,
                                  rsc->cluster);
         }
 
         if (replica->remote) {
             /* This handles ordering and colocating remote relative to container
              * (via "resource-with-container"). Since IP is also ordered and
              * colocated relative to the container, we don't need to do anything
              * explicit here with IP.
              */
             replica->remote->cmds->internal_constraints(replica->remote);
         }
 
         if (replica->child) {
             CRM_ASSERT(replica->remote);
 
             // "Start remote then child" is implicit in scheduler's remote logic
         }
 
     }
 
     if (bundle_data->child) {
         bundle_data->child->cmds->internal_constraints(bundle_data->child);
         if (pcmk_is_set(bundle_data->child->flags, pe_rsc_promotable)) {
             pcmk__promotable_restart_ordering(rsc);
 
             /* child demoted before global demoted */
             pcmk__order_resource_actions(bundle_data->child, RSC_DEMOTED, rsc,
                                          RSC_DEMOTED,
                                          pe_order_implies_then_printed);
 
             /* global demote before child demote */
             pcmk__order_resource_actions(rsc, RSC_DEMOTE, bundle_data->child,
                                          RSC_DEMOTE,
                                          pe_order_implies_first_printed);
 
             /* child promoted before global promoted */
             pcmk__order_resource_actions(bundle_data->child, RSC_PROMOTED, rsc,
                                          RSC_PROMOTED,
                                          pe_order_implies_then_printed);
 
             /* global promote before child promote */
             pcmk__order_resource_actions(rsc, RSC_PROMOTE, bundle_data->child,
                                          RSC_PROMOTE,
                                          pe_order_implies_first_printed);
         }
     }
 }
 
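+/*!
+ * \internal
+ * \brief Find a bundle replica container compatible with a dependent resource
+ *
+ * \param[in] rsc_lh     Dependent resource to find a compatible container for
+ * \param[in] candidate  Node that any compatible container must be on
+ * \param[in] rsc        Bundle to search for a compatible container
+ * \param[in] filter     Role that any compatible container must have
+ * \param[in] current    Whether to check current or assigned location
+ *
+ * \return Compatible container from \p rsc if any, otherwise NULL
+ */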
 static pe_resource_t *
 compatible_replica_for_node(pe_resource_t *rsc_lh, pe_node_t *candidate,
                             pe_resource_t *rsc, enum rsc_role_e filter,
                             gboolean current)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(candidate != NULL, return NULL);
     get_bundle_variant_data(bundle_data, rsc);
 
     crm_trace("Looking for compatible child from %s for %s on %s",
               rsc_lh->id, rsc->id, candidate->details->uname);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (is_child_compatible(replica->container, candidate, filter, current)) {
             crm_trace("Pairing %s with %s on %s",
                       rsc_lh->id, replica->container->id,
                       candidate->details->uname);
             return replica->container;
         }
     }
 
     crm_trace("Can't pair %s with %s", rsc_lh->id, rsc->id);
     return NULL;
 }
 
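+/*!
+ * \internal
+ * \brief Find a bundle replica container compatible with a dependent resource
+ *
+ * The dependent's active (or assigned) node is checked first; if the
+ * dependent is inactive, each of its allowed nodes is checked in sorted
+ * order.
+ *
+ * \param[in] rsc_lh    Dependent resource to find a compatible container for
+ * \param[in] rsc       Bundle to search for a compatible container
+ * \param[in] filter    Role that any compatible container must have
+ * \param[in] current   Whether to check current or assigned location
+ * \param[in] data_set  Cluster working set
+ *
+ * \return Compatible container from \p rsc if any, otherwise NULL
+ */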
 static pe_resource_t *
 compatible_replica(pe_resource_t *rsc_lh, pe_resource_t *rsc,
                    enum rsc_role_e filter, gboolean current,
                    pe_working_set_t *data_set)
 {
     GList *scratch = NULL;
     pe_resource_t *pair = NULL;
     pe_node_t *active_node_lh = NULL;
 
     active_node_lh = rsc_lh->fns->location(rsc_lh, NULL, current);
     if (active_node_lh) {
         return compatible_replica_for_node(rsc_lh, active_node_lh, rsc, filter,
                                            current);
     }
 
     scratch = g_hash_table_get_values(rsc_lh->allowed_nodes);
     scratch = pcmk__sort_nodes(scratch, NULL);
 
     for (GList *gIter = scratch; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         pair = compatible_replica_for_node(rsc_lh, node, rsc, filter, current);
         if (pair) {
             goto done;
         }
     }
 
     pe_rsc_debug(rsc, "Can't pair %s with %s", rsc_lh->id, (rsc? rsc->id : "none"));
   done:
     g_list_free(scratch);
     return pair;
 }
 
 int copies_per_node(pe_resource_t *rsc)
 {
     /* Strictly speaking, there should be a 'copies_per_node' entry in the
      * resource function table, with a separate implementation for each
      * variant. However, that would be serious overkill just to return an
      * int. Arguably, both function tables could be replaced by
      * resources.{c,h} full of rsc_{some_operation} functions containing a
      * switch like the one below, which calls out to functions named
      * {variant}_{some_operation} as needed.
      */
     switch(rsc->variant) {
         case pe_unknown:
             return 0;
         case pe_native:
         case pe_group:
             return 1;
         case pe_clone:
             {
                 const char *max_clones_node = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION_NODEMAX);
 
                 if (max_clones_node == NULL) {
                     return 1;
 
                 } else {
                     int max_i;
 
                     pcmk__scan_min_int(max_clones_node, &max_i, 0);
                     return max_i;
                 }
             }
         case pe_container:
             {
                 pe__bundle_variant_data_t *data = NULL;
                 get_bundle_variant_data(data, rsc);
                 return data->nreplicas_per_host;
             }
     }
     return 0;
 }
 
 /*!
  * \internal
  * \brief Apply a colocation's score to node weights or resource priority
  *
  * Given a colocation constraint, apply its score to the dependent's
  * allowed node weights (if we are still placing resources) or priority (if
  * we are choosing promotable clone instance roles).
  *
  * \param[in] dependent      Dependent resource in colocation
  * \param[in] primary        Primary resource in colocation
  * \param[in] colocation     Colocation constraint to apply
  * \param[in] for_dependent  true if called on behalf of dependent
  */
 void
 pcmk__bundle_apply_coloc_score(pe_resource_t *dependent, pe_resource_t *primary,
                                pcmk__colocation_t *colocation,
                                bool for_dependent)
 {
     GList *allocated_primaries = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     /* This should never be called for the bundle itself as a dependent.
      * Instead, we add its colocation constraints to its replicas and call the
      * apply_coloc_score() for the replicas as dependents.
      */
     CRM_ASSERT(!for_dependent);
 
     CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
               return);
     CRM_ASSERT(dependent->variant == pe_native);
 
     if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
         pe_rsc_trace(primary, "%s is still provisional", primary->id);
         return;
 
     } else if (colocation->dependent->variant > pe_group) {
         pe_resource_t *primary_replica = compatible_replica(dependent, primary,
                                                             RSC_ROLE_UNKNOWN,
                                                             FALSE,
                                                             dependent->cluster);
 
         if (primary_replica) {
             pe_rsc_debug(primary, "Pairing %s with %s",
                          dependent->id, primary_replica->id);
             dependent->cmds->apply_coloc_score(dependent, primary_replica,
                                                colocation, true);
 
         } else if (colocation->score >= INFINITY) {
             crm_notice("Cannot pair %s with instance of %s",
                        dependent->id, primary->id);
             pcmk__assign_resource(dependent, NULL, true);
 
         } else {
             pe_rsc_debug(primary, "Cannot pair %s with instance of %s",
                          dependent->id, primary->id);
         }
 
         return;
     }
 
     get_bundle_variant_data(bundle_data, primary);
     pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d",
                  colocation->id, dependent->id, primary->id, colocation->score);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (colocation->score < INFINITY) {
             replica->container->cmds->apply_coloc_score(dependent,
                                                         replica->container,
                                                         colocation, false);
 
         } else {
             pe_node_t *chosen = replica->container->fns->location(replica->container,
                                                                   NULL, FALSE);
 
             if ((chosen == NULL)
                 || is_set_recursive(replica->container, pe_rsc_block, TRUE)) {
                 continue;
             }
             if ((colocation->primary_role >= RSC_ROLE_PROMOTED)
                 && (replica->child == NULL)) {
                 continue;
             }
             if ((colocation->primary_role >= RSC_ROLE_PROMOTED)
                 && (replica->child->next_role < RSC_ROLE_PROMOTED)) {
                 continue;
             }
 
             pe_rsc_trace(primary, "Allowing %s: %s %d",
                          colocation->id, chosen->details->uname,
                          chosen->weight);
             allocated_primaries = g_list_prepend(allocated_primaries, chosen);
         }
     }
 
     if (colocation->score >= INFINITY) {
         node_list_exclude(dependent->allowed_nodes, allocated_primaries, FALSE);
     }
     g_list_free(allocated_primaries);
 }
 
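+/*!
+ * \internal
+ * \brief Get the flags for a given bundle resource action
+ *
+ * \param[in] action  Bundle action to check
+ * \param[in] node    Node to limit the check to (if any)
+ *
+ * \return Flags appropriate to \p action on \p node
+ */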
 enum pe_action_flags
 pcmk__bundle_action_flags(pe_action_t *action, pe_node_t *node)
 {
     GList *containers = NULL;
     enum pe_action_flags flags = 0;
     pe__bundle_variant_data_t *data = NULL;
 
     get_bundle_variant_data(data, action->rsc);
     if(data->child) {
         enum action_tasks task = get_complex_task(data->child, action->task, TRUE);
         switch(task) {
             case no_action:
             case action_notify:
             case action_notified:
             case action_promote:
             case action_promoted:
             case action_demote:
             case action_demoted:
                 return summary_action_flags(action, data->child->children, node);
             default:
                 break;
         }
     }
 
     containers = get_container_list(action->rsc);
     flags = summary_action_flags(action, containers, node);
     g_list_free(containers);
     return flags;
 }
 
 pe_resource_t *
 find_compatible_child_by_node(pe_resource_t * local_child, pe_node_t * local_node, pe_resource_t * rsc,
                               enum rsc_role_e filter, gboolean current)
 {
     GList *gIter = NULL;
     GList *children = NULL;
 
     if (local_node == NULL) {
         crm_err("Can't colocate unrunnable child %s with %s", local_child->id, rsc->id);
         return NULL;
     }
 
     crm_trace("Looking for compatible child from %s for %s on %s",
               local_child->id, rsc->id, local_node->details->uname);
 
     children = get_containers_or_children(rsc);
     for (gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         if(is_child_compatible(child_rsc, local_node, filter, current)) {
             crm_trace("Pairing %s with %s on %s",
                       local_child->id, child_rsc->id, local_node->details->uname);
             return child_rsc;
         }
     }
 
     crm_trace("Can't pair %s with %s", local_child->id, rsc->id);
     if(children != rsc->children) {
         g_list_free(children);
     }
     return NULL;
 }
 
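+/*!
+ * \internal
+ * \brief Get the bundle replica with a given container on a given node
+ *
+ * Only replicas that also have a bundled child resource are considered.
+ *
+ * \param[in] rsc        Bundle resource to check
+ * \param[in] container  Container that the replica must have
+ * \param[in] node       Node that the replica must be assigned to
+ *
+ * \return Matching replica from \p rsc if any, otherwise NULL
+ */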
 static pe__bundle_replica_t *
 replica_for_container(pe_resource_t *rsc, pe_resource_t *container,
                       pe_node_t *node)
 {
     if (rsc->variant == pe_container) {
         pe__bundle_variant_data_t *data = NULL;
 
         get_bundle_variant_data(data, rsc);
         for (GList *gIter = data->replicas; gIter != NULL;
              gIter = gIter->next) {
             pe__bundle_replica_t *replica = gIter->data;
 
             if (replica->child
                 && (container == replica->container)
                 && (node->details == replica->node->details)) {
                 return replica;
             }
         }
     }
     return NULL;
 }
 
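+/*!
+ * \internal
+ * \brief Update two interleaved actions according to an ordering between them
+ *
+ * Pair each child (or container) of the 'then' action's resource with a
+ * compatible child of the 'first' action's resource, and update the
+ * children's actions according to the ordering.
+ *
+ * \param[in] first     'First' action in an ordering
+ * \param[in] then      'Then' action in an ordering
+ * \param[in] node      If not NULL, limit scope of ordering to this node
+ * \param[in] filter    Action flags to limit scope of certain updates
+ * \param[in] type      Group of enum pe_ordering flags to apply
+ * \param[in] data_set  Cluster working set
+ *
+ * \return Group of enum pcmk__updated flags indicating what was updated
+ */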
 static uint32_t
 multi_update_interleave_actions(pe_action_t *first, pe_action_t *then,
                                 pe_node_t *node, uint32_t filter, uint32_t type,
                                 pe_working_set_t *data_set)
 {
     GList *gIter = NULL;
     GList *children = NULL;
     gboolean current = FALSE;
     uint32_t changed = pcmk__updated_none;
 
     /* FIXME: Checking the UUID suffix is a lazy way of determining whether
      * children should be matched by their current location
      */
     if (pcmk__ends_with(first->uuid, "_stopped_0")
         || pcmk__ends_with(first->uuid, "_demoted_0")) {
         current = TRUE;
     }
 
     children = get_containers_or_children(then->rsc);
     for (gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *then_child = gIter->data;
         pe_resource_t *first_child = find_compatible_child(then_child,
                                                            first->rsc,
                                                            RSC_ROLE_UNKNOWN,
                                                            current);
         if (first_child == NULL && current) {
             crm_trace("Ignore");
 
         } else if (first_child == NULL) {
             crm_debug("No match found for %s (%d / %s / %s)",
                       then_child->id, current, first->uuid, then->uuid);
 
             /* This is an unfortunate hack, but what else can we do?
              *
              * If no instance of the 'first' resource is active or about
              * to be active on the same node as then_child, then
              * then_child must not be allowed to start.
              */
             if (pcmk_any_flags_set(type, pe_order_runnable_left|pe_order_implies_then) /* Mandatory */ ) {
                 pe_rsc_info(then->rsc, "Inhibiting %s from being active", then_child->id);
                 if (pcmk__assign_resource(then_child, NULL, true)) {
                     pcmk__set_updated_flags(changed, first, pcmk__updated_then);
                 }
             }
 
         } else {
             pe_action_t *first_action = NULL;
             pe_action_t *then_action = NULL;
 
             enum action_tasks task = clone_child_action(first);
             const char *first_task = task2text(task);
 
             pe__bundle_replica_t *first_replica = NULL;
             pe__bundle_replica_t *then_replica = NULL;
 
             first_replica = replica_for_container(first->rsc, first_child,
                                                   node);
             if (strstr(first->task, "stop") && first_replica && first_replica->child) {
                 /* Except for 'stopped', we should be looking at the
                  * in-container resource; actions for the child will
                  * happen later and are therefore more likely to align
                  * with the user's intent.
                  */
                 first_action = find_first_action(first_replica->child->actions,
                                                  NULL, task2text(task), node);
             } else {
                 first_action = find_first_action(first_child->actions, NULL, task2text(task), node);
             }
 
             then_replica = replica_for_container(then->rsc, then_child, node);
             if (strstr(then->task, "mote")
                 && then_replica && then_replica->child) {
                 /* Promote/demote actions will never be found for the
                  * container resource, so look in the child instead.
                  *
                  * Alternatively treat:
                  *  'XXXX then promote YYYY' as 'XXXX then start container for YYYY', and
                  *  'demote XXXX then stop YYYY' as 'stop container for XXXX then stop YYYY'
                  */
                 then_action = find_first_action(then_replica->child->actions,
                                                 NULL, then->task, node);
             } else {
                 then_action = find_first_action(then_child->actions, NULL, then->task, node);
             }
 
             if (first_action == NULL) {
                 if (!pcmk_is_set(first_child->flags, pe_rsc_orphan)
                     && !pcmk__str_any_of(first_task, RSC_STOP, RSC_DEMOTE, NULL)) {
                     crm_err("Internal error: No action found for %s in %s (first)",
                             first_task, first_child->id);
 
                 } else {
                     crm_trace("No action found for %s in %s%s (first)",
                               first_task, first_child->id,
                               pcmk_is_set(first_child->flags, pe_rsc_orphan)? " (ORPHAN)" : "");
                 }
                 continue;
             }
 
             /* We're only interested if 'then' is neither stopping nor being demoted */ 
             if (then_action == NULL) {
                 if (!pcmk_is_set(then_child->flags, pe_rsc_orphan)
                     && !pcmk__str_any_of(then->task, RSC_STOP, RSC_DEMOTE, NULL)) {
                     crm_err("Internal error: No action found for %s in %s (then)",
                             then->task, then_child->id);
 
                 } else {
                     crm_trace("No action found for %s in %s%s (then)",
                               then->task, then_child->id,
                               pcmk_is_set(then_child->flags, pe_rsc_orphan)? " (ORPHAN)" : "");
                 }
                 continue;
             }
 
             if (order_actions(first_action, then_action, type)) {
                 crm_debug("Created constraint for %s (%d) -> %s (%d) %.6x",
                           first_action->uuid,
                           pcmk_is_set(first_action->flags, pe_action_optional),
                           then_action->uuid,
                           pcmk_is_set(then_action->flags, pe_action_optional),
                           type);
                 pcmk__set_updated_flags(changed, first,
                                         pcmk__updated_first|pcmk__updated_then);
             }
             if(first_action && then_action) {
                 changed |= then_child->cmds->update_ordered_actions(first_action,
                                                                     then_action,
                                                                     node,
                                                                     first_child->cmds->action_flags(first_action, node),
                                                                     filter,
                                                                     type,
                                                                     data_set);
             } else {
                 crm_err("Nothing found either for %s (%p) or %s (%p) %s",
                         first_child->id, first_action,
                         then_child->id, then_action, task2text(task));
             }
         }
     }
 
     if(children != then->rsc->children) {
         g_list_free(children);
     }
     return changed;
 }
 
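+/*!
+ * \internal
+ * \brief Check whether the resources of two ordered actions can be interleaved
+ *
+ * \param[in] first  'First' action in the ordering
+ * \param[in] then   'Then' action in the ordering
+ *
+ * \return true if the actions' resources can be interleaved, otherwise false
+ */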
 static bool
 can_interleave_actions(pe_action_t *first, pe_action_t *then)
 {
     bool interleave = FALSE;
     pe_resource_t *rsc = NULL;
     const char *interleave_s = NULL;
 
     if(first->rsc == NULL || then->rsc == NULL) {
         crm_trace("Not interleaving %s with %s (both must be resources)", first->uuid, then->uuid);
         return FALSE;
     } else if(first->rsc == then->rsc) {
         crm_trace("Not interleaving %s with %s (must belong to different resources)", first->uuid, then->uuid);
         return FALSE;
     } else if(first->rsc->variant < pe_clone || then->rsc->variant < pe_clone) {
         crm_trace("Not interleaving %s with %s (both sides must be clones or bundles)", first->uuid, then->uuid);
         return FALSE;
     }
 
     if (pcmk__ends_with(then->uuid, "_stop_0")
         || pcmk__ends_with(then->uuid, "_demote_0")) {
         rsc = first->rsc;
     } else {
         rsc = then->rsc;
     }
 
     interleave_s = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INTERLEAVE);
     interleave = crm_is_true(interleave_s);
     crm_trace("Interleave %s -> %s: %s (based on %s)",
               first->uuid, then->uuid, interleave ? "yes" : "no", rsc->id);
 
     return interleave;
 }
 
 /*!
  * \internal
  * \brief Update two actions according to an ordering between them
  *
  * Given information about an ordering of two actions, update the actions'
  * flags (and runnable_before members if appropriate) as appropriate for the
  * ordering. In some cases, the ordering could be disabled as well.
  *
  * \param[in] first     'First' action in an ordering
  * \param[in] then      'Then' action in an ordering
  * \param[in] node      If not NULL, limit scope of ordering to this node
  *                      (only used when interleaving instances)
  * \param[in] flags     Action flags for \p first for ordering purposes
  * \param[in] filter    Action flags to limit scope of certain updates (may
  *                      include pe_action_optional to affect only mandatory
  *                      actions, and pe_action_runnable to affect only
  *                      runnable actions)
  * \param[in] type      Group of enum pe_ordering flags to apply
  * \param[in] data_set  Cluster working set
  *
  * \return Group of enum pcmk__updated flags indicating what was updated
  */
 uint32_t
 pcmk__multi_update_actions(pe_action_t *first, pe_action_t *then,
                            pe_node_t *node, uint32_t flags, uint32_t filter,
                            uint32_t type, pe_working_set_t *data_set)
 {
     uint32_t changed = pcmk__updated_none;
 
     crm_trace("%s -> %s", first->uuid, then->uuid);
 
     if(can_interleave_actions(first, then)) {
         changed = multi_update_interleave_actions(first, then, node, filter,
                                                   type, data_set);
 
     } else if(then->rsc) {
         GList *gIter = NULL;
         GList *children = NULL;
 
         // Handle the 'primitive' ordering case
         changed |= pcmk__update_ordered_actions(first, then, node, flags,
                                                 filter, type, data_set);
 
         // Now any children (or containers in the case of a bundle)
         children = get_containers_or_children(then->rsc);
         for (gIter = children; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *then_child = (pe_resource_t *) gIter->data;
             uint32_t then_child_changed = pcmk__updated_none;
             pe_action_t *then_child_action = find_first_action(then_child->actions, NULL, then->task, node);
 
             if (then_child_action) {
                 uint32_t then_child_flags = then_child->cmds->action_flags(then_child_action,
                                                                            node);
 
                 if (pcmk_is_set(then_child_flags, pe_action_runnable)) {
                     then_child_changed |= then_child->cmds->update_ordered_actions(first,
                                                                                    then_child_action,
                                                                                    node,
                                                                                    flags,
                                                                                    filter,
                                                                                    type,
                                                                                    data_set);
                 }
                 changed |= then_child_changed;
                 if (pcmk_is_set(then_child_changed, pcmk__updated_then)) {
                     for (GList *lpc = then_child_action->actions_after; lpc != NULL; lpc = lpc->next) {
                         pe_action_wrapper_t *next = (pe_action_wrapper_t *) lpc->data;
 
                         pcmk__update_action_for_orderings(next->action,
                                                           data_set);
                     }
                 }
             }
         }
 
         if(children != then->rsc->children) {
             g_list_free(children);
         }
     }
     return changed;
 }
 
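+/*!
+ * \internal
+ * \brief Apply a location constraint to a bundle and its replicas
+ *
+ * \param[in] rsc         Bundle resource to apply constraint to
+ * \param[in] constraint  Location constraint to apply
+ */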
 void
 pcmk__bundle_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     get_bundle_variant_data(bundle_data, rsc);
 
     pcmk__apply_location(constraint, rsc);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (replica->container) {
             replica->container->cmds->rsc_location(replica->container,
                                                    constraint);
         }
         if (replica->ip) {
             replica->ip->cmds->rsc_location(replica->ip, constraint);
         }
     }
 
     if (bundle_data->child
         && ((constraint->role_filter == RSC_ROLE_UNPROMOTED)
             || (constraint->role_filter == RSC_ROLE_PROMOTED))) {
         bundle_data->child->cmds->rsc_location(bundle_data->child, constraint);
         bundle_data->child->rsc_location = g_list_prepend(bundle_data->child->rsc_location,
                                                           constraint);
     }
 }
 
 /*!
  * \internal
  * \brief Add a resource's actions to the transition graph
  *
  * \param[in] rsc  Resource whose actions should be added
  */
 void
 pcmk__bundle_expand(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (bundle_data->child) {
         bundle_data->child->cmds->add_actions_to_graph(bundle_data->child);
     }
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (replica->remote && replica->container
             && pe__bundle_needs_remote_name(replica->remote, rsc->cluster)) {
 
             /* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that
              * run pacemaker-remoted inside, without needing a separate IP for
              * the container. This is done by configuring the inner remote's
              * connection host as the magic string "#uname", then
              * replacing it with the underlying host when needed.
              */
             xmlNode *nvpair = get_xpath_object("//nvpair[@name='" XML_RSC_ATTR_REMOTE_RA_ADDR "']",
                                                replica->remote->xml, LOG_ERR);
             const char *calculated_addr = NULL;
 
             // Replace the value in replica->remote->xml (if appropriate)
             calculated_addr = pe__add_bundle_remote_name(replica->remote,
                                                          rsc->cluster,
                                                          nvpair, "value");
             if (calculated_addr) {
                 /* Since this is for the bundle as a resource, and not any
                  * particular action, replace the value in the default
                  * parameters (not evaluated for node). create_graph_action()
                  * will grab it from there to replace it in node-evaluated
                  * parameters.
                  */
                 GHashTable *params = pe_rsc_params(replica->remote,
                                                    NULL, rsc->cluster);
 
                 g_hash_table_replace(params,
                                      strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
                                      strdup(calculated_addr));
             } else {
                 /* The only way to get here is if the remote connection is
                  * neither currently running nor scheduled to run. That means we
                  * won't be doing any operations that require addr (only start
                  * requires it; we additionally use it to compare digests when
                  * unpacking status, promote, and migrate_from history, but
                  * that's already happened by this point).
                  */
                 crm_info("Unable to determine address for bundle %s remote connection",
                          rsc->id);
             }
         }
         if (replica->ip) {
             replica->ip->cmds->add_actions_to_graph(replica->ip);
         }
         if (replica->container) {
             replica->container->cmds->add_actions_to_graph(replica->container);
         }
         if (replica->remote) {
             replica->remote->cmds->add_actions_to_graph(replica->remote);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Schedule any probes needed for a resource on a node
  *
  * \param[in] rsc   Resource to create probe for
  * \param[in] node  Node to create probe on
  *
  * \return true if any probe was created, otherwise false
  */
 bool
 pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node)
 {
     bool any_created = false;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return false);
 
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if ((replica->ip != NULL)
             && replica->ip->cmds->create_probe(replica->ip, node)) {
             any_created = true;
         }
         if ((replica->child != NULL) && (node->details == replica->node->details)
             && replica->child->cmds->create_probe(replica->child, node)) {
             any_created = true;
         }
         if ((replica->container != NULL)
             && replica->container->cmds->create_probe(replica->container,
                                                       node)) {
             any_created = true;
 
             /* If we're limited to one replica per host (probably due to
              * the lack of an IP range), then we don't want any of our
              * peer containers starting until we've established that no
              * other copies are already running.
              *
              * Partly this is to ensure that nreplicas_per_host is
              * observed, but also to ensure that the containers don't
              * fail to start because the necessary port mappings (which
              * won't include an IP for uniqueness) are already taken.
              */
 
             for (GList *tIter = bundle_data->replicas;
                  tIter && (bundle_data->nreplicas_per_host == 1);
                  tIter = tIter->next) {
                 pe__bundle_replica_t *other = tIter->data;
 
                 if ((other != replica) && (other != NULL)
                     && (other->container != NULL)) {
 
                     pcmk__new_ordering(replica->container,
                                        pcmk__op_key(replica->container->id, RSC_STATUS, 0),
                                        NULL, other->container,
                                        pcmk__op_key(other->container->id, RSC_START, 0),
                                        NULL,
                                        pe_order_optional|pe_order_same_node,
                                        rsc->cluster);
                 }
             }
         }
         if ((replica->container != NULL) && (replica->remote != NULL)
             && replica->remote->cmds->create_probe(replica->remote, node)) {
 
             /* Do not probe the remote resource until we know where the
              * container is running. This is required for REMOTE_CONTAINER_HACK
              * to correctly probe remote resources.
              */
             char *probe_uuid = pcmk__op_key(replica->remote->id, RSC_STATUS,
                                                0);
             pe_action_t *probe = find_first_action(replica->remote->actions,
                                                    probe_uuid, NULL, node);
 
             free(probe_uuid);
             if (probe != NULL) {
                 any_created = true;
                 crm_trace("Ordering %s probe on %s",
                           replica->remote->id, node->details->uname);
                 pcmk__new_ordering(replica->container,
                                    pcmk__op_key(replica->container->id, RSC_START, 0),
                                    NULL, replica->remote, NULL, probe,
                                    pe_order_probe, rsc->cluster);
             }
         }
     }
     return any_created;
 }
 
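+// Bundle implementation of resource_alloc_functions_t:append_meta() (no-op)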
 void
 pcmk__bundle_append_meta(pe_resource_t *rsc, xmlNode *xml)
 {
 }
 
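+/*!
+ * \internal
+ * \brief Output actions scheduled for a bundle resource and its children
+ *
+ * \param[in] rsc  Bundle resource to output actions for
+ */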
 void
 pcmk__output_bundle_actions(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (replica->ip != NULL) {
             replica->ip->cmds->output_actions(replica->ip);
         }
         if (replica->container != NULL) {
             replica->container->cmds->output_actions(replica->container);
         }
         if (replica->remote != NULL) {
             replica->remote->cmds->output_actions(replica->remote);
         }
         if (replica->child != NULL) {
             replica->child->cmds->output_actions(replica->child);
         }
     }
 }
 
 // Bundle implementation of resource_alloc_functions_t:add_utilization()
 void
 pcmk__bundle_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
                              GList *all_rscs, GHashTable *utilization)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     pe__bundle_replica_t *replica = NULL;
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return;
     }
 
     get_bundle_variant_data(bundle_data, rsc);
     if (bundle_data->replicas == NULL) {
         return;
     }
 
     /* All bundle replicas are identical, so using the utilization of the first
      * is sufficient for any. Only the implicit container resource can have
      * utilization values.
      */
     replica = (pe__bundle_replica_t *) bundle_data->replicas->data;
     if (replica->container != NULL) {
         replica->container->cmds->add_utilization(replica->container, orig_rsc,
                                                   all_rscs, utilization);
     }
 }
 
 // Bundle implementation of resource_alloc_functions_t:shutdown_lock()
 void
 pcmk__bundle_shutdown_lock(pe_resource_t *rsc)
 {
     return; // Bundles currently don't support shutdown locks
 }
diff --git a/lib/pacemaker/pcmk_sched_clone.c b/lib/pacemaker/pcmk_sched_clone.c
index a6cad07601..76f144f599 100644
--- a/lib/pacemaker/pcmk_sched_clone.c
+++ b/lib/pacemaker/pcmk_sched_clone.c
@@ -1,1181 +1,1189 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 #define VARIANT_CLONE 1
 #include <lib/pengine/variant.h>
 
 static void append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean all);
 
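+/*!
+ * \internal
+ * \brief Check whether a clone instance can run on a node
+ *
+ * Update the instance's scores for unusable nodes as a side effect. If
+ * \p node is NULL, check all of the instance's allowed nodes.
+ *
+ * \param[in] rsc    Clone instance to check
+ * \param[in] node   Node to check (or NULL for all allowed nodes)
+ * \param[in] limit  Maximum number of instances allowed on one node
+ *
+ * \return Parent's allowed node corresponding to \p node if the instance
+ *         can run there, otherwise NULL
+ */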
 static pe_node_t *
 can_run_instance(pe_resource_t * rsc, pe_node_t * node, int limit)
 {
     pe_node_t *local_node = NULL;
 
     if (node == NULL && rsc->allowed_nodes) {
         GHashTableIter iter;
         g_hash_table_iter_init(&iter, rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&local_node)) {
             can_run_instance(rsc, local_node, limit);
         }
         return NULL;
     }
 
     if (!node) {
         /* make clang analyzer happy */
         goto bail;
 
     } else if (!pcmk__node_available(node, false, false)) {
         goto bail;
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         goto bail;
     }
 
     local_node = pcmk__top_allowed_node(rsc, node);
 
     if (local_node == NULL) {
         crm_warn("%s cannot run on %s: node not allowed", rsc->id, node->details->uname);
         goto bail;
 
     } else if (local_node->weight < 0) {
         common_update_score(rsc, node->details->id, local_node->weight);
         pe_rsc_trace(rsc, "%s cannot run on %s: Parent node weight doesn't allow it.",
                      rsc->id, node->details->uname);
 
     } else if (local_node->count < limit) {
         pe_rsc_trace(rsc, "%s can run on %s (already running %d)",
                      rsc->id, node->details->uname, local_node->count);
         return local_node;
 
     } else {
         pe_rsc_trace(rsc, "%s cannot run on %s: node full (%d >= %d)",
                      rsc->id, node->details->uname, local_node->count, limit);
     }
 
   bail:
     if (node) {
         common_update_score(rsc, node->details->id, -INFINITY);
     }
     return NULL;
 }
 
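+/*!
+ * \internal
+ * \brief Assign a clone instance to a node
+ *
+ * \param[in] rsc        Clone instance to assign
+ * \param[in] prefer     Node to prefer, if all else is equal
+ * \param[in] all_coloc  Whether to include all parent colocations
+ * \param[in] limit      Maximum number of instances allowed on one node
+ * \param[in] data_set   Cluster working set
+ *
+ * \return Node that \p rsc was assigned to, if any
+ */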
 static pe_node_t *
 allocate_instance(pe_resource_t *rsc, pe_node_t *prefer, gboolean all_coloc,
                   int limit, pe_working_set_t *data_set)
 {
     pe_node_t *chosen = NULL;
     GHashTable *backup = NULL;
 
     CRM_ASSERT(rsc);
     pe_rsc_trace(rsc, "Checking allocation of %s (preferring %s, using %s parent colocations)",
                  rsc->id, (prefer? prefer->details->uname: "none"),
                  (all_coloc? "all" : "some"));
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return rsc->fns->location(rsc, NULL, FALSE);
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
         pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
         return NULL;
     }
 
     /* Only include positive colocation preferences of dependent resources
      * if not every node will get a copy of the clone
      */
     append_parent_colocation(rsc->parent, rsc, all_coloc);
 
     if (prefer) {
         pe_node_t *local_prefer = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
 
         if (local_prefer == NULL || local_prefer->weight < 0) {
             pe_rsc_trace(rsc, "Not pre-allocating %s to %s - unavailable", rsc->id,
                          prefer->details->uname);
             return NULL;
         }
     }
 
     can_run_instance(rsc, NULL, limit);
 
     backup = pcmk__copy_node_table(rsc->allowed_nodes);
     pe_rsc_trace(rsc, "Allocating instance %s", rsc->id);
-    chosen = rsc->cmds->allocate(rsc, prefer);
+    chosen = rsc->cmds->assign(rsc, prefer);
     if (chosen && prefer && (chosen->details != prefer->details)) {
         crm_info("Not pre-allocating %s to %s because %s is better",
                  rsc->id, prefer->details->uname, chosen->details->uname);
         g_hash_table_destroy(rsc->allowed_nodes);
         rsc->allowed_nodes = backup;
         pcmk__unassign_resource(rsc);
         chosen = NULL;
         backup = NULL;
     }
     if (chosen) {
         pe_node_t *local_node = pcmk__top_allowed_node(rsc, chosen);
 
         if (local_node) {
             local_node->count++;
 
         } else if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             /* What to do? We can't enforce per-node limits in this case */
             pcmk__config_err("%s not found in %s (list of %d)",
                              chosen->details->id, rsc->parent->id,
                              g_hash_table_size(rsc->parent->allowed_nodes));
         }
     }
 
     if(backup) {
         g_hash_table_destroy(backup);
     }
     return chosen;
 }
 
 static void
 append_parent_colocation(pe_resource_t * rsc, pe_resource_t * child, gboolean all)
 {
     GList *gIter = NULL;
 
     gIter = rsc->rsc_cons;
     for (; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data;
 
         if (all || cons->score < 0 || cons->score == INFINITY) {
             child->rsc_cons = g_list_prepend(child->rsc_cons, cons);
         }
     }
 
     gIter = rsc->rsc_cons_lhs;
     for (; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *cons = (pcmk__colocation_t *) gIter->data;
 
         if (!pcmk__colocation_has_influence(cons, child)) {
            continue;
         }
         if (all || cons->score < 0) {
             child->rsc_cons_lhs = g_list_prepend(child->rsc_cons_lhs, cons);
         }
     }
 }
 
 
 void
 distribute_children(pe_resource_t *rsc, GList *children, GList *nodes,
                     int max, int per_host_max, pe_working_set_t * data_set);
 
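+/*!
+ * \internal
+ * \brief Assign as many clone instances as possible to nodes
+ *
+ * Instances are pre-assigned to the nodes where they are already running
+ * where possible, then any remaining instances are assigned to the best
+ * available nodes.
+ *
+ * \param[in] rsc           Clone (or bundle) being assigned
+ * \param[in] children      Instances (or bundle containers) to assign
+ * \param[in] nodes         Nodes eligible for assignment
+ * \param[in] max           Maximum number of instances to assign
+ * \param[in] per_host_max  Maximum number of instances per node
+ * \param[in] data_set      Cluster working set
+ */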
 void
 distribute_children(pe_resource_t *rsc, GList *children, GList *nodes,
                     int max, int per_host_max, pe_working_set_t * data_set) 
 {
     int loop_max = 0;
     int allocated = 0;
     int available_nodes = 0;
     bool all_coloc = false;
 
     /* count now tracks the number of clones currently allocated */
     for(GList *nIter = nodes; nIter != NULL; nIter = nIter->next) {
         pe_node_t *node = nIter->data;
 
         node->count = 0;
         if (pcmk__node_available(node, false, false)) {
             available_nodes++;
         }
     }
 
     all_coloc = (max < available_nodes);
 
     if(available_nodes) {
         loop_max = max / available_nodes;
     }
     if (loop_max < 1) {
         loop_max = 1;
     }
 
     pe_rsc_debug(rsc, "Allocating up to %d %s instances to a possible %d nodes (at most %d per host, %d optimal)",
                  max, rsc->id, available_nodes, per_host_max, loop_max);
 
     /* Pre-allocate as many instances as we can to their current location */
     for (GList *gIter = children; gIter != NULL && allocated < max; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
         pe_node_t *child_node = NULL;
         pe_node_t *local_node = NULL;
 
         if ((child->running_on == NULL)
             || !pcmk_is_set(child->flags, pe_rsc_provisional)
             || pcmk_is_set(child->flags, pe_rsc_failed)) {
 
             continue;
         }
 
         child_node = pe__current_node(child);
         local_node = pcmk__top_allowed_node(child, child_node);
 
         pe_rsc_trace(rsc,
                      "Checking pre-allocation of %s to %s (%d remaining of %d)",
                      child->id, child_node->details->uname, max - allocated,
                      max);
 
         if (!pcmk__node_available(child_node, true, false)) {
             pe_rsc_trace(rsc, "Not pre-allocating because %s cannot run %s",
                          child_node->details->uname, child->id);
             continue;
         }
 
         if ((local_node != NULL) && (local_node->count >= loop_max)) {
             pe_rsc_trace(rsc,
                          "Not pre-allocating because %s already allocated "
                          "optimal instances", child_node->details->uname);
             continue;
         }
 
         if (allocate_instance(child, child_node, all_coloc, per_host_max,
                               data_set)) {
             pe_rsc_trace(rsc, "Pre-allocated %s to %s", child->id,
                          child_node->details->uname);
             allocated++;
         }
     }
 
     pe_rsc_trace(rsc, "Done pre-allocating (%d of %d)", allocated, max);
 
     for (GList *gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         if (child->running_on != NULL) {
             pe_node_t *child_node = pe__current_node(child);
             pe_node_t *local_node = pcmk__top_allowed_node(child, child_node);
 
             if (local_node == NULL) {
                 crm_err("%s is running on %s which isn't allowed",
                         child->id, child_node->details->uname);
             }
         }
 
         if (!pcmk_is_set(child->flags, pe_rsc_provisional)) {
         } else if (allocated >= max) {
             pe_rsc_debug(rsc, "Child %s not allocated: limit reached (%d of %d)",
                          child->id, allocated, max);
             resource_location(child, NULL, -INFINITY, "clone:limit_reached", data_set);
         } else {
             if (allocate_instance(child, NULL, all_coloc, per_host_max,
                                   data_set)) {
                 allocated++;
             }
         }
     }
 
     pe_rsc_debug(rsc, "Allocated %d %s instances of a possible %d",
                  allocated, rsc->id, max);
 }
 
-
+/*!
+ * \internal
+ * \brief Assign a clone resource to a node
+ *
+ * \param[in] rsc     Resource to assign to a node
+ * \param[in] prefer  Node to prefer, if all else is equal
+ *
+ * \return Node that \p rsc is assigned to, if assigned entirely to one node
+ */
 pe_node_t *
 pcmk__clone_allocate(pe_resource_t *rsc, pe_node_t *prefer)
 {
     GList *nodes = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return NULL;
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
         pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
         return NULL;
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__add_promotion_scores(rsc);
     }
 
     pe__set_resource_flags(rsc, pe_rsc_allocating);
 
     /* This information is used by pcmk__cmp_instance() when deciding the order
      * in which to assign clone instances to nodes.
      */
     for (GList *gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
 
         pe_rsc_trace(rsc, "%s: Allocating %s first",
                      rsc->id, constraint->primary->id);
-        constraint->primary->cmds->allocate(constraint->primary, prefer);
+        constraint->primary->cmds->assign(constraint->primary, prefer);
     }
 
     for (GList *gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
 
         if (pcmk__colocation_has_influence(constraint, NULL)) {
             pe_resource_t *dependent = constraint->dependent;
             const char *attr = constraint->node_attribute;
             const float factor = constraint->score / (float) INFINITY;
             const uint32_t flags = pcmk__coloc_select_active
                                    |pcmk__coloc_select_nonnegative;
 
             dependent->cmds->add_colocated_node_scores(dependent, rsc->id,
                                                        &rsc->allowed_nodes,
                                                        attr, factor, flags);
         }
     }
 
     pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
                           rsc, __func__, rsc->allowed_nodes, rsc->cluster);
 
     nodes = g_hash_table_get_values(rsc->allowed_nodes);
     nodes = pcmk__sort_nodes(nodes, NULL);
     rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance);
     distribute_children(rsc, rsc->children, nodes, clone_data->clone_max,
                         clone_data->clone_node_max, rsc->cluster);
     g_list_free(nodes);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__set_instance_roles(rsc);
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_provisional|pe_rsc_allocating);
     pe_rsc_trace(rsc, "Done allocating %s", rsc->id);
     return NULL;
 }
 
 static void
 clone_update_pseudo_status(pe_resource_t * rsc, gboolean * stopping, gboolean * starting,
                            gboolean * active)
 {
     GList *gIter = NULL;
 
     if (rsc->children) {
 
         gIter = rsc->children;
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child = (pe_resource_t *) gIter->data;
 
             clone_update_pseudo_status(child, stopping, starting, active);
         }
 
         return;
     }
 
     CRM_ASSERT(active != NULL);
     CRM_ASSERT(starting != NULL);
     CRM_ASSERT(stopping != NULL);
 
     if (rsc->running_on) {
         *active = TRUE;
     }
 
     gIter = rsc->actions;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (*starting && *stopping) {
             return;
 
         } else if (pcmk_is_set(action->flags, pe_action_optional)) {
             pe_rsc_trace(rsc, "Skipping optional: %s", action->uuid);
             continue;
 
         } else if (!pcmk_any_flags_set(action->flags,
                                        pe_action_pseudo|pe_action_runnable)) {
             pe_rsc_trace(rsc, "Skipping unrunnable: %s", action->uuid);
             continue;
 
         } else if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_casei)) {
             pe_rsc_trace(rsc, "Stopping due to: %s", action->uuid);
             *stopping = TRUE;
 
         } else if (pcmk__str_eq(RSC_START, action->task, pcmk__str_casei)) {
             if (!pcmk_is_set(action->flags, pe_action_runnable)) {
                 pe_rsc_trace(rsc, "Skipping pseudo-op: %s run=%d, pseudo=%d",
                              action->uuid,
                              pcmk_is_set(action->flags, pe_action_runnable),
                              pcmk_is_set(action->flags, pe_action_pseudo));
             } else {
                 pe_rsc_trace(rsc, "Starting due to: %s", action->uuid);
                 pe_rsc_trace(rsc, "%s run=%d, pseudo=%d",
                              action->uuid,
                              pcmk_is_set(action->flags, pe_action_runnable),
                              pcmk_is_set(action->flags, pe_action_pseudo));
                 *starting = TRUE;
             }
         }
     }
 }
 
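+/*!
+ * \internal
+ * \brief Find a resource's only non-optional action of a given type
+ *
+ * \param[in] rsc   Resource to check
+ * \param[in] task  Name of action to find
+ *
+ * \return The action if \p rsc has exactly one non-optional action with the
+ *         given name, otherwise NULL
+ */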
 static pe_action_t *
 find_rsc_action(pe_resource_t *rsc, const char *task)
 {
     pe_action_t *match = NULL;
     GList *actions = pe__resource_actions(rsc, NULL, task, FALSE);
 
     for (GList *item = actions; item != NULL; item = item->next) {
         pe_action_t *op = (pe_action_t *) item->data;
 
         if (!pcmk_is_set(op->flags, pe_action_optional)) {
             if (match != NULL) {
                 // More than one match, don't return any
                 match = NULL;
                 break;
             }
             match = op;
         }
     }
     g_list_free(actions);
     return match;
 }
 
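+/*!
+ * \internal
+ * \brief Order starts and stops of an ordered clone's instances
+ *
+ * \param[in] rsc       Clone resource
+ * \param[in] data_set  Cluster working set
+ */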
 static void
 child_ordering_constraints(pe_resource_t * rsc, pe_working_set_t * data_set)
 {
     pe_action_t *stop = NULL;
     pe_action_t *start = NULL;
     pe_action_t *last_stop = NULL;
     pe_action_t *last_start = NULL;
     GList *gIter = NULL;
 
     if (!pe__clone_is_ordered(rsc)) {
         return;
     }
 
     /* We have to maintain a consistent, sorted child list when building
      * ordering constraints
      */
     rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
 
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         stop = find_rsc_action(child, RSC_STOP);
         if (stop) {
             if (last_stop) {
                 /* child/child relative stop */
                 order_actions(stop, last_stop, pe_order_optional);
             }
             last_stop = stop;
         }
 
         start = find_rsc_action(child, RSC_START);
         if (start) {
             if (last_start) {
                 /* child/child relative start */
                 order_actions(last_start, start, pe_order_optional);
             }
             last_start = start;
         }
     }
 }
 
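+/*!
+ * \internal
+ * \brief Create all actions needed for a given clone resource
+ *
+ * \param[in] rsc  Clone resource to create actions for
+ */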
 void
 clone_create_actions(pe_resource_t *rsc)
 {
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     pe_rsc_debug(rsc, "Creating actions for clone %s", rsc->id);
     clone_create_pseudo_actions(rsc, rsc->children, &clone_data->start_notify,
                                 &clone_data->stop_notify);
     child_ordering_constraints(rsc, rsc->cluster);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__create_promotable_actions(rsc);
     }
 }
 
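+/*!
+ * \internal
+ * \brief Create pseudo-actions for a collective resource's starts and stops
+ *
+ * \param[in]     rsc           Resource to create pseudo-actions for
+ * \param[in]     children      Instances of \p rsc
+ * \param[in,out] start_notify  If not NULL and not already set, where to
+ *                              store start notification data
+ * \param[in,out] stop_notify   If not NULL and not already set, where to
+ *                              store stop notification data
+ */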
 void
 clone_create_pseudo_actions(pe_resource_t *rsc, GList *children,
                             notify_data_t **start_notify,
                             notify_data_t **stop_notify)
 {
     gboolean child_active = FALSE;
     gboolean child_starting = FALSE;
     gboolean child_stopping = FALSE;
     gboolean allow_dependent_migrations = TRUE;
 
     pe_action_t *stop = NULL;
     pe_action_t *stopped = NULL;
 
     pe_action_t *start = NULL;
     pe_action_t *started = NULL;
 
     pe_rsc_trace(rsc, "Creating actions for %s", rsc->id);
 
     for (GList *gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         gboolean starting = FALSE;
         gboolean stopping = FALSE;
 
         child_rsc->cmds->create_actions(child_rsc);
         clone_update_pseudo_status(child_rsc, &stopping, &starting, &child_active);
         if (stopping && starting) {
             allow_dependent_migrations = FALSE;
         }
 
         child_stopping |= stopping;
         child_starting |= starting;
     }
 
     /* start */
     start = pe__new_rsc_pseudo_action(rsc, RSC_START, !child_starting, true);
     started = pe__new_rsc_pseudo_action(rsc, RSC_STARTED, !child_starting,
                                         false);
     started->priority = INFINITY;
 
     if (child_active || child_starting) {
         pe__set_action_flags(started, pe_action_runnable);
     }
 
     if (start_notify != NULL && *start_notify == NULL) {
         *start_notify = pe__clone_notif_pseudo_ops(rsc, RSC_START, start,
                                                    started);
     }
 
     /* stop */
     stop = pe__new_rsc_pseudo_action(rsc, RSC_STOP, !child_stopping, true);
     stopped = pe__new_rsc_pseudo_action(rsc, RSC_STOPPED, !child_stopping,
                                         true);
     stopped->priority = INFINITY;
     if (allow_dependent_migrations) {
         pe__set_action_flags(stop, pe_action_migrate_runnable);
     }
 
     if (stop_notify != NULL && *stop_notify == NULL) {
         *stop_notify = pe__clone_notif_pseudo_ops(rsc, RSC_STOP, stop, stopped);
 
         if (start_notify && *start_notify && *stop_notify) {
             order_actions((*stop_notify)->post_done, (*start_notify)->pre, pe_order_optional);
         }
     }
 }
 
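+/*!
+ * \internal
+ * \brief Create implicit constraints needed for a clone resource
+ *
+ * \param[in] rsc  Clone resource to create implicit constraints for
+ */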
 void
 clone_internal_constraints(pe_resource_t *rsc)
 {
     pe_resource_t *last_rsc = NULL;
     GList *gIter;
     bool ordered = pe__clone_is_ordered(rsc);
 
     pe_rsc_trace(rsc, "Internal constraints for %s", rsc->id);
     pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START,
                                  pe_order_optional);
     pcmk__order_resource_actions(rsc, RSC_START, rsc, RSC_STARTED,
                                  pe_order_runnable_left);
     pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_STOPPED,
                                  pe_order_runnable_left);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__order_resource_actions(rsc, RSC_DEMOTED, rsc, RSC_STOP,
                                      pe_order_optional);
         pcmk__order_resource_actions(rsc, RSC_STARTED, rsc, RSC_PROMOTE,
                                      pe_order_runnable_left);
     }
 
     if (ordered) {
         /* We have to maintain a consistent, sorted child list when building
          * ordering constraints
          */
         rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
     }
     for (gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->internal_constraints(child_rsc);
 
         pcmk__order_starts(rsc, child_rsc,
                            pe_order_runnable_left|pe_order_implies_first_printed);
         pcmk__order_resource_actions(child_rsc, RSC_START, rsc, RSC_STARTED,
                                      pe_order_implies_then_printed);
         if (ordered && (last_rsc != NULL)) {
             pcmk__order_starts(last_rsc, child_rsc, pe_order_optional);
         }
 
         pcmk__order_stops(rsc, child_rsc, pe_order_implies_first_printed);
         pcmk__order_resource_actions(child_rsc, RSC_STOP, rsc, RSC_STOPPED,
                                      pe_order_implies_then_printed);
         if (ordered && (last_rsc != NULL)) {
             pcmk__order_stops(child_rsc, last_rsc, pe_order_optional);
         }
 
         last_rsc = child_rsc;
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         pcmk__order_promotable_instances(rsc);
     }
 }
 
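+/*!
+ * \internal
+ * \brief Check whether a clone instance is (or will be) on a given node
+ *
+ * \param[in] child_rsc   Clone instance to check
+ * \param[in] local_node  Node to check for
+ * \param[in] filter      If not RSC_ROLE_UNKNOWN, the instance must have this
+ *                        next role to be considered a match
+ * \param[in] current     If TRUE, compare the instance's current node,
+ *                        otherwise its assigned node
+ *
+ * \return TRUE if \p child_rsc is located on \p local_node, otherwise FALSE
+ */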
 gboolean
 is_child_compatible(pe_resource_t *child_rsc, pe_node_t *local_node,
                     enum rsc_role_e filter, gboolean current)
 {
     pe_node_t *node = NULL;
     enum rsc_role_e next_role;
 
     CRM_CHECK((child_rsc != NULL) && (local_node != NULL), return FALSE);
 
     next_role = child_rsc->fns->state(child_rsc, current);
     if (is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) {
         /* We only want instances that haven't failed */
         node = child_rsc->fns->location(child_rsc, NULL, current);
     }
 
     if (filter != RSC_ROLE_UNKNOWN && next_role != filter) {
         crm_trace("Filtered %s", child_rsc->id);
         return FALSE;
     }
 
     if (node && (node->details == local_node->details)) {
         return TRUE;
 
     } else if (node) {
         crm_trace("%s - %s vs %s", child_rsc->id, node->details->uname,
                   local_node->details->uname);
 
     } else {
         crm_trace("%s - not allocated %d", child_rsc->id, current);
     }
     return FALSE;
 }
 
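+/*!
+ * \internal
+ * \brief Find an instance of a clone that can be paired with a resource
+ *
+ * \param[in] local_child  Resource to find a pairing for
+ * \param[in] rsc          Clone to search for a compatible instance
+ * \param[in] filter       If not RSC_ROLE_UNKNOWN, only consider instances
+ *                         with this next role
+ * \param[in] current      If TRUE, compare current (rather than assigned)
+ *                         node locations
+ *
+ * \return Compatible instance of \p rsc if any, otherwise NULL
+ */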
 pe_resource_t *
 find_compatible_child(pe_resource_t *local_child, pe_resource_t *rsc,
                       enum rsc_role_e filter, gboolean current)
 {
     pe_resource_t *pair = NULL;
     GList *gIter = NULL;
     GList *scratch = NULL;
     pe_node_t *local_node = NULL;
 
     local_node = local_child->fns->location(local_child, NULL, current);
     if (local_node) {
         return find_compatible_child_by_node(local_child, local_node, rsc, filter, current);
     }
 
     scratch = g_hash_table_get_values(local_child->allowed_nodes);
     scratch = pcmk__sort_nodes(scratch, NULL);
 
     gIter = scratch;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         pair = find_compatible_child_by_node(local_child, node, rsc, filter, current);
         if (pair) {
             goto done;
         }
     }
 
     pe_rsc_debug(rsc, "Can't pair %s with %s", local_child->id, rsc->id);
   done:
     g_list_free(scratch);
     return pair;
 }
 
 /*!
  * \internal
  * \brief Apply a colocation's score to node weights or resource priority
  *
  * Given a colocation constraint, apply its score to the dependent's
  * allowed node weights (if we are still placing resources) or priority (if
  * we are choosing promotable clone instance roles).
  *
  * \param[in] dependent      Dependent resource in colocation
  * \param[in] primary        Primary resource in colocation
  * \param[in] colocation     Colocation constraint to apply
  * \param[in] for_dependent  true if called on behalf of dependent
  */
 void
 pcmk__clone_apply_coloc_score(pe_resource_t *dependent, pe_resource_t *primary,
                               pcmk__colocation_t *colocation,
                               bool for_dependent)
 {
     GList *gIter = NULL;
     gboolean do_interleave = FALSE;
     const char *interleave_s = NULL;
 
     /* This should never be called for the clone itself as a dependent. Instead,
      * we add its colocation constraints to its instances and call
      * apply_coloc_score() for the instances as dependents.
      */
     CRM_ASSERT(!for_dependent);
 
     CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
               return);
     CRM_CHECK(dependent->variant == pe_native, return);
 
     pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d",
                  colocation->id, dependent->id, primary->id, colocation->score);
 
     if (pcmk_is_set(primary->flags, pe_rsc_promotable)) {
         if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
             // We haven't placed the primary yet, so we can't apply colocation
             pe_rsc_trace(primary, "%s is still provisional", primary->id);
             return;
 
         } else if (colocation->primary_role == RSC_ROLE_UNKNOWN) {
             // This isn't a role-specific colocation, so handle normally
             pe_rsc_trace(primary, "Handling %s as a clone colocation",
                          colocation->id);
 
         } else if (pcmk_is_set(dependent->flags, pe_rsc_provisional)) {
             // We're placing the dependent
             pcmk__update_dependent_with_promotable(primary, dependent,
                                                    colocation);
             return;
 
         } else if (colocation->dependent_role == RSC_ROLE_PROMOTED) {
             // We're choosing roles for the dependent
             pcmk__update_promotable_dependent_priority(primary, dependent,
                                                        colocation);
             return;
         }
     }
 
     // Only the dependent needs to be marked for interleave
     interleave_s = g_hash_table_lookup(colocation->dependent->meta,
                                        XML_RSC_ATTR_INTERLEAVE);
     if (crm_is_true(interleave_s)
         && (colocation->dependent->variant > pe_group)) {
         /* @TODO Do we actually care about multiple primary copies sharing a
          * dependent copy anymore?
          */
         if (copies_per_node(colocation->dependent) != copies_per_node(colocation->primary)) {
             pcmk__config_err("Cannot interleave %s and %s because they do not "
                              "support the same number of instances per node",
                              colocation->dependent->id,
                              colocation->primary->id);
 
         } else {
             do_interleave = TRUE;
         }
     }
 
     if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
         pe_rsc_trace(primary, "%s is still provisional", primary->id);
         return;
 
     } else if (do_interleave) {
         pe_resource_t *primary_instance = NULL;
 
         primary_instance = find_compatible_child(dependent, primary,
                                                  RSC_ROLE_UNKNOWN, FALSE);
         if (primary_instance != NULL) {
             pe_rsc_debug(primary, "Pairing %s with %s",
                          dependent->id, primary_instance->id);
             dependent->cmds->apply_coloc_score(dependent, primary_instance,
                                                colocation, true);
 
         } else if (colocation->score >= INFINITY) {
             crm_notice("Cannot pair %s with instance of %s",
                        dependent->id, primary->id);
             pcmk__assign_resource(dependent, NULL, true);
 
         } else {
             pe_rsc_debug(primary, "Cannot pair %s with instance of %s",
                          dependent->id, primary->id);
         }
 
         return;
 
     } else if (colocation->score >= INFINITY) {
         GList *affected_nodes = NULL;
 
         gIter = primary->children;
         for (; gIter != NULL; gIter = gIter->next) {
             pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
             pe_node_t *chosen = child_rsc->fns->location(child_rsc, NULL, FALSE);
 
             if (chosen != NULL && is_set_recursive(child_rsc, pe_rsc_block, TRUE) == FALSE) {
                 pe_rsc_trace(primary, "Allowing %s: %s %d",
                              colocation->id, chosen->details->uname,
                              chosen->weight);
                 affected_nodes = g_list_prepend(affected_nodes, chosen);
             }
         }
 
         node_list_exclude(dependent->allowed_nodes, affected_nodes, FALSE);
         g_list_free(affected_nodes);
         return;
     }
 
     gIter = primary->children;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->apply_coloc_score(dependent, child_rsc, colocation,
                                            false);
     }
 }
 
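+/*!
+ * \internal
+ * \brief Get the task corresponding to a clone action for its instances
+ *
+ * For notification actions, this is the task being notified about.
+ *
+ * \param[in] action  Clone action to check
+ *
+ * \return Enumerated task corresponding to \p action
+ */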
 enum action_tasks
 clone_child_action(pe_action_t * action)
 {
     enum action_tasks result = no_action;
     pe_resource_t *child = (pe_resource_t *) action->rsc->children->data;
 
     if (pcmk__strcase_any_of(action->task, "notify", "notified", NULL)) {
 
         /* Find the action we're notifying about instead */
 
         int stop = 0;
         char *key = action->uuid;
         int lpc = strlen(key);
 
         for (; lpc > 0; lpc--) {
             if (key[lpc] == '_' && stop == 0) {
                 stop = lpc;
 
             } else if (key[lpc] == '_') {
                 char *task_mutable = NULL;
 
                 lpc++;
                 task_mutable = strdup(key + lpc);
                 task_mutable[stop - lpc] = 0;
 
                 crm_trace("Extracted action '%s' from '%s'", task_mutable, key);
                 result = get_complex_task(child, task_mutable, TRUE);
                 free(task_mutable);
                 break;
             }
         }
 
     } else {
         result = get_complex_task(child, action->task, TRUE);
     }
     return result;
 }
 
 #define pe__clear_action_summary_flags(flags, action, flag) do {        \
         flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                      "Action summary", action->rsc->id, \
                                      flags, flag, #flag);               \
     } while (0)
 
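+/*!
+ * \internal
+ * \brief Get flags for a collective resource's action based on its instances
+ *
+ * \param[in] action    Action to check
+ * \param[in] children  Instances whose corresponding actions should be checked
+ * \param[in] node      If not NULL, only consider instance actions on this node
+ *
+ * \return Flags appropriate to \p action
+ */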
 enum pe_action_flags
 summary_action_flags(pe_action_t * action, GList *children, pe_node_t * node)
 {
     GList *gIter = NULL;
     gboolean any_runnable = FALSE;
     gboolean check_runnable = TRUE;
     enum action_tasks task = clone_child_action(action);
     enum pe_action_flags flags = (pe_action_optional | pe_action_runnable | pe_action_pseudo);
     const char *task_s = task2text(task);
 
     for (gIter = children; gIter != NULL; gIter = gIter->next) {
         pe_action_t *child_action = NULL;
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         child_action = find_first_action(child->actions, NULL, task_s, child->children ? NULL : node);
         pe_rsc_trace(action->rsc, "Checking for %s in %s on %s (%s)", task_s, child->id,
                      node ? node->details->uname : "none", child_action?child_action->uuid:"NA");
         if (child_action) {
             enum pe_action_flags child_flags = child->cmds->action_flags(child_action, node);
 
             if (pcmk_is_set(flags, pe_action_optional)
                 && !pcmk_is_set(child_flags, pe_action_optional)) {
                 pe_rsc_trace(child, "%s is mandatory because of %s", action->uuid,
                              child_action->uuid);
                 pe__clear_action_summary_flags(flags, action, pe_action_optional);
                 pe__clear_action_flags(action, pe_action_optional);
             }
             if (pcmk_is_set(child_flags, pe_action_runnable)) {
                 any_runnable = TRUE;
             }
         }
     }
 
     if (check_runnable && any_runnable == FALSE) {
         pe_rsc_trace(action->rsc, "%s is not runnable because no children are", action->uuid);
         pe__clear_action_summary_flags(flags, action, pe_action_runnable);
         if (node == NULL) {
             pe__clear_action_flags(action, pe_action_runnable);
         }
     }
 
     return flags;
 }
 
 enum pe_action_flags
 clone_action_flags(pe_action_t * action, pe_node_t * node)
 {
     return summary_action_flags(action, action->rsc->children, node);
 }
 
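+/*!
+ * \internal
+ * \brief Apply a location constraint to a clone and its instances
+ *
+ * \param[in] rsc         Clone resource to apply constraint to
+ * \param[in] constraint  Location constraint to apply
+ */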
 void
 clone_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
 {
     GList *gIter = rsc->children;
 
     pe_rsc_trace(rsc, "Processing location constraint %s for %s", constraint->id, rsc->id);
 
     pcmk__apply_location(constraint, rsc);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->rsc_location(child_rsc, constraint);
     }
 }
 
 /*!
  * \internal
  * \brief Add a resource's actions to the transition graph
  *
  * \param[in] rsc  Resource whose actions should be added
  */
 void
 clone_expand(pe_resource_t *rsc)
 {
     GList *gIter = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     g_list_foreach(rsc->actions, (GFunc) rsc->cmds->action_flags, NULL);
 
     pe__create_notifications(rsc, clone_data->start_notify);
     pe__create_notifications(rsc, clone_data->stop_notify);
     pe__create_notifications(rsc, clone_data->promote_notify);
     pe__create_notifications(rsc, clone_data->demote_notify);
 
     /* Now that the notifications have been created, we can expand the children */
 
     gIter = rsc->children;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->add_actions_to_graph(child_rsc);
     }
 
     pcmk__add_rsc_actions_to_graph(rsc);
 
     /* The notifications are in the graph now, we can destroy the notify_data */
     pe__free_notification_data(clone_data->demote_notify);
     clone_data->demote_notify = NULL;
     pe__free_notification_data(clone_data->stop_notify);
     clone_data->stop_notify = NULL;
     pe__free_notification_data(clone_data->start_notify);
     clone_data->start_notify = NULL;
     pe__free_notification_data(clone_data->promote_notify);
     clone_data->promote_notify = NULL;
 }
 
 // Check whether a resource or any of its children is known on node
 static bool
 rsc_known_on(const pe_resource_t *rsc, const pe_node_t *node)
 {
     if (rsc->children) {
         for (GList *child_iter = rsc->children; child_iter != NULL;
              child_iter = child_iter->next) {
 
             pe_resource_t *child = (pe_resource_t *) child_iter->data;
 
             if (rsc_known_on(child, node)) {
                 return TRUE;
             }
         }
 
     } else if (rsc->known_on) {
         GHashTableIter iter;
         pe_node_t *known_node = NULL;
 
         g_hash_table_iter_init(&iter, rsc->known_on);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &known_node)) {
             if (node->details == known_node->details) {
                 return TRUE;
             }
         }
     }
     return FALSE;
 }
 
 // Look for an instance of clone that is known on node
 static pe_resource_t *
 find_instance_on(const pe_resource_t *clone, const pe_node_t *node)
 {
     for (GList *gIter = clone->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
 
         if (rsc_known_on(child, node)) {
             return child;
         }
     }
     return NULL;
 }
 
 // For anonymous clones, only a single instance needs to be probed
 static bool
 probe_anonymous_clone(pe_resource_t *rsc, pe_node_t *node,
                       pe_working_set_t *data_set)
 {
     // First, check if we probed an instance on this node last time
     pe_resource_t *child = find_instance_on(rsc, node);
 
     // Otherwise, check if we plan to start an instance on this node
     if (child == NULL) {
         for (GList *child_iter = rsc->children; child_iter && !child;
              child_iter = child_iter->next) {
 
             pe_node_t *local_node = NULL;
             pe_resource_t *child_rsc = (pe_resource_t *) child_iter->data;
 
             if (child_rsc) { /* make clang analyzer happy */
                 local_node = child_rsc->fns->location(child_rsc, NULL, FALSE);
                 if (local_node && (local_node->details == node->details)) {
                     child = child_rsc;
                 }
             }
         }
     }
 
     // Otherwise, use the first clone instance
     if (child == NULL) {
         child = rsc->children->data;
     }
     CRM_ASSERT(child);
     return child->cmds->create_probe(child, node);
 }
 
 /*!
  * \internal
  * \brief Schedule any probes needed for a resource on a node
  *
  * \param[in] rsc   Resource to create probe for
  * \param[in] node  Node to create probe on
  *
  * \return true if any probe was created, otherwise false
  */
 bool
 clone_create_probe(pe_resource_t *rsc, pe_node_t *node)
 {
     CRM_ASSERT(rsc);
 
     rsc->children = g_list_sort(rsc->children, pcmk__cmp_instance_number);
     if (rsc->children == NULL) {
         pe_warn("Clone %s has no children", rsc->id);
         return false;
     }
 
     if (rsc->exclusive_discover) {
         pe_node_t *allowed = g_hash_table_lookup(rsc->allowed_nodes, node->details->id);
         if (allowed && allowed->rsc_discover_mode != pe_discover_exclusive) {
             /* Exclusive discovery is enabled, and this node is not marked
              * as a node where this resource should be discovered.
              *
              * Remove the node from allowed_nodes so that notifications
              * contain only nodes that the resource might ever run on.
              */
             g_hash_table_remove(rsc->allowed_nodes, node->details->id);
 
             /* Bit of a shortcut - might as well take it */
             return false;
         }
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_unique)) {
         return pcmk__probe_resource_list(rsc->children, node);
     } else {
         return probe_anonymous_clone(rsc, node, rsc->cluster);
     }
 }
 
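+/*!
+ * \internal
+ * \brief Add a clone's meta-attributes to an XML element as name/value pairs
+ *
+ * \param[in]     rsc  Clone resource whose meta-attributes should be added
+ * \param[in,out] xml  XML element to add meta-attributes to
+ */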
 void
 clone_append_meta(pe_resource_t * rsc, xmlNode * xml)
 {
     char *name = NULL;
     clone_variant_data_t *clone_data = NULL;
 
     get_clone_variant_data(clone_data, rsc);
 
     name = crm_meta_name(XML_RSC_ATTR_UNIQUE);
     crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_unique));
     free(name);
 
     name = crm_meta_name(XML_RSC_ATTR_NOTIFY);
     crm_xml_add(xml, name, pe__rsc_bool_str(rsc, pe_rsc_notify));
     free(name);
 
     name = crm_meta_name(XML_RSC_ATTR_INCARNATION_MAX);
     crm_xml_add_int(xml, name, clone_data->clone_max);
     free(name);
 
     name = crm_meta_name(XML_RSC_ATTR_INCARNATION_NODEMAX);
     crm_xml_add_int(xml, name, clone_data->clone_node_max);
     free(name);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_promotable)) {
         int promoted_max = pe__clone_promoted_max(rsc);
         int promoted_node_max = pe__clone_promoted_node_max(rsc);
 
         name = crm_meta_name(XML_RSC_ATTR_PROMOTED_MAX);
         crm_xml_add_int(xml, name, promoted_max);
         free(name);
 
         name = crm_meta_name(XML_RSC_ATTR_PROMOTED_NODEMAX);
         crm_xml_add_int(xml, name, promoted_node_max);
         free(name);
 
         /* @COMPAT Maintain backward compatibility with resource agents that
          * expect the old names (deprecated since 2.0.0).
          */
         name = crm_meta_name(PCMK_XA_PROMOTED_MAX_LEGACY);
         crm_xml_add_int(xml, name, promoted_max);
         free(name);
 
         name = crm_meta_name(PCMK_XA_PROMOTED_NODE_MAX_LEGACY);
         crm_xml_add_int(xml, name, promoted_node_max);
         free(name);
     }
 }
 
 // Clone implementation of resource_alloc_functions_t:add_utilization()
 void
 pcmk__clone_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
                             GList *all_rscs, GHashTable *utilization)
 {
     bool existing = false;
     pe_resource_t *child = NULL;
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return;
     }
 
     // Look for any child already existing in the list
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         child = (pe_resource_t *) iter->data;
         if (g_list_find(all_rscs, child)) {
             existing = true; // Keep checking remaining children
         } else {
             // If this is a clone of a group, look for group's members
             for (GList *member_iter = child->children; member_iter != NULL;
                  member_iter = member_iter->next) {
 
                 pe_resource_t *member = (pe_resource_t *) member_iter->data;
 
                 if (g_list_find(all_rscs, member) != NULL) {
                     // Add *child's* utilization, not group member's
                     child->cmds->add_utilization(child, orig_rsc, all_rscs,
                                                  utilization);
                     existing = true;
                     break;
                 }
             }
         }
     }
 
     if (!existing && (rsc->children != NULL)) {
         // If nothing was found, still add first child's utilization
         child = (pe_resource_t *) rsc->children->data;
 
         child->cmds->add_utilization(child, orig_rsc, all_rscs, utilization);
     }
 }
 
 // Clone implementation of resource_alloc_functions_t:shutdown_lock()
 void
 pcmk__clone_shutdown_lock(pe_resource_t *rsc)
 {
     return; // Clones currently don't support shutdown locks
 }
diff --git a/lib/pacemaker/pcmk_sched_group.c b/lib/pacemaker/pcmk_sched_group.c
index 96a27d64dd..24d24b8c6b 100644
--- a/lib/pacemaker/pcmk_sched_group.c
+++ b/lib/pacemaker/pcmk_sched_group.c
@@ -1,728 +1,737 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdbool.h>
 
 #include <crm/msg_xml.h>
 
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 #define VARIANT_GROUP 1
 #include <lib/pengine/variant.h>
 
 /*!
  * \internal
  * \brief Expand a group's colocations to its members
  *
  * \param[in,out] rsc  Group resource
  */
 static void
 expand_group_colocations(pe_resource_t *rsc)
 {
     group_variant_data_t *group_data = NULL;
     pe_resource_t *member = NULL;
     bool any_unmanaged = false;
 
     get_group_variant_data(group_data, rsc);
 
     // Treat "group with R" colocations as "first member with R"
     member = group_data->first_child;
     member->rsc_cons = g_list_concat(member->rsc_cons, rsc->rsc_cons);
 
     /* The above works for the whole group because each group member is
      * colocated with the previous one.
      *
      * However, there is a special case when a group has a mandatory colocation
      * with a resource that can't start. In that case,
      * pcmk__block_colocated_starts() will ensure that dependent resources in
      * mandatory colocations (i.e. the first member for groups) can't start
      * either. But if any group member is unmanaged and already started, the
      * internal group colocations are no longer sufficient to make that apply to
      * later members.
      *
      * To handle that case, add mandatory colocations to each member after the
      * first.
      */
     any_unmanaged = !pcmk_is_set(member->flags, pe_rsc_managed);
     for (GList *item = rsc->children->next; item != NULL; item = item->next) {
         member = item->data;
         if (any_unmanaged) {
             for (GList *cons_iter = rsc->rsc_cons; cons_iter != NULL;
                  cons_iter = cons_iter->next) {
 
                 pcmk__colocation_t *constraint = (pcmk__colocation_t *) cons_iter->data;
 
                 if (constraint->score == INFINITY) {
                     member->rsc_cons = g_list_prepend(member->rsc_cons, constraint);
                 }
             }
         } else if (!pcmk_is_set(member->flags, pe_rsc_managed)) {
             any_unmanaged = true;
         }
     }
 
     rsc->rsc_cons = NULL;
 
     // Treat "R with group" colocations as "R with last member"
     member = group_data->last_child;
     member->rsc_cons_lhs = g_list_concat(member->rsc_cons_lhs,
                                          rsc->rsc_cons_lhs);
     rsc->rsc_cons_lhs = NULL;
 }
 
+/*!
+ * \internal
+ * \brief Assign a group resource to a node
+ *
+ * \param[in] rsc     Resource to assign to a node
+ * \param[in] prefer  Node to prefer, if all else is equal
+ *
+ * \return Node that \p rsc is assigned to, if assigned entirely to one node
+ */
 pe_node_t *
 pcmk__group_allocate(pe_resource_t *rsc, pe_node_t *prefer)
 {
     pe_node_t *node = NULL;
     pe_node_t *group_node = NULL;
     GList *gIter = NULL;
     group_variant_data_t *group_data = NULL;
 
     get_group_variant_data(group_data, rsc);
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return rsc->allocated_to;
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
         pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
         return NULL;
     }
 
     if (group_data->first_child == NULL) {
         // Nothing to allocate
         pe__clear_resource_flags(rsc, pe_rsc_provisional);
         return NULL;
     }
 
     pe__set_resource_flags(rsc, pe_rsc_allocating);
     rsc->role = group_data->first_child->role;
 
     expand_group_colocations(rsc);
 
     pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
                           rsc, __func__, rsc->allowed_nodes, rsc->cluster);
 
     gIter = rsc->children;
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         pe_rsc_trace(rsc, "Allocating group %s member %s",
                      rsc->id, child_rsc->id);
-        node = child_rsc->cmds->allocate(child_rsc, prefer);
+        node = child_rsc->cmds->assign(child_rsc, prefer);
         if (group_node == NULL) {
             group_node = node;
         }
     }
 
     pe__set_next_role(rsc, group_data->first_child->next_role,
                       "first group member");
     pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
 
     if (group_data->colocated) {
         return group_node;
     }
     return NULL;
 }
 
 void group_update_pseudo_status(pe_resource_t * parent, pe_resource_t * child);
 
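+/*!
+ * \internal
+ * \brief Create all actions needed for a given group resource
+ *
+ * \param[in] rsc  Group resource to create actions for
+ */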
 void
 group_create_actions(pe_resource_t *rsc)
 {
     pe_action_t *op = NULL;
     const char *value = NULL;
     GList *gIter = rsc->children;
 
     pe_rsc_trace(rsc, "Creating actions for %s", rsc->id);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->create_actions(child_rsc);
         group_update_pseudo_status(rsc, child_rsc);
     }
 
     op = start_action(rsc, NULL, TRUE /* !group_data->child_starting */ );
     pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
 
     op = custom_action(rsc, started_key(rsc),
                        RSC_STARTED, NULL,
                        TRUE /* !group_data->child_starting */ ,
                        TRUE, rsc->cluster);
     pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
 
     op = stop_action(rsc, NULL, TRUE /* !group_data->child_stopping */ );
     pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
 
     op = custom_action(rsc, stopped_key(rsc),
                        RSC_STOPPED, NULL,
                        TRUE /* !group_data->child_stopping */ ,
                        TRUE, rsc->cluster);
     pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
 
     value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_PROMOTABLE);
     if (crm_is_true(value)) {
         op = custom_action(rsc, demote_key(rsc), RSC_DEMOTE, NULL, TRUE, TRUE,
                            rsc->cluster);
         pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
 
         op = custom_action(rsc, demoted_key(rsc), RSC_DEMOTED, NULL, TRUE, TRUE,
                            rsc->cluster);
         pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
 
         op = custom_action(rsc, promote_key(rsc), RSC_PROMOTE, NULL, TRUE, TRUE,
                            rsc->cluster);
         pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
 
         op = custom_action(rsc, promoted_key(rsc), RSC_PROMOTED, NULL, TRUE,
                            TRUE, rsc->cluster);
         pe__set_action_flags(op, pe_action_pseudo|pe_action_runnable);
     }
 }
 
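+/*!
+ * \internal
+ * \brief Update a group's start/stop status based on a member's actions
+ *
+ * \param[in,out] parent  Group resource to update
+ * \param[in]     child   Group member whose actions should be checked
+ */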
 void
 group_update_pseudo_status(pe_resource_t * parent, pe_resource_t * child)
 {
     GList *gIter = child->actions;
     group_variant_data_t *group_data = NULL;
 
     get_group_variant_data(group_data, parent);
 
     if (group_data->ordered == FALSE) {
         /* If this group is not ordered, then leave the meta-actions as optional */
         return;
     }
 
     if (group_data->child_stopping && group_data->child_starting) {
         return;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_action_t *action = (pe_action_t *) gIter->data;
 
         if (pcmk_is_set(action->flags, pe_action_optional)) {
             continue;
         }
         if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_casei)
             && pcmk_is_set(action->flags, pe_action_runnable)) {
 
             group_data->child_stopping = TRUE;
             pe_rsc_trace(action->rsc, "Based on %s the group is stopping", action->uuid);
 
         } else if (pcmk__str_eq(RSC_START, action->task, pcmk__str_casei)
                    && pcmk_is_set(action->flags, pe_action_runnable)) {
             group_data->child_starting = TRUE;
             pe_rsc_trace(action->rsc, "Based on %s the group is starting", action->uuid);
         }
     }
 }
 
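+/*!
+ * \internal
+ * \brief Create implicit constraints needed for a group resource
+ *
+ * \param[in] rsc  Group resource to create implicit constraints for
+ */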
 void
 group_internal_constraints(pe_resource_t *rsc)
 {
     GList *gIter = rsc->children;
     pe_resource_t *last_rsc = NULL;
     pe_resource_t *last_active = NULL;
     pe_resource_t *top = uber_parent(rsc);
     group_variant_data_t *group_data = NULL;
 
     get_group_variant_data(group_data, rsc);
 
     pcmk__order_resource_actions(rsc, RSC_STOPPED, rsc, RSC_START,
                                  pe_order_optional);
     pcmk__order_resource_actions(rsc, RSC_START, rsc, RSC_STARTED,
                                  pe_order_runnable_left);
     pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_STOPPED,
                                  pe_order_runnable_left);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
         int stop = pe_order_none;
         int stopped = pe_order_implies_then_printed;
         int start = pe_order_implies_then | pe_order_runnable_left;
         int started =
             pe_order_runnable_left | pe_order_implies_then | pe_order_implies_then_printed;
 
         child_rsc->cmds->internal_constraints(child_rsc);
 
         if (last_rsc == NULL) {
             if (group_data->ordered) {
                 pe__set_order_flags(stop, pe_order_optional);
                 stopped = pe_order_implies_then;
             }
 
         } else if (group_data->colocated) {
             pcmk__new_colocation("group:internal_colocation", NULL, INFINITY,
                                  child_rsc, last_rsc, NULL, NULL,
                                  pcmk_is_set(child_rsc->flags, pe_rsc_critical),
                                  rsc->cluster);
         }
 
         if (pcmk_is_set(top->flags, pe_rsc_promotable)) {
             pcmk__order_resource_actions(rsc, RSC_DEMOTE, child_rsc, RSC_DEMOTE,
                                          stop|pe_order_implies_first_printed);
 
             pcmk__order_resource_actions(child_rsc, RSC_DEMOTE, rsc,
                                          RSC_DEMOTED, stopped);
 
             pcmk__order_resource_actions(child_rsc, RSC_PROMOTE, rsc,
                                          RSC_PROMOTED, started);
 
             pcmk__order_resource_actions(rsc, RSC_PROMOTE, child_rsc,
                                          RSC_PROMOTE,
                                          pe_order_implies_first_printed);
 
         }
 
         pcmk__order_starts(rsc, child_rsc, pe_order_implies_first_printed);
         pcmk__order_stops(rsc, child_rsc,
                           stop|pe_order_implies_first_printed);
 
         pcmk__order_resource_actions(child_rsc, RSC_STOP, rsc, RSC_STOPPED,
                                      stopped);
         pcmk__order_resource_actions(child_rsc, RSC_START, rsc, RSC_STARTED,
                                      started);
 
         if (group_data->ordered == FALSE) {
             pcmk__order_starts(rsc, child_rsc,
                                start|pe_order_implies_first_printed);
             if (pcmk_is_set(top->flags, pe_rsc_promotable)) {
                 pcmk__order_resource_actions(rsc, RSC_PROMOTE, child_rsc,
                                              RSC_PROMOTE,
                                              start|pe_order_implies_first_printed);
             }
 
         } else if (last_rsc != NULL) {
             pcmk__order_starts(last_rsc, child_rsc, start);
             pcmk__order_stops(child_rsc, last_rsc,
                               pe_order_optional|pe_order_restart);
 
             if (pcmk_is_set(top->flags, pe_rsc_promotable)) {
                 pcmk__order_resource_actions(last_rsc, RSC_PROMOTE, child_rsc,
                                              RSC_PROMOTE, start);
                 pcmk__order_resource_actions(child_rsc, RSC_DEMOTE, last_rsc,
                                              RSC_DEMOTE, pe_order_optional);
             }
 
         } else {
             pcmk__order_starts(rsc, child_rsc, pe_order_none);
             if (pcmk_is_set(top->flags, pe_rsc_promotable)) {
                 pcmk__order_resource_actions(rsc, RSC_PROMOTE, child_rsc,
                                              RSC_PROMOTE, pe_order_none);
             }
         }
 
         /* Look for partially active groups, and make sure they still shut
          * down in sequence.
          */
         if (child_rsc->running_on) {
             if (group_data->ordered
                 && last_rsc
                 && last_rsc->running_on == NULL && last_active && last_active->running_on) {
                 pcmk__order_stops(child_rsc, last_active, pe_order_optional);
             }
             last_active = child_rsc;
         }
 
         last_rsc = child_rsc;
     }
 
     if (group_data->ordered && last_rsc != NULL) {
         int stop_stop_flags = pe_order_implies_then;
         int stop_stopped_flags = pe_order_optional;
 
         pcmk__order_stops(rsc, last_rsc, stop_stop_flags);
         pcmk__order_resource_actions(last_rsc, RSC_STOP, rsc, RSC_STOPPED,
                                      stop_stopped_flags);
 
         if (pcmk_is_set(top->flags, pe_rsc_promotable)) {
             pcmk__order_resource_actions(rsc, RSC_DEMOTE, last_rsc, RSC_DEMOTE,
                                          stop_stop_flags);
             pcmk__order_resource_actions(last_rsc, RSC_DEMOTE, rsc, RSC_DEMOTED,
                                          stop_stopped_flags);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Apply a colocation's score to node weights or resource priority
  *
  * Given a colocation constraint, apply its score to the dependent's
  * allowed node weights (if we are still placing resources) or priority (if
  * we are choosing promotable clone instance roles).
  *
  * \param[in] dependent      Dependent resource in colocation
  * \param[in] primary        Primary resource in colocation
  * \param[in] colocation     Colocation constraint to apply
  * \param[in] for_dependent  true if called on behalf of dependent
  */
 void
 pcmk__group_apply_coloc_score(pe_resource_t *dependent, pe_resource_t *primary,
                               pcmk__colocation_t *colocation,
                               bool for_dependent)
 {
     GList *gIter = NULL;
     group_variant_data_t *group_data = NULL;
 
     CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
               return);
 
     if (!for_dependent) {
         goto for_primary;
     }
 
     gIter = dependent->children;
     pe_rsc_trace(dependent, "Processing constraints from %s", dependent->id);
 
     get_group_variant_data(group_data, dependent);
 
     if (group_data->colocated) {
         group_data->first_child->cmds->apply_coloc_score(group_data->first_child,
                                                          primary, colocation,
                                                          true);
         return;
 
     } else if (colocation->score >= INFINITY) {
         pcmk__config_err("%s: Cannot perform mandatory colocation "
                          "between non-colocated group and %s",
                          dependent->id, primary->id);
         return;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->apply_coloc_score(child_rsc, primary, colocation,
                                            true);
     }
     return;
 
 for_primary:
     gIter = primary->children;
     get_group_variant_data(group_data, primary);
     CRM_CHECK(dependent->variant == pe_native, return);
 
     pe_rsc_trace(primary,
                  "Processing colocation %s (%s with group %s) for primary",
                  colocation->id, dependent->id, primary->id);
 
     if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
         return;
 
     } else if (group_data->colocated && group_data->first_child) {
         if (colocation->score >= INFINITY) {
             // Dependent can't start until group is fully up
             group_data->last_child->cmds->apply_coloc_score(dependent,
                                                             group_data->last_child,
                                                             colocation, false);
         } else {
             // Dependent can start as long as group is partially up
             group_data->first_child->cmds->apply_coloc_score(dependent,
                                                              group_data->first_child,
                                                              colocation, false);
         }
 
         return;
 
     } else if (colocation->score >= INFINITY) {
         pcmk__config_err("%s: Cannot perform mandatory colocation with"
                          " non-colocated group %s", dependent->id, primary->id);
         return;
     }
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->apply_coloc_score(dependent, child_rsc, colocation,
                                            false);
     }
 }
 
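+/*!
+ * \internal
+ * \brief Get flags for a group action based on its members' actions
+ *
+ * \param[in] action  Group action to check
+ * \param[in] node    If not NULL, only consider member actions on this node
+ *
+ * \return Flags appropriate to \p action
+ */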
 enum pe_action_flags
 group_action_flags(pe_action_t * action, pe_node_t * node)
 {
     GList *gIter = NULL;
     enum pe_action_flags flags = (pe_action_optional | pe_action_runnable | pe_action_pseudo);
 
     for (gIter = action->rsc->children; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
         enum action_tasks task = get_complex_task(child, action->task, TRUE);
         const char *task_s = task2text(task);
         pe_action_t *child_action = find_first_action(child->actions, NULL, task_s, node);
 
         if (child_action) {
             enum pe_action_flags child_flags = child->cmds->action_flags(child_action, node);
 
             if (pcmk_is_set(flags, pe_action_optional)
                 && !pcmk_is_set(child_flags, pe_action_optional)) {
                 pe_rsc_trace(action->rsc, "%s is mandatory because of %s", action->uuid,
                              child_action->uuid);
                 pe__clear_raw_action_flags(flags, "group action",
                                            pe_action_optional);
                 pe__clear_action_flags(action, pe_action_optional);
             }
             if (!pcmk__str_eq(task_s, action->task, pcmk__str_casei)
                 && pcmk_is_set(flags, pe_action_runnable)
                 && !pcmk_is_set(child_flags, pe_action_runnable)) {
 
                 pe_rsc_trace(action->rsc, "%s is not runnable because of %s", action->uuid,
                              child_action->uuid);
                 pe__clear_raw_action_flags(flags, "group action",
                                            pe_action_runnable);
                 pe__clear_action_flags(action, pe_action_runnable);
             }
 
         } else if (task != stop_rsc && task != action_demote) {
             pe_rsc_trace(action->rsc, "%s is not runnable because of %s (not found in %s)",
                          action->uuid, task_s, child->id);
             pe__clear_raw_action_flags(flags, "group action",
                                        pe_action_runnable);
         }
     }
 
     return flags;
 }
 
 /*!
  * \internal
  * \brief Update two actions according to an ordering between them
  *
  * Given information about an ordering of two actions, update the actions'
  * flags (and runnable_before members if appropriate) as appropriate for the
  * ordering. In some cases, the ordering could be disabled as well.
  *
  * \param[in] first     'First' action in an ordering
  * \param[in] then      'Then' action in an ordering
  * \param[in] node      If not NULL, limit scope of ordering to this node
  *                      (only used when interleaving instances)
  * \param[in] flags     Action flags for \p first for ordering purposes
  * \param[in] filter    Action flags to limit scope of certain updates (may
  *                      include pe_action_optional to affect only mandatory
  *                      actions, and pe_action_runnable to affect only
  *                      runnable actions)
  * \param[in] type      Group of enum pe_ordering flags to apply
  * \param[in] data_set  Cluster working set
  *
  * \return Group of enum pcmk__updated flags indicating what was updated
  */
 uint32_t
 group_update_actions(pe_action_t *first, pe_action_t *then, pe_node_t *node,
                      uint32_t flags, uint32_t filter, uint32_t type,
                      pe_working_set_t *data_set)
 {
     GList *gIter = NULL;
     uint32_t changed = pcmk__updated_none;
 
     CRM_ASSERT(then->rsc != NULL);
     gIter = then->rsc->children;
     changed |= pcmk__update_ordered_actions(first, then, node, flags, filter,
                                             type, data_set);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child = (pe_resource_t *) gIter->data;
         pe_action_t *child_action = find_first_action(child->actions, NULL, then->task, node);
 
         if (child_action) {
             changed |= child->cmds->update_ordered_actions(first, child_action,
                                                            node, flags, filter,
                                                            type, data_set);
         }
     }
 
     return changed;
 }
 
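+/*!
+ * \internal
+ * \brief Apply a location constraint to a group and its members
+ *
+ * \param[in] rsc         Group resource to apply constraint to
+ * \param[in] constraint  Location constraint to apply
+ */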
 void
 group_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
 {
     GList *gIter = rsc->children;
     GList *saved = constraint->node_list_rh;
     GList *zero = pcmk__copy_node_list(constraint->node_list_rh, true);
     gboolean reset_scores = TRUE;
     group_variant_data_t *group_data = NULL;
 
     get_group_variant_data(group_data, rsc);
 
     pe_rsc_debug(rsc, "Processing rsc_location %s for %s", constraint->id, rsc->id);
 
     pcmk__apply_location(constraint, rsc);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) gIter->data;
 
         child_rsc->cmds->rsc_location(child_rsc, constraint);
         if (group_data->colocated && reset_scores) {
             reset_scores = FALSE;
             constraint->node_list_rh = zero;
         }
     }
 
     constraint->node_list_rh = saved;
     g_list_free_full(zero, free);
 }
 
 /*!
  * \internal
  * \brief Update nodes with scores of colocated resources' nodes
  *
  * Given a table of nodes and a resource, update the nodes' scores with the
  * scores of the best nodes matching the attribute used for each of the
  * resource's relevant colocations.
  *
  * \param[in,out] rsc      Resource to check colocations for
  * \param[in]     log_id   Resource ID to use in log messages
  * \param[in,out] nodes    Nodes to update
  * \param[in]     attr     Colocation attribute (NULL to use default)
  * \param[in]     factor   Incorporate scores multiplied by this factor
  * \param[in]     flags    Bitmask of enum pcmk__coloc_select values
  *
  * \note The caller remains responsible for freeing \p *nodes.
  */
 void
 pcmk__group_add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
                                       GHashTable **nodes, const char *attr,
                                       float factor, uint32_t flags)
 {
     GList *gIter = NULL;
     pe_resource_t *member = NULL;
     group_variant_data_t *group_data = NULL;
 
     CRM_CHECK((rsc != NULL) && (nodes != NULL), return);
     gIter = rsc->rsc_cons_lhs;
 
     if (log_id == NULL) {
         log_id = rsc->id;
     }
 
     get_group_variant_data(group_data, rsc);
 
     if (pcmk_is_set(rsc->flags, pe_rsc_merging)) {
         pe_rsc_info(rsc, "Breaking dependency loop with %s at %s",
                     rsc->id, log_id);
         return;
     }
 
     pe__set_resource_flags(rsc, pe_rsc_merging);
 
     member = group_data->first_child;
     member->cmds->add_colocated_node_scores(member, log_id, nodes, attr,
                                             factor, flags);
 
     for (; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
 
         pcmk__add_colocated_node_scores(constraint->dependent, rsc->id, nodes,
                                         constraint->node_attribute,
                                         constraint->score / (float) INFINITY,
                                         flags);
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_merging);
 }
 
 void
 group_append_meta(pe_resource_t * rsc, xmlNode * xml)
 {
 }
 
 // Group implementation of resource_alloc_functions_t:colocated_resources()
 GList *
 pcmk__group_colocated_resources(pe_resource_t *rsc, pe_resource_t *orig_rsc,
                                 GList *colocated_rscs)
 {
     pe_resource_t *child_rsc = NULL;
     group_variant_data_t *group_data = NULL;
 
     get_group_variant_data(group_data, rsc);
 
     if (orig_rsc == NULL) {
         orig_rsc = rsc;
     }
 
     if (group_data->colocated || pe_rsc_is_clone(rsc->parent)) {
         /* This group has colocated members and/or is cloned -- either way,
          * add every child's colocated resources to the list.
          */
         for (GList *gIter = rsc->children; gIter != NULL; gIter = gIter->next) {
             child_rsc = (pe_resource_t *) gIter->data;
             colocated_rscs = child_rsc->cmds->colocated_resources(child_rsc,
                                                                   orig_rsc,
                                                                   colocated_rscs);
         }
 
     } else if (group_data->first_child != NULL) {
         /* This group's members are not colocated, and the group is not cloned,
          * so just add the first child's colocations to the list.
          */
         child_rsc = group_data->first_child;
         colocated_rscs = child_rsc->cmds->colocated_resources(child_rsc,
                                                               orig_rsc,
                                                               colocated_rscs);
     }
 
     // Now consider colocations where the group itself is specified
     colocated_rscs = pcmk__colocated_resources(rsc, orig_rsc, colocated_rscs);
 
     return colocated_rscs;
 }
 
 // Group implementation of resource_alloc_functions_t:add_utilization()
 void
 pcmk__group_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
                             GList *all_rscs, GHashTable *utilization)
 {
     group_variant_data_t *group_data = NULL;
     pe_resource_t *child = NULL;
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return;
     }
 
     pe_rsc_trace(orig_rsc, "%s: Adding group %s as colocated utilization",
                  orig_rsc->id, rsc->id);
     get_group_variant_data(group_data, rsc);
     if (group_data->colocated || pe_rsc_is_clone(rsc->parent)) {
         // Every group member will be on same node, so sum all members
         for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
             child = (pe_resource_t *) iter->data;
 
             if (pcmk_is_set(child->flags, pe_rsc_provisional)
                 && (g_list_find(all_rscs, child) == NULL)) {
                 child->cmds->add_utilization(child, orig_rsc, all_rscs,
                                              utilization);
             }
         }
 
     } else {
         // Just add first child's utilization
         child = group_data->first_child;
         if ((child != NULL)
             && pcmk_is_set(child->flags, pe_rsc_provisional)
             && (g_list_find(all_rscs, child) == NULL)) {
 
             child->cmds->add_utilization(child, orig_rsc, all_rscs,
                                          utilization);
         }
     }
 }
 
 // Group implementation of resource_alloc_functions_t:shutdown_lock()
 void
 pcmk__group_shutdown_lock(pe_resource_t *rsc)
 {
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         pe_resource_t *child = (pe_resource_t *) iter->data;
 
         child->cmds->shutdown_lock(child);
     }
 }
diff --git a/lib/pacemaker/pcmk_sched_primitive.c b/lib/pacemaker/pcmk_sched_primitive.c
index 4ed6a03af0..7cf15083e9 100644
--- a/lib/pacemaker/pcmk_sched_primitive.c
+++ b/lib/pacemaker/pcmk_sched_primitive.c
@@ -1,1882 +1,1891 @@
 /*
  * Copyright 2004-2022 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdbool.h>
 
 #include <crm/msg_xml.h>
 #include <crm/pengine/rules.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 static void Recurring(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
                       pe_working_set_t *data_set);
 static void RecurringOp(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
                         xmlNode *operation, pe_working_set_t *data_set);
 static void Recurring_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
                               pe_working_set_t *data_set);
 static void RecurringOp_Stopped(pe_resource_t *rsc, pe_action_t *start, pe_node_t *node,
                                 xmlNode *operation, pe_working_set_t *data_set);
 
 gboolean DeleteRsc(pe_resource_t * rsc, pe_node_t * node, gboolean optional, pe_working_set_t * data_set);
 static bool StopRsc(pe_resource_t *rsc, pe_node_t *next, bool optional);
 static bool StartRsc(pe_resource_t *rsc, pe_node_t *next, bool optional);
 static bool DemoteRsc(pe_resource_t *rsc, pe_node_t *next, bool optional);
 static bool PromoteRsc(pe_resource_t *rsc, pe_node_t *next, bool optional);
 static bool RoleError(pe_resource_t *rsc, pe_node_t *next, bool optional);
 static bool NullOp(pe_resource_t *rsc, pe_node_t *next, bool optional);
 
 /* This array says what the *next* role should be when transitioning from one
  * role to another. For example, when going from Stopped to Promoted, the next
  * role is RSC_ROLE_UNPROMOTED, because the resource must be started before it
  * can be promoted. The current state then becomes Unpromoted, which is fed
  * into this array again, giving a next role of RSC_ROLE_PROMOTED.
  */
 static enum rsc_role_e rsc_state_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
 /* Current state  Next state*/
 /*                 Unknown           Stopped           Started           Unpromoted           Promoted */
 /* Unknown */    { RSC_ROLE_UNKNOWN, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED,    RSC_ROLE_STOPPED },
 /* Stopped */    { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_UNPROMOTED },
 /* Started */    { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STARTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_PROMOTED },
 /* Unpromoted */ { RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_STOPPED, RSC_ROLE_UNPROMOTED, RSC_ROLE_PROMOTED },
 /* Promoted  */  { RSC_ROLE_STOPPED, RSC_ROLE_UNPROMOTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_UNPROMOTED, RSC_ROLE_PROMOTED },
 };
 
 typedef bool (*rsc_transition_fn)(pe_resource_t *rsc, pe_node_t *next,
                                   bool optional);
 
 // This array picks the function needed to transition from one role to another
 static rsc_transition_fn rsc_action_matrix[RSC_ROLE_MAX][RSC_ROLE_MAX] = {
 /* Current state   Next state                                            */
 /*                 Unknown    Stopped    Started    Unpromoted Promoted  */
 /* Unknown */    { RoleError, StopRsc,   RoleError, RoleError, RoleError,    },
 /* Stopped */    { RoleError, NullOp,    StartRsc,  StartRsc,  RoleError,    },
 /* Started */    { RoleError, StopRsc,   NullOp,    NullOp,    PromoteRsc,   },
 /* Unpromoted */ { RoleError, StopRsc,   StopRsc,   NullOp,    PromoteRsc,   },
 /* Promoted  */  { RoleError, DemoteRsc, DemoteRsc, DemoteRsc, NullOp,       },
 };
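+
+/* For example, taking a resource from Stopped to Promoted requires two steps:
+ * rsc_state_matrix[Stopped][Promoted] yields Unpromoted, so
+ * rsc_action_matrix[Stopped][Unpromoted] (StartRsc) is scheduled first; the
+ * lookup then repeats from Unpromoted, scheduling
+ * rsc_action_matrix[Unpromoted][Promoted] (PromoteRsc).
+ */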
 
 /*!
  * \internal
  * \brief Get a list of a resource's allowed nodes sorted by node weight
  *
  * \param[in] rsc  Resource to check
  *
  * \return List of allowed nodes sorted by node weight
  */
 static GList *
 sorted_allowed_nodes(const pe_resource_t *rsc)
 {
     if (rsc->allowed_nodes != NULL) {
         GList *nodes = g_hash_table_get_values(rsc->allowed_nodes);
 
         if (nodes != NULL) {
             return pcmk__sort_nodes(nodes, pe__current_node(rsc));
         }
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Assign a resource to its best allowed node, if possible
  *
  * \param[in] rsc     Resource to choose a node for
  * \param[in] prefer  If not NULL, prefer this node when all else equal
  *
  * \return true if \p rsc could be assigned to a node, otherwise false
  */
 static bool
 assign_best_node(pe_resource_t *rsc, pe_node_t *prefer)
 {
     GList *nodes = NULL;
     pe_node_t *chosen = NULL;
     pe_node_t *best = NULL;
     bool result = false;
 
     pcmk__ban_insufficient_capacity(rsc, &prefer);
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         // We've already finished assignment of resources to nodes
         return rsc->allocated_to != NULL;
     }
 
     // Sort allowed nodes by weight
     nodes = sorted_allowed_nodes(rsc);
     if (nodes != NULL) {
         best = (pe_node_t *) nodes->data; // First node has best score
     }
 
     if ((prefer != NULL) && (nodes != NULL)) {
         // Get the allowed node version of prefer
         chosen = g_hash_table_lookup(rsc->allowed_nodes, prefer->details->id);
 
         if (chosen == NULL) {
             pe_rsc_trace(rsc, "Preferred node %s for %s was unknown",
                          prefer->details->uname, rsc->id);
 
         /* Favor the preferred node as long as its weight is at least as good as
          * the best allowed node's.
          *
          * An alternative would be to favor the preferred node even if the best
          * node is better, when the best node's weight is less than INFINITY.
          */
         } else if (chosen->weight < best->weight) {
             pe_rsc_trace(rsc, "Preferred node %s for %s was unsuitable",
                          chosen->details->uname, rsc->id);
             chosen = NULL;
 
         } else if (!pcmk__node_available(chosen, true, false)) {
             pe_rsc_trace(rsc, "Preferred node %s for %s was unavailable",
                          chosen->details->uname, rsc->id);
             chosen = NULL;
 
         } else {
             pe_rsc_trace(rsc,
                          "Chose preferred node %s for %s (ignoring %d candidates)",
                          chosen->details->uname, rsc->id, g_list_length(nodes));
         }
     }
 
     if ((chosen == NULL) && (best != NULL)) {
         /* Either there is no preferred node, or the preferred node is not
          * suitable, but another node is allowed to run the resource.
          */
 
         chosen = best;
 
         if (!pe_rsc_is_unique_clone(rsc->parent)
             && (chosen->weight > 0) // Zero not acceptable
             && pcmk__node_available(chosen, false, false)) {
             /* If the resource is already running on a node, prefer that node if
              * it is just as good as the chosen node.
              *
              * We don't do this for unique clone instances, because
              * distribute_children() has already assigned instances to their
              * running nodes when appropriate, and if we get here, we don't want
              * remaining unassigned instances to prefer a node that's already
              * running another instance.
              */
             pe_node_t *running = pe__current_node(rsc);
 
             if (running == NULL) {
                 // Nothing to do
 
             } else if (!pcmk__node_available(running, true, false)) {
                 pe_rsc_trace(rsc, "Current node for %s (%s) can't run resources",
                              rsc->id, running->details->uname);
 
             } else {
                 int nodes_with_best_score = 1;
 
                 for (GList *iter = nodes->next; iter; iter = iter->next) {
                     pe_node_t *allowed = (pe_node_t *) iter->data;
 
                     if (allowed->weight != chosen->weight) {
                         // The nodes are sorted by weight, so no more are equal
                         break;
                     }
                     if (allowed->details == running->details) {
                         // Scores are equal, so prefer the current node
                         chosen = allowed;
                     }
                     nodes_with_best_score++;
                 }
 
                 if (nodes_with_best_score > 1) {
                     do_crm_log(((chosen->weight >= INFINITY)? LOG_WARNING : LOG_INFO),
                                "Chose node %s for %s from %d nodes with score %s",
                                chosen->details->uname, rsc->id,
                                nodes_with_best_score,
                                pcmk_readable_score(chosen->weight));
                 }
             }
         }
 
         pe_rsc_trace(rsc, "Chose node %s for %s from %d candidates",
                      chosen->details->uname, rsc->id, g_list_length(nodes));
     }
 
     result = pcmk__assign_primitive(rsc, chosen, false);
     g_list_free(nodes);
     return result;
 }
 
+/*!
+ * \internal
+ * \brief Assign a primitive resource to a node
+ *
+ * \param[in] rsc     Resource to assign to a node
+ * \param[in] prefer  Node to prefer, if all else is equal
+ *
+ * \return Node that \p rsc is assigned to, if assigned entirely to one node
+ */
 pe_node_t *
 pcmk__native_allocate(pe_resource_t *rsc, pe_node_t *prefer)
 {
     GList *gIter = NULL;
 
     if (rsc->parent && !pcmk_is_set(rsc->parent->flags, pe_rsc_allocating)) {
         /* never allocate children on their own */
         pe_rsc_debug(rsc, "Escalating allocation of %s to its parent: %s", rsc->id,
                      rsc->parent->id);
-        rsc->parent->cmds->allocate(rsc->parent, prefer);
+        rsc->parent->cmds->assign(rsc->parent, prefer);
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return rsc->allocated_to;
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_allocating)) {
         pe_rsc_debug(rsc, "Dependency loop detected involving %s", rsc->id);
         return NULL;
     }
 
     pe__set_resource_flags(rsc, pe_rsc_allocating);
     pe__show_node_weights(true, rsc, "Pre-alloc", rsc->allowed_nodes,
                           rsc->cluster);
 
     for (gIter = rsc->rsc_cons; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
 
         GHashTable *archive = NULL;
         pe_resource_t *primary = constraint->primary;
 
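+        /* If this colocation could leave the resource with no available nodes
+         * (a promoted dependent role, or a finite negative score), archive the
+         * current node scores first so they can be rolled back if that
+         * happens.
+         */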
         if ((constraint->dependent_role >= RSC_ROLE_PROMOTED)
             || (constraint->score < 0 && constraint->score > -INFINITY)) {
             archive = pcmk__copy_node_table(rsc->allowed_nodes);
         }
 
         pe_rsc_trace(rsc,
                      "%s: Allocating %s first (constraint=%s score=%d role=%s)",
                      rsc->id, primary->id, constraint->id,
                      constraint->score, role2text(constraint->dependent_role));
-        primary->cmds->allocate(primary, NULL);
+        primary->cmds->assign(primary, NULL);
         rsc->cmds->apply_coloc_score(rsc, primary, constraint, true);
         if (archive && !pcmk__any_node_available(rsc->allowed_nodes)) {
             pe_rsc_info(rsc, "%s: Rolling back scores from %s",
                         rsc->id, primary->id);
             g_hash_table_destroy(rsc->allowed_nodes);
             rsc->allowed_nodes = archive;
             archive = NULL;
         }
         if (archive) {
             g_hash_table_destroy(archive);
         }
     }
 
     pe__show_node_weights(true, rsc, "Post-coloc", rsc->allowed_nodes,
                           rsc->cluster);
 
     for (gIter = rsc->rsc_cons_lhs; gIter != NULL; gIter = gIter->next) {
         pcmk__colocation_t *constraint = (pcmk__colocation_t *) gIter->data;
         pe_resource_t *dependent = constraint->dependent;
         const float factor = constraint->score / (float) INFINITY;
 
         if (!pcmk__colocation_has_influence(constraint, NULL)) {
             continue;
         }
         pe_rsc_trace(rsc, "Merging score of '%s' constraint (%s with %s)",
                      constraint->id, constraint->dependent->id,
                      constraint->primary->id);
         dependent->cmds->add_colocated_node_scores(dependent, rsc->id,
                                                    &rsc->allowed_nodes,
                                                    constraint->node_attribute,
                                                    factor,
                                                    pcmk__coloc_select_active);
     }
 
     if (rsc->next_role == RSC_ROLE_STOPPED) {
         pe_rsc_trace(rsc, "Making sure %s doesn't get allocated", rsc->id);
         /* make sure it doesn't come up again */
         resource_location(rsc, NULL, -INFINITY, XML_RSC_ATTR_TARGET_ROLE,
                           rsc->cluster);
 
     } else if ((rsc->next_role > rsc->role)
                && !pcmk_is_set(rsc->cluster->flags, pe_flag_have_quorum)
                && (rsc->cluster->no_quorum_policy == no_quorum_freeze)) {
         crm_notice("Resource %s cannot be elevated from %s to %s: no-quorum-policy=freeze",
                    rsc->id, role2text(rsc->role), role2text(rsc->next_role));
         pe__set_next_role(rsc, rsc->role, "no-quorum-policy=freeze");
     }
 
     pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
                           rsc, __func__, rsc->allowed_nodes, rsc->cluster);
     if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)
         && !pcmk_is_set(rsc->cluster->flags, pe_flag_have_stonith_resource)) {
         pe__clear_resource_flags(rsc, pe_rsc_managed);
     }
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         const char *reason = NULL;
         pe_node_t *assign_to = NULL;
 
         pe__set_next_role(rsc, rsc->role, "unmanaged");
         assign_to = pe__current_node(rsc);
         if (assign_to == NULL) {
             reason = "inactive";
         } else if (rsc->role == RSC_ROLE_PROMOTED) {
             reason = "promoted";
         } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
             reason = "failed";
         } else {
             reason = "active";
         }
         pe_rsc_info(rsc, "Unmanaged resource %s allocated to %s: %s", rsc->id,
                     (assign_to? assign_to->details->uname : "no node"), reason);
         pcmk__assign_primitive(rsc, assign_to, true);
 
     } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stop_everything)) {
         pe_rsc_debug(rsc, "Forcing %s to stop", rsc->id);
         pcmk__assign_primitive(rsc, NULL, true);
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_provisional)
                && assign_best_node(rsc, prefer)) {
         pe_rsc_trace(rsc, "Allocated resource %s to %s", rsc->id,
                      rsc->allocated_to->details->uname);
 
     } else if (rsc->allocated_to == NULL) {
         if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
             pe_rsc_info(rsc, "Resource %s cannot run anywhere", rsc->id);
         } else if (rsc->running_on != NULL) {
             pe_rsc_info(rsc, "Stopping orphan resource %s", rsc->id);
         }
 
     } else {
         pe_rsc_debug(rsc, "Pre-Allocated resource %s to %s", rsc->id,
                      rsc->allocated_to->details->uname);
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_allocating);
 
     if (rsc->is_remote_node) {
         pe_node_t *remote_node = pe_find_node(rsc->cluster->nodes, rsc->id);
 
         CRM_ASSERT(remote_node != NULL);
         if (rsc->allocated_to && rsc->next_role != RSC_ROLE_STOPPED) {
             crm_trace("Setting Pacemaker Remote node %s to ONLINE",
                       remote_node->details->id);
             remote_node->details->online = TRUE;
             /* We shouldn't consider an unseen remote node unclean if we are
              * going to try to connect to it; otherwise we get an unnecessary
              * fence.
              */
             if (remote_node->details->unseen == TRUE) {
                 remote_node->details->unclean = FALSE;
             }
 
         } else {
             crm_trace("Setting Pacemaker Remote node %s to SHUTDOWN (next role %s, %sallocated)",
                       remote_node->details->id, role2text(rsc->next_role),
                       (rsc->allocated_to? "" : "un"));
             remote_node->details->shutdown = TRUE;
         }
     }
 
     return rsc->allocated_to;
 }
 
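+/*!
+ * \internal
+ * \brief Check whether a resource defines an operation name and interval
+ *        more than once
+ *
+ * \param[in] rsc          Resource to check
+ * \param[in] name         Operation name to match
+ * \param[in] interval_ms  Operation interval (in milliseconds) to match
+ *
+ * \return TRUE if \p rsc defines more than one operation with \p name and
+ *         \p interval_ms, otherwise FALSE
+ */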
 static gboolean
 is_op_dup(pe_resource_t *rsc, const char *name, guint interval_ms)
 {
     gboolean dup = FALSE;
     const char *id = NULL;
     const char *value = NULL;
     xmlNode *operation = NULL;
     guint interval2_ms = 0;
 
     CRM_ASSERT(rsc);
     for (operation = pcmk__xe_first_child(rsc->ops_xml); operation != NULL;
          operation = pcmk__xe_next(operation)) {
 
         if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
             value = crm_element_value(operation, "name");
             if (!pcmk__str_eq(value, name, pcmk__str_casei)) {
                 continue;
             }
 
             value = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
             interval2_ms = crm_parse_interval_spec(value);
             if (interval_ms != interval2_ms) {
                 continue;
             }
 
             if (id == NULL) {
                 id = ID(operation);
 
             } else {
                 pcmk__config_err("Operation %s is duplicate of %s (do not use "
                                  "same name and interval combination more "
                                  "than once per resource)", ID(operation), id);
                 dup = TRUE;
             }
         }
     }
 
     return dup;
 }
 
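+/*!
+ * \internal
+ * \brief Check whether an action name is for an action that cannot be recurring
+ *
+ * \param[in] name  Action name to check
+ *
+ * \return true if \p name is for a stop, start, demote, or promote action,
+ *         otherwise false
+ */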
 static bool
 op_cannot_recur(const char *name)
 {
     return pcmk__strcase_any_of(name, RSC_STOP, RSC_START, RSC_DEMOTE, RSC_PROMOTE, NULL);
 }
 
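+/*!
+ * \internal
+ * \brief Create a recurring action for a role other than Stopped, if needed
+ *
+ * \param[in] rsc        Resource the recurring action is for
+ * \param[in] start      Start action of \p rsc, if any
+ * \param[in] node       Node that \p rsc will be active on, if any
+ * \param[in] operation  Operation definition from the resource configuration
+ * \param[in] data_set   Cluster working set
+ */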
 static void
 RecurringOp(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
             xmlNode * operation, pe_working_set_t * data_set)
 {
     char *key = NULL;
     const char *name = NULL;
     const char *role = NULL;
     const char *interval_spec = NULL;
     const char *node_uname = node? node->details->uname : "n/a";
 
     guint interval_ms = 0;
     pe_action_t *mon = NULL;
     gboolean is_optional = TRUE;
     GList *possible_matches = NULL;
 
     CRM_ASSERT(rsc);
 
     /* Only process operations that do not have role="Stopped" */
     role = crm_element_value(operation, "role");
     if (role && text2role(role) == RSC_ROLE_STOPPED) {
         return;
     }
 
     interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
     interval_ms = crm_parse_interval_spec(interval_spec);
     if (interval_ms == 0) {
         return;
     }
 
     name = crm_element_value(operation, "name");
     if (is_op_dup(rsc, name, interval_ms)) {
         crm_trace("Not creating duplicate recurring action %s for %dms %s",
                   ID(operation), interval_ms, name);
         return;
     }
 
     if (op_cannot_recur(name)) {
         pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
                          ID(operation), name);
         return;
     }
 
     key = pcmk__op_key(rsc->id, name, interval_ms);
     if (find_rsc_op_entry(rsc, key) == NULL) {
         crm_trace("Not creating recurring action %s for disabled resource %s",
                   ID(operation), rsc->id);
         free(key);
         return;
     }
 
     pe_rsc_trace(rsc, "Creating recurring action %s for %s in role %s on %s",
                  ID(operation), rsc->id, role2text(rsc->next_role), node_uname);
 
     if (start != NULL) {
         pe_rsc_trace(rsc, "Marking %s %s due to %s", key,
                      pcmk_is_set(start->flags, pe_action_optional)? "optional" : "mandatory",
                      start->uuid);
         is_optional = (rsc->cmds->action_flags(start, NULL) & pe_action_optional);
     } else {
         pe_rsc_trace(rsc, "Marking %s optional", key);
         is_optional = TRUE;
     }
 
     /* start a monitor for an already active resource */
     possible_matches = find_actions_exact(rsc->actions, key, node);
     if (possible_matches == NULL) {
         is_optional = FALSE;
         pe_rsc_trace(rsc, "Marking %s mandatory: not active", key);
 
     } else {
         GList *gIter = NULL;
 
         for (gIter = possible_matches; gIter != NULL; gIter = gIter->next) {
             pe_action_t *op = (pe_action_t *) gIter->data;
 
             if (pcmk_is_set(op->flags, pe_action_reschedule)) {
                 is_optional = FALSE;
                 break;
             }
         }
         g_list_free(possible_matches);
     }
 
     if (((rsc->next_role == RSC_ROLE_PROMOTED) && (role == NULL))
         || (role != NULL && text2role(role) != rsc->next_role)) {
         int log_level = LOG_TRACE;
         const char *result = "Ignoring";
 
         if (is_optional) {
             char *after_key = NULL;
             pe_action_t *cancel_op = NULL;
 
             // It's running, so cancel it
             log_level = LOG_INFO;
             result = "Cancelling";
             cancel_op = pcmk__new_cancel_action(rsc, name, interval_ms, node);
 
             switch (rsc->role) {
                 case RSC_ROLE_UNPROMOTED:
                 case RSC_ROLE_STARTED:
                     if (rsc->next_role == RSC_ROLE_PROMOTED) {
                         after_key = promote_key(rsc);
 
                     } else if (rsc->next_role == RSC_ROLE_STOPPED) {
                         after_key = stop_key(rsc);
                     }
 
                     break;
                 case RSC_ROLE_PROMOTED:
                     after_key = demote_key(rsc);
                     break;
                 default:
                     break;
             }
 
             if (after_key) {
                 pcmk__new_ordering(rsc, NULL, cancel_op, rsc, after_key, NULL,
                                    pe_order_runnable_left, data_set);
             }
         }
 
         do_crm_log(log_level, "%s action %s (%s vs. %s)",
                    result, key, role ? role : role2text(RSC_ROLE_UNPROMOTED),
                    role2text(rsc->next_role));
 
         free(key);
         return;
     }
 
     mon = custom_action(rsc, key, name, node, is_optional, TRUE, data_set);
     key = mon->uuid;
     if (is_optional) {
         pe_rsc_trace(rsc, "%s\t   %s (optional)", node_uname, mon->uuid);
     }
 
     if ((start == NULL) || !pcmk_is_set(start->flags, pe_action_runnable)) {
         pe_rsc_debug(rsc, "%s\t   %s (cancelled : start un-runnable)",
                      node_uname, mon->uuid);
         pe__clear_action_flags(mon, pe_action_runnable);
 
     } else if (node == NULL || node->details->online == FALSE || node->details->unclean) {
         pe_rsc_debug(rsc, "%s\t   %s (cancelled : no node available)",
                      node_uname, mon->uuid);
         pe__clear_action_flags(mon, pe_action_runnable);
 
     } else if (!pcmk_is_set(mon->flags, pe_action_optional)) {
         pe_rsc_info(rsc, " Start recurring %s (%us) for %s on %s",
                     mon->task, interval_ms / 1000, rsc->id, node_uname);
     }
 
     if (rsc->next_role == RSC_ROLE_PROMOTED) {
         pe__add_action_expected_result(mon, CRM_EX_PROMOTED);
     }
 
     if ((node == NULL) || pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         pcmk__new_ordering(rsc, start_key(rsc), NULL, NULL, strdup(key), mon,
                            pe_order_implies_then|pe_order_runnable_left,
                            data_set);
 
         pcmk__new_ordering(rsc, reload_key(rsc), NULL, NULL, strdup(key), mon,
                            pe_order_implies_then|pe_order_runnable_left,
                            data_set);
 
         if (rsc->next_role == RSC_ROLE_PROMOTED) {
             pcmk__new_ordering(rsc, promote_key(rsc), NULL, rsc, NULL, mon,
                                pe_order_optional|pe_order_runnable_left,
                                data_set);
 
         } else if (rsc->role == RSC_ROLE_PROMOTED) {
             pcmk__new_ordering(rsc, demote_key(rsc), NULL, rsc, NULL, mon,
                                pe_order_optional|pe_order_runnable_left,
                                data_set);
         }
     }
 }
 
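+/*!
+ * \internal
+ * \brief Create recurring actions for each of a resource's operation
+ *        definitions that applies to an active role
+ *
+ * \param[in] rsc       Resource to create recurring actions for
+ * \param[in] start     Start action of \p rsc, if any
+ * \param[in] node      Node that \p rsc will be active on, if any
+ * \param[in] data_set  Cluster working set
+ */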
 static void
 Recurring(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
 {
     if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
         (node == NULL || node->details->maintenance == FALSE)) {
         xmlNode *operation = NULL;
 
         for (operation = pcmk__xe_first_child(rsc->ops_xml);
              operation != NULL;
              operation = pcmk__xe_next(operation)) {
 
             if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
                 RecurringOp(rsc, start, node, operation, data_set);
             }
         }
     }
 }
 
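+/*!
+ * \internal
+ * \brief Create a recurring action for the Stopped role, if needed
+ *
+ * \param[in] rsc        Resource the recurring action is for
+ * \param[in] start      Start action of \p rsc, if any
+ * \param[in] node       Node that \p rsc will be active on, if any
+ * \param[in] operation  Operation definition from the resource configuration
+ * \param[in] data_set   Cluster working set
+ */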
 static void
 RecurringOp_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node,
                     xmlNode * operation, pe_working_set_t * data_set)
 {
     char *key = NULL;
     const char *name = NULL;
     const char *role = NULL;
     const char *interval_spec = NULL;
     const char *node_uname = node? node->details->uname : "n/a";
 
     guint interval_ms = 0;
     GList *possible_matches = NULL;
     GList *gIter = NULL;
 
     /* Only process operations that have role="Stopped" */
     role = crm_element_value(operation, "role");
     if (role == NULL || text2role(role) != RSC_ROLE_STOPPED) {
         return;
     }
 
     interval_spec = crm_element_value(operation, XML_LRM_ATTR_INTERVAL);
     interval_ms = crm_parse_interval_spec(interval_spec);
     if (interval_ms == 0) {
         return;
     }
 
     name = crm_element_value(operation, "name");
     if (is_op_dup(rsc, name, interval_ms)) {
         crm_trace("Not creating duplicate recurring action %s for %dms %s",
                   ID(operation), interval_ms, name);
         return;
     }
 
     if (op_cannot_recur(name)) {
         pcmk__config_err("Ignoring %s because action '%s' cannot be recurring",
                          ID(operation), name);
         return;
     }
 
     key = pcmk__op_key(rsc->id, name, interval_ms);
     if (find_rsc_op_entry(rsc, key) == NULL) {
         crm_trace("Not creating recurring action %s for disabled resource %s",
                   ID(operation), rsc->id);
         free(key);
         return;
     }
 
     // @TODO add support
     if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
         crm_notice("Ignoring %s (recurring monitors for Stopped role are "
                    "not supported for anonymous clones)",
                    ID(operation));
         return;
     }
 
     pe_rsc_trace(rsc,
                  "Creating recurring action %s for %s in role %s on nodes where it should not be running",
                  ID(operation), rsc->id, role2text(rsc->next_role));
 
     /* if the monitor exists on the node where the resource will be running, cancel it */
     if (node != NULL) {
         possible_matches = find_actions_exact(rsc->actions, key, node);
         if (possible_matches) {
             pe_action_t *cancel_op = NULL;
 
             g_list_free(possible_matches);
 
             cancel_op = pcmk__new_cancel_action(rsc, name, interval_ms, node);
 
             if ((rsc->next_role == RSC_ROLE_STARTED)
                 || (rsc->next_role == RSC_ROLE_UNPROMOTED)) {
                 /* rsc->role == RSC_ROLE_STOPPED: cancel the monitor before start */
                 /* rsc->role == RSC_ROLE_STARTED: for a migration, cancel the monitor on the target node before start */
                 pcmk__new_ordering(rsc, NULL, cancel_op, rsc, start_key(rsc),
                                    NULL, pe_order_runnable_left, data_set);
             }
 
             pe_rsc_info(rsc, "Cancel action %s (%s vs. %s) on %s",
                         key, role, role2text(rsc->next_role), node_uname);
         }
     }
 
     for (gIter = data_set->nodes; gIter != NULL; gIter = gIter->next) {
         pe_node_t *stop_node = (pe_node_t *) gIter->data;
         const char *stop_node_uname = stop_node->details->uname;
         gboolean is_optional = TRUE;
         gboolean probe_is_optional = TRUE;
         gboolean stop_is_optional = TRUE;
         pe_action_t *stopped_mon = NULL;
         GList *stop_ops = NULL;
         GList *local_gIter = NULL;
 
         if (node && pcmk__str_eq(stop_node_uname, node_uname, pcmk__str_casei)) {
             continue;
         }
 
         pe_rsc_trace(rsc, "Creating recurring action %s for %s on %s",
                      ID(operation), rsc->id,
                      pcmk__s(stop_node_uname, "unknown node"));
 
         /* start a monitor for an already stopped resource */
         possible_matches = find_actions_exact(rsc->actions, key, stop_node);
         if (possible_matches == NULL) {
             pe_rsc_trace(rsc, "Marking %s mandatory on %s: not active", key,
                          pcmk__s(stop_node_uname, "unknown node"));
             is_optional = FALSE;
         } else {
             pe_rsc_trace(rsc, "Marking %s optional on %s: already active", key,
                          pcmk__s(stop_node_uname, "unknown node"));
             is_optional = TRUE;
             g_list_free(possible_matches);
         }
 
         stopped_mon = custom_action(rsc, strdup(key), name, stop_node, is_optional, TRUE, data_set);
 
         pe__add_action_expected_result(stopped_mon, CRM_EX_NOT_RUNNING);
 
         if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             GList *probes = pe__resource_actions(rsc, stop_node, RSC_STATUS,
                                                  FALSE);
             GList *pIter = NULL;
 
             for (pIter = probes; pIter != NULL; pIter = pIter->next) {
                 pe_action_t *probe = (pe_action_t *) pIter->data;
 
                 order_actions(probe, stopped_mon, pe_order_runnable_left);
                 crm_trace("%s then %s on %s", probe->uuid, stopped_mon->uuid, stop_node->details->uname);
             }
 
             g_list_free(probes);
         }
 
         stop_ops = pe__resource_actions(rsc, stop_node, RSC_STOP, TRUE);
 
         for (local_gIter = stop_ops; local_gIter != NULL; local_gIter = local_gIter->next) {
             pe_action_t *stop = (pe_action_t *) local_gIter->data;
 
             if (!pcmk_is_set(stop->flags, pe_action_optional)) {
                 stop_is_optional = FALSE;
             }
 
             if (!pcmk_is_set(stop->flags, pe_action_runnable)) {
                 crm_debug("%s\t   %s (cancelled : stop un-runnable)",
                           pcmk__s(stop_node_uname, "<null>"),
                           stopped_mon->uuid);
                 pe__clear_action_flags(stopped_mon, pe_action_runnable);
             }
 
             if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
                 pcmk__new_ordering(rsc, stop_key(rsc), stop, NULL, strdup(key),
                                    stopped_mon,
                                    pe_order_implies_then|pe_order_runnable_left,
                                    data_set);
             }
 
         }
 
         if (stop_ops) {
             g_list_free(stop_ops);
         }
 
         if (is_optional == FALSE && probe_is_optional && stop_is_optional
             && !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             pe_rsc_trace(rsc, "Marking %s optional on %s due to unmanaged",
                          key, pcmk__s(stop_node_uname, "unknown node"));
             pe__set_action_flags(stopped_mon, pe_action_optional);
         }
 
         if (pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
             pe_rsc_trace(rsc, "%s\t   %s (optional)",
                          pcmk__s(stop_node_uname, "<null>"),
                          stopped_mon->uuid);
         }
 
         if (stop_node->details->online == FALSE || stop_node->details->unclean) {
             pe_rsc_debug(rsc, "%s\t   %s (cancelled : no node available)",
                          pcmk__s(stop_node_uname, "<null>"),
                          stopped_mon->uuid);
             pe__clear_action_flags(stopped_mon, pe_action_runnable);
         }
 
         if (pcmk_is_set(stopped_mon->flags, pe_action_runnable)
             && !pcmk_is_set(stopped_mon->flags, pe_action_optional)) {
             crm_notice(" Start recurring %s (%us) for %s on %s", stopped_mon->task,
                        interval_ms / 1000, rsc->id,
                        pcmk__s(stop_node_uname, "unknown node"));
         }
     }
 
     free(key);
 }
 
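+/*!
+ * \internal
+ * \brief Create recurring actions for each of a resource's operation
+ *        definitions that applies to the Stopped role
+ *
+ * \param[in] rsc       Resource to create recurring actions for
+ * \param[in] start     Start action of \p rsc, if any
+ * \param[in] node      Node that \p rsc will be active on, if any
+ * \param[in] data_set  Cluster working set
+ */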
 static void
 Recurring_Stopped(pe_resource_t * rsc, pe_action_t * start, pe_node_t * node, pe_working_set_t * data_set)
 {
     if (!pcmk_is_set(rsc->flags, pe_rsc_maintenance) &&
         (node == NULL || node->details->maintenance == FALSE)) {
         xmlNode *operation = NULL;
 
         for (operation = pcmk__xe_first_child(rsc->ops_xml);
              operation != NULL;
              operation = pcmk__xe_next(operation)) {
 
             if (pcmk__str_eq((const char *)operation->name, "op", pcmk__str_none)) {
                 RecurringOp_Stopped(rsc, start, node, operation, data_set);
             }
         }
     }
 }
 
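+/*!
+ * \internal
+ * \brief Schedule migration actions for a resource moving between nodes
+ *
+ * \param[in] rsc       Resource to schedule migration actions for
+ * \param[in] current   Node that \p rsc is moving from
+ * \param[in] chosen    Node that \p rsc is moving to
+ * \param[in] data_set  Cluster working set
+ */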
 static void
 handle_migration_actions(pe_resource_t * rsc, pe_node_t *current, pe_node_t *chosen, pe_working_set_t * data_set)
 {
     pe_action_t *migrate_to = NULL;
     pe_action_t *migrate_from = NULL;
     pe_action_t *start = NULL;
     pe_action_t *stop = NULL;
     gboolean partial = rsc->partial_migration_target ? TRUE : FALSE;
 
     pe_rsc_trace(rsc, "Processing migration actions %s moving from %s to %s . partial migration = %s",
     rsc->id, current->details->id, chosen->details->id, partial ? "TRUE" : "FALSE");
     start = start_action(rsc, chosen, TRUE);
     stop = stop_action(rsc, current, TRUE);
 
     if (partial == FALSE) {
         migrate_to = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
                                    RSC_MIGRATE, current, TRUE, TRUE, data_set);
     }
 
     migrate_from = custom_action(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
                                  RSC_MIGRATED, chosen, TRUE, TRUE, data_set);
 
     if ((migrate_to && migrate_from) || (migrate_from && partial)) {
 
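+        /* The orderings created below produce the overall migration sequence
+         * probe -> migrate_to (on the source node) -> migrate_from (on the
+         * target node) -> stop (on the source node), with start becoming a
+         * pseudo-action because the resource is already active on the target.
+         * For a partial migration, migrate_to has already happened, so the
+         * probe is ordered directly before migrate_from instead.
+         */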
         pe__set_action_flags(start, pe_action_migrate_runnable);
         pe__set_action_flags(stop, pe_action_migrate_runnable);
 
         // This is easier than trying to delete it from the graph
         pe__set_action_flags(start, pe_action_pseudo);
 
         /* order probes before migrations */
         if (partial) {
             pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
             migrate_from->needs = start->needs;
 
             pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
                                rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
                                NULL, pe_order_optional, data_set);
 
         } else {
             pe__set_action_flags(migrate_from, pe_action_migrate_runnable);
             pe__set_action_flags(migrate_to, pe_action_migrate_runnable);
             migrate_to->needs = start->needs;
 
             pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0), NULL,
                                rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0),
                                NULL, pe_order_optional, data_set);
             pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATE, 0), NULL,
                                rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0),
                                NULL,
                                pe_order_optional|pe_order_implies_first_migratable,
                                data_set);
         }
 
         pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
                            rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                            pe_order_optional|pe_order_implies_first_migratable,
                            data_set);
         pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_MIGRATED, 0), NULL,
                            rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                            pe_order_optional|pe_order_implies_first_migratable|pe_order_pseudo_left,
                            data_set);
     }
 
     if (migrate_to) {
         add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
         add_hash_param(migrate_to->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
 
         /* Pacemaker Remote connections don't require pending to be recorded in
          * the CIB. We can reduce CIB writes by not setting PENDING for them.
          */
         if (rsc->is_remote_node == FALSE) {
             /* migrate_to takes place on the source node, but can have an
              * effect on the target node depending on how the agent is
              * written. Because of this, we have to maintain a record that the
              * migrate_to occurred, in case the source node loses membership
              * while the migrate_to action is still in-flight.
              */
             add_hash_param(migrate_to->meta, XML_OP_ATTR_PENDING, "true");
         }
     }
 
     if (migrate_from) {
         add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_SOURCE, current->details->uname);
         add_hash_param(migrate_from->meta, XML_LRM_ATTR_MIGRATE_TARGET, chosen->details->uname);
     }
 }
 
 /*!
  * \internal
  * \brief Schedule actions to bring resource down and back to current role
  *
  * \param[in] rsc           Resource to restart
  * \param[in] current       Node that resource should be brought down on
  * \param[in] chosen        Node that resource should be brought up on
  * \param[in] need_stop     Whether the resource must be stopped
  * \param[in] need_promote  Whether the resource must be promoted
  */
 static void
 schedule_restart_actions(pe_resource_t *rsc, pe_node_t *current,
                          pe_node_t *chosen, bool need_stop, bool need_promote)
 {
     enum rsc_role_e role = rsc->role;
     enum rsc_role_e next_role;
 
     pe__set_resource_flags(rsc, pe_rsc_restarting);
 
     // Bring resource down to a stop on its current node
     while (role != RSC_ROLE_STOPPED) {
         next_role = rsc_state_matrix[role][RSC_ROLE_STOPPED];
         pe_rsc_trace(rsc, "Creating %s action to take %s down from %s to %s",
                      (need_stop? "required" : "optional"), rsc->id,
                      role2text(role), role2text(next_role));
         if (!rsc_action_matrix[role][next_role](rsc, current, !need_stop)) {
             break;
         }
         role = next_role;
     }
 
     // Bring resource up to its next role on its next node
     while ((rsc->role <= rsc->next_role) && (role != rsc->role)
            && !pcmk_is_set(rsc->flags, pe_rsc_block)) {
         bool required = need_stop;
 
         next_role = rsc_state_matrix[role][rsc->role];
         if ((next_role == RSC_ROLE_PROMOTED) && need_promote) {
             required = true;
         }
         pe_rsc_trace(rsc, "Creating %s action to take %s up from %s to %s",
                      (required? "required" : "optional"), rsc->id,
                      role2text(role), role2text(next_role));
         if (!rsc_action_matrix[role][next_role](rsc, chosen, !required)) {
             break;
         }
         role = next_role;
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_restarting);
 }
 
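+// Primitive implementation of resource_alloc_functions_t:create_actions()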
 void
 native_create_actions(pe_resource_t *rsc)
 {
     pe_action_t *start = NULL;
     pe_node_t *chosen = NULL;
     pe_node_t *current = NULL;
     gboolean need_stop = FALSE;
     bool need_promote = FALSE;
     gboolean is_moving = FALSE;
     gboolean allow_migrate = FALSE;
 
     GList *gIter = NULL;
     unsigned int num_all_active = 0;
     unsigned int num_clean_active = 0;
     bool multiply_active = FALSE;
     enum rsc_role_e role = RSC_ROLE_UNKNOWN;
     enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
 
     CRM_ASSERT(rsc != NULL);
     allow_migrate = pcmk_is_set(rsc->flags, pe_rsc_allow_migrate)? TRUE : FALSE;
 
     chosen = rsc->allocated_to;
     next_role = rsc->next_role;
     if (next_role == RSC_ROLE_UNKNOWN) {
         pe__set_next_role(rsc,
                           (chosen == NULL)? RSC_ROLE_STOPPED : RSC_ROLE_STARTED,
                           "allocation");
     }
     pe_rsc_trace(rsc, "Creating all actions for %s transition from %s to %s (%s) on %s",
                  rsc->id, role2text(rsc->role), role2text(rsc->next_role),
                  ((next_role == RSC_ROLE_UNKNOWN)? "implicit" : "explicit"),
                  ((chosen == NULL)? "no node" : chosen->details->uname));
 
     current = pe__find_active_on(rsc, &num_all_active, &num_clean_active);
 
     for (gIter = rsc->dangling_migrations; gIter != NULL; gIter = gIter->next) {
         pe_node_t *dangling_source = (pe_node_t *) gIter->data;
 
         pe_action_t *stop = NULL;
 
         pe_rsc_trace(rsc, "Creating stop action %sfor %s on %s due to dangling migration",
                      pcmk_is_set(rsc->cluster->flags, pe_flag_remove_after_stop)? "and cleanup " : "",
                      rsc->id, dangling_source->details->uname);
         stop = stop_action(rsc, dangling_source, FALSE);
         pe__set_action_flags(stop, pe_action_dangle);
         if (pcmk_is_set(rsc->cluster->flags, pe_flag_remove_after_stop)) {
             DeleteRsc(rsc, dangling_source, FALSE, rsc->cluster);
         }
     }
 
     if ((num_all_active == 2) && (num_clean_active == 2) && chosen
         && rsc->partial_migration_source && rsc->partial_migration_target
         && (current->details == rsc->partial_migration_source->details)
         && (chosen->details == rsc->partial_migration_target->details)) {
 
         /* The chosen node is still the migration target from a partial
          * migration. Attempt to continue the migration instead of recovering
          * by stopping the resource everywhere and starting it on a single node.
          */
         pe_rsc_trace(rsc, "Will attempt to continue with partial migration "
                      "to target %s from %s",
                      rsc->partial_migration_target->details->id,
                      rsc->partial_migration_source->details->id);
 
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
         /* If a resource has "requires" set to nothing or quorum, don't consider
          * it active on unclean nodes (similar to how all resources behave when
          * stonith-enabled is false). We can start such resources elsewhere
          * before fencing completes, and if we considered the resource active on
          * the failed node, we would attempt recovery for being active on
          * multiple nodes.
          */
         multiply_active = (num_clean_active > 1);
     } else {
         multiply_active = (num_all_active > 1);
     }
 
     if (multiply_active) {
         if (rsc->partial_migration_target && rsc->partial_migration_source) {
             // Migration was in progress, but we've chosen a different target
             crm_notice("Resource %s can no longer migrate from %s to %s "
                        "(will stop on both nodes)",
                        rsc->id, rsc->partial_migration_source->details->uname,
                        rsc->partial_migration_target->details->uname);
             multiply_active = false;
 
         } else {
             const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
 
             // Resource was (possibly) incorrectly multiply active
             pe_proc_err("%s resource %s might be active on %u nodes (%s)",
                         pcmk__s(class, "Untyped"), rsc->id, num_all_active,
                         recovery2text(rsc->recovery_type));
             crm_notice("See https://wiki.clusterlabs.org/wiki/FAQ#Resource_is_Too_Active for more information");
         }
 
         switch (rsc->recovery_type) {
             case recovery_stop_start:
                 need_stop = TRUE;
                 break;
             case recovery_stop_unexpected:
                 need_stop = TRUE; // StopRsc() will skip expected node
                 pe__set_resource_flags(rsc, pe_rsc_stop_unexpected);
                 break;
             default:
                 break;
         }
 
         /* A partial migration may be in progress, but since the migration
          * target is no longer the chosen node, clear all partial migration
          * data.
          */
         rsc->partial_migration_source = rsc->partial_migration_target = NULL;
         allow_migrate = FALSE;
     }
 
     if (!multiply_active) {
         pe__clear_resource_flags(rsc, pe_rsc_stop_unexpected);
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_start_pending)) {
         pe_rsc_trace(rsc, "Creating start action for %s to represent already pending start",
                      rsc->id);
         start = start_action(rsc, chosen, TRUE);
         pe__set_action_flags(start, pe_action_print_always);
     }
 
     if (current && chosen && current->details != chosen->details) {
         pe_rsc_trace(rsc, "Moving %s from %s to %s",
                      rsc->id, pcmk__s(current->details->uname, "unknown node"),
                      pcmk__s(chosen->details->uname, "unknown node"));
         is_moving = TRUE;
         need_stop = TRUE;
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         if (pcmk_is_set(rsc->flags, pe_rsc_stop)) {
             need_stop = TRUE;
             pe_rsc_trace(rsc, "Recovering %s", rsc->id);
         } else {
             pe_rsc_trace(rsc, "Recovering %s by demotion", rsc->id);
             if (rsc->next_role == RSC_ROLE_PROMOTED) {
                 need_promote = TRUE;
             }
         }
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
         pe_rsc_trace(rsc, "Blocking further actions on %s", rsc->id);
         need_stop = TRUE;
 
     } else if (rsc->role > RSC_ROLE_STARTED && current != NULL && chosen != NULL) {
         pe_rsc_trace(rsc, "Creating start action for promoted resource %s",
                      rsc->id);
         start = start_action(rsc, chosen, TRUE);
         if (!pcmk_is_set(start->flags, pe_action_optional)) {
             // Recovery of a promoted resource
             pe_rsc_trace(rsc, "%s restart is required for recovery", rsc->id);
             need_stop = TRUE;
         }
     }
 
     /* Create any additional actions required when bringing resource down and
      * back up to same level.
      */
     schedule_restart_actions(rsc, current, chosen, need_stop, need_promote);
 
     /* Required steps from this role to the next */
     role = rsc->role;
     while (role != rsc->next_role) {
         next_role = rsc_state_matrix[role][rsc->next_role];
         pe_rsc_trace(rsc, "Creating action to take %s from %s to %s (ending at %s)",
                      rsc->id, role2text(role), role2text(next_role),
                      role2text(rsc->next_role));
         if (!rsc_action_matrix[role][next_role](rsc, chosen, false)) {
             break;
         }
         role = next_role;
     }
 
     if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
         pe_rsc_trace(rsc, "Not creating recurring monitors for blocked resource %s",
                      rsc->id);
 
     } else if ((rsc->next_role != RSC_ROLE_STOPPED)
                || !pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         pe_rsc_trace(rsc, "Creating recurring monitors for %s resource %s",
                      ((rsc->next_role == RSC_ROLE_STOPPED)? "unmanaged" : "active"),
                      rsc->id);
         start = start_action(rsc, chosen, TRUE);
         Recurring(rsc, start, chosen, rsc->cluster);
         Recurring_Stopped(rsc, start, chosen, rsc->cluster);
 
     } else {
         pe_rsc_trace(rsc, "Creating recurring monitors for inactive resource %s",
                      rsc->id);
         Recurring_Stopped(rsc, NULL, NULL, rsc->cluster);
     }
 
     /* If we are stuck in a partial migration whose target no longer matches
      * the chosen node, a full stop/start is required.
      */
     if (rsc->partial_migration_target && (chosen == NULL || rsc->partial_migration_target->details != chosen->details)) {
         pe_rsc_trace(rsc, "Not allowing partial migration of %s to continue",
                      rsc->id);
         allow_migrate = FALSE;
 
     } else if (!is_moving || !pcmk_is_set(rsc->flags, pe_rsc_managed)
                || pcmk_any_flags_set(rsc->flags,
                                      pe_rsc_failed|pe_rsc_start_pending)
                || (current && current->details->unclean)
                || rsc->next_role < RSC_ROLE_STARTED) {
 
         allow_migrate = FALSE;
     }
 
     if (allow_migrate) {
         handle_migration_actions(rsc, current, chosen, rsc->cluster);
     }
 }
 
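+/*!
+ * \internal
+ * \brief Ban a resource from all Pacemaker Remote nodes
+ *
+ * \param[in] rsc  Resource to ban
+ */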
 static void
 rsc_avoids_remote_nodes(pe_resource_t *rsc)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
     g_hash_table_iter_init(&iter, rsc->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
         if (node->details->remote_rsc) {
             node->weight = -INFINITY;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Return allowed nodes as (possibly sorted) list
  *
  * Convert a resource's hash table of allowed nodes to a list. If printing to
  * stdout, sort the list, to keep action ID numbers consistent for regression
  * test output (while avoiding the performance hit on a live cluster).
  *
  * \param[in] rsc       Resource to check for allowed nodes
  * \param[in] data_set  Cluster working set
  *
  * \return List of resource's allowed nodes
  * \note Callers should take care not to rely on the list being sorted.
  */
 static GList *
 allowed_nodes_as_list(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     GList *allowed_nodes = NULL;
 
     if (rsc->allowed_nodes) {
         allowed_nodes = g_hash_table_get_values(rsc->allowed_nodes);
     }
 
     if (!pcmk__is_daemon) {
         allowed_nodes = g_list_sort(allowed_nodes, pe__cmp_node_name);
     }
 
     return allowed_nodes;
 }
 
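+// Primitive implementation of resource_alloc_functions_t:internal_constraints()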
 void
 native_internal_constraints(pe_resource_t *rsc)
 {
     /* This function is on the critical path and worth optimizing as much as possible */
 
     pe_resource_t *top = NULL;
     GList *allowed_nodes = NULL;
     bool check_unfencing = FALSE;
     bool check_utilization = false;
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         pe_rsc_trace(rsc,
                      "Skipping native constraints for unmanaged resource: %s",
                      rsc->id);
         return;
     }
 
     top = uber_parent(rsc);
 
     // Whether resource requires unfencing
     check_unfencing = !pcmk_is_set(rsc->flags, pe_rsc_fence_device)
                       && pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing)
                       && pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing);
 
     // Whether a non-default placement strategy is used
     check_utilization = (g_hash_table_size(rsc->utilization) > 0)
                          && !pcmk__str_eq(rsc->cluster->placement_strategy,
                                           "default", pcmk__str_casei);
 
     // Order stops before starts (i.e. restart)
     pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                        rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                        pe_order_optional|pe_order_implies_then|pe_order_restart,
                        rsc->cluster);
 
     // Promotable ordering: demote before stop, start before promote
     if (pcmk_is_set(top->flags, pe_rsc_promotable)
         || (rsc->role > RSC_ROLE_UNPROMOTED)) {
 
         pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_DEMOTE, 0), NULL,
                            rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                            pe_order_promoted_implies_first, rsc->cluster);
 
         pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_START, 0), NULL,
                            rsc, pcmk__op_key(rsc->id, RSC_PROMOTE, 0), NULL,
                            pe_order_runnable_left, rsc->cluster);
     }
 
     // Don't clear resource history if probing on same node
     pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, CRM_OP_LRM_DELETE, 0),
                        NULL, rsc, pcmk__op_key(rsc->id, RSC_STATUS, 0),
                        NULL, pe_order_same_node|pe_order_then_cancels_first,
                        rsc->cluster);
 
     // Certain checks need allowed nodes
     if (check_unfencing || check_utilization || rsc->container) {
         allowed_nodes = allowed_nodes_as_list(rsc, rsc->cluster);
     }
 
     if (check_unfencing) {
         /* Check if the node needs to be unfenced first */
 
         for (GList *item = allowed_nodes; item; item = item->next) {
             pe_node_t *node = item->data;
             pe_action_t *unfence = pe_fence_op(node, "on", TRUE, NULL, FALSE,
                                                rsc->cluster);
 
             crm_debug("Ordering any stops of %s before %s, and any starts after",
                       rsc->id, unfence->uuid);
 
             /*
              * It would be more efficient to order clone resources once,
              * rather than order each instance, but ordering the instance
              * allows us to avoid unnecessary dependencies that might conflict
              * with user constraints.
              *
              * @TODO: This constraint can still produce a transition loop if the
              * resource has a stop scheduled on the node being unfenced, and
              * there is a user ordering constraint to start some other resource
              * (which will be ordered after the unfence) before stopping this
              * resource. An example is "start some slow-starting cloned service
              * before stopping an associated virtual IP that may be moving to
              * it":
              *       stop this -> unfencing -> start that -> stop this
              */
             pcmk__new_ordering(rsc, stop_key(rsc), NULL,
                                NULL, strdup(unfence->uuid), unfence,
                                pe_order_optional|pe_order_same_node,
                                rsc->cluster);
 
             pcmk__new_ordering(NULL, strdup(unfence->uuid), unfence,
                                rsc, start_key(rsc), NULL,
                                pe_order_implies_then_on_node|pe_order_same_node,
                                rsc->cluster);
         }
     }
 
     if (check_utilization) {
         pcmk__create_utilization_constraints(rsc, allowed_nodes);
     }
 
     if (rsc->container) {
         pe_resource_t *remote_rsc = NULL;
 
         if (rsc->is_remote_node) {
             // rsc is the implicit remote connection for a guest or bundle node
 
             /* Do not allow a guest resource to live on a Pacemaker Remote node,
              * to avoid nesting remotes. However, allow bundles to run on remote
              * nodes.
              */
             if (!pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
                 rsc_avoids_remote_nodes(rsc->container);
             }
 
             /* If someone cleans up a guest or bundle node's container, we will
              * likely schedule a (re-)probe of the container and recovery of the
              * connection. Order the connection stop after the container probe,
              * so that if we detect the container running, we will trigger a new
              * transition and avoid the unnecessary recovery.
              */
             pcmk__order_resource_actions(rsc->container, RSC_STATUS, rsc,
                                          RSC_STOP, pe_order_optional);
 
         /* A user can specify that a resource must start on a Pacemaker Remote
          * node by explicitly configuring it with the container=NODENAME
          * meta-attribute. This is of questionable merit, since location
          * constraints can accomplish the same thing. But we support it, so here
          * we check whether a resource (that is not itself a remote connection)
          * has container set to a remote node or guest node resource.
          */
         } else if (rsc->container->is_remote_node) {
             remote_rsc = rsc->container;
         } else  {
             remote_rsc = pe__resource_contains_guest_node(rsc->cluster,
                                                           rsc->container);
         }
 
         if (remote_rsc) {
             /* Force the resource on the Pacemaker Remote node instead of
              * colocating the resource with the container resource.
              */
             for (GList *item = allowed_nodes; item; item = item->next) {
                 pe_node_t *node = item->data;
 
                 if (node->details->remote_rsc != remote_rsc) {
                     node->weight = -INFINITY;
                 }
             }
 
         } else {
             /* This resource is either a filler for a container that does NOT
              * represent a Pacemaker Remote node, or a Pacemaker Remote
              * connection resource for a guest node or bundle.
              */
             int score;
 
             crm_trace("Order and colocate %s relative to its container %s",
                       rsc->id, rsc->container->id);
 
             pcmk__new_ordering(rsc->container,
                                pcmk__op_key(rsc->container->id, RSC_START, 0),
                                NULL, rsc, pcmk__op_key(rsc->id, RSC_START, 0),
                                NULL,
                                pe_order_implies_then|pe_order_runnable_left,
                                rsc->cluster);
 
             pcmk__new_ordering(rsc, pcmk__op_key(rsc->id, RSC_STOP, 0), NULL,
                                rsc->container,
                                pcmk__op_key(rsc->container->id, RSC_STOP, 0),
                                NULL, pe_order_implies_first, rsc->cluster);
 
             if (pcmk_is_set(rsc->flags, pe_rsc_allow_remote_remotes)) {
                 score = 10000;    /* Highly preferred but not essential */
             } else {
                 score = INFINITY; /* Force them to run on the same host */
             }
             pcmk__new_colocation("resource-with-container", NULL, score, rsc,
                                  rsc->container, NULL, NULL, true, rsc->cluster);
         }
     }
 
     if (rsc->is_remote_node || pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
        /* Don't allow remote nodes to run stonith devices or remote
         * connection resources
         */
         rsc_avoids_remote_nodes(rsc);
     }
     g_list_free(allowed_nodes);
 }
 
 /*!
  * \internal
  * \brief Apply a colocation's score to node weights or resource priority
  *
  * Given a colocation constraint, apply its score to the dependent's
  * allowed node weights (if we are still placing resources) or priority (if
  * we are choosing promotable clone instance roles).
  *
  * \param[in] dependent      Dependent resource in colocation
  * \param[in] primary        Primary resource in colocation
  * \param[in] colocation     Colocation constraint to apply
  * \param[in] for_dependent  true if called on behalf of dependent
  */
 void
 pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
                                   pe_resource_t *primary,
                                   pcmk__colocation_t *colocation,
                                   bool for_dependent)
 {
     enum pcmk__coloc_affects filter_results;
 
     CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
               return);
 
     if (for_dependent) {
         // Always process on behalf of primary resource
         primary->cmds->apply_coloc_score(dependent, primary, colocation, false);
         return;
     }
 
     filter_results = pcmk__colocation_affects(dependent, primary, colocation,
                                               false);
     pe_rsc_trace(dependent, "%s %s with %s (%s, score=%d, filter=%d)",
                  ((colocation->score > 0)? "Colocating" : "Anti-colocating"),
                  dependent->id, primary->id, colocation->id, colocation->score,
                  filter_results);
 
     switch (filter_results) {
         case pcmk__coloc_affects_role:
             pcmk__apply_coloc_to_priority(dependent, primary, colocation);
             break;
         case pcmk__coloc_affects_location:
             pcmk__apply_coloc_to_weights(dependent, primary, colocation);
             break;
         default: // pcmk__coloc_affects_nothing
             return;
     }
 }
 
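/*!
 * \internal
 * \brief Return action flags for a given primitive resource action
 *
 * \param[in] action  Action to get flags for
 * \param[in] node    Node to use for evaluation (ignored for primitives)
 *
 * \return Flags appropriate to \p action
 */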
 enum pe_action_flags
native_action_flags(pe_action_t *action, pe_node_t *node)
 {
     return action->flags;
 }
 
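/*!
 * \internal
 * \brief Apply a location constraint to a primitive resource
 *
 * \param[in] rsc         Resource to apply constraint to
 * \param[in] constraint  Location constraint to apply
 */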
 void
 native_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
 {
     pcmk__apply_location(constraint, rsc);
 }
 
 /*!
  * \internal
  * \brief Check whether a node is a multiply active resource's expected node
  *
 * \param[in] rsc   Resource to check
 * \param[in] node  Node to check
  *
  * \return true if \p rsc is multiply active with multiple-active set to
  *         stop_unexpected, and \p node is the node where it will remain active
  * \note This assumes that the resource's next role cannot be changed to stopped
  *       after this is called, which should be reasonable if status has already
  *       been unpacked and resources have been assigned to nodes.
  */
 static bool
 is_expected_node(const pe_resource_t *rsc, const pe_node_t *node)
 {
     return pcmk_all_flags_set(rsc->flags,
                               pe_rsc_stop_unexpected|pe_rsc_restarting)
            && (rsc->next_role > RSC_ROLE_STOPPED)
            && (rsc->allocated_to != NULL) && (node != NULL)
            && (rsc->allocated_to->details == node->details);
 }
 
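/*!
 * \internal
 * \brief Schedule actions needed to stop a resource wherever it is active
 *
 * \param[in] rsc       Resource being stopped
 * \param[in] next      Node where resource will be assigned, if any
 *                      (used only in log messages)
 * \param[in] optional  Whether actions should be optional
 *
 * \return true (this function cannot fail)
 */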
 static bool
 StopRsc(pe_resource_t *rsc, pe_node_t *next, bool optional)
 {
     GList *gIter = NULL;
 
     CRM_ASSERT(rsc);
 
     for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
         pe_node_t *current = (pe_node_t *) gIter->data;
         pe_action_t *stop;
 
         if (is_expected_node(rsc, current)) {
             /* We are scheduling restart actions for a multiply active resource
              * with multiple-active=stop_unexpected, and this is where it should
              * not be stopped.
              */
             pe_rsc_trace(rsc,
                          "Skipping stop of multiply active resource %s "
                          "on expected node %s",
                          rsc->id, current->details->uname);
             continue;
         }
 
        if (rsc->partial_migration_target != NULL) {
            /* Skip the stop (and let the migration finish) only if the node
             * that the resource is assigned to is still the migration target
             */
            if ((rsc->partial_migration_target->details == current->details)
                && (rsc->allocated_to != NULL)
                && (rsc->allocated_to->details
                    == rsc->partial_migration_target->details)) {
                 pe_rsc_trace(rsc,
                              "Skipping stop of %s on %s "
                              "because migration to %s in progress",
                              rsc->id, current->details->uname,
                              next->details->uname);
                 continue;
             } else {
                 pe_rsc_trace(rsc,
                              "Forcing stop of %s on %s "
                              "because migration target changed",
                              rsc->id, current->details->uname);
                 optional = false;
             }
         }
 
         pe_rsc_trace(rsc, "Scheduling stop of %s on %s",
                      rsc->id, current->details->uname);
         stop = stop_action(rsc, current, optional);
 
        if (rsc->allocated_to == NULL) {
             pe_action_set_reason(stop, "node availability", TRUE);
         } else if (pcmk_all_flags_set(rsc->flags, pe_rsc_restarting
                                                   |pe_rsc_stop_unexpected)) {
             /* We are stopping a multiply active resource on a node that is
              * not its expected node, and we are still scheduling restart
              * actions, so the stop is for being multiply active.
              */
             pe_action_set_reason(stop, "being multiply active", TRUE);
         }
 
         if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             pe__clear_action_flags(stop, pe_action_runnable);
         }
 
         if (pcmk_is_set(rsc->cluster->flags, pe_flag_remove_after_stop)) {
             DeleteRsc(rsc, current, optional, rsc->cluster);
         }
 
         if (pcmk_is_set(rsc->flags, pe_rsc_needs_unfencing)) {
             pe_action_t *unfence = pe_fence_op(current, "on", TRUE, NULL, FALSE,
                                                rsc->cluster);
 
             order_actions(stop, unfence, pe_order_implies_first);
             if (!pcmk__node_unfenced(current)) {
                 pe_proc_err("Stopping %s until %s can be unfenced", rsc->id, current->details->uname);
             }
         }
     }
 
     return true;
 }
 
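/*!
 * \internal
 * \brief Schedule actions needed to start a resource on a node
 *
 * \param[in] rsc       Resource being started
 * \param[in] next      Node where resource should be started
 * \param[in] optional  Whether actions should be optional
 *
 * \return true (this function cannot fail)
 */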
 static bool
 StartRsc(pe_resource_t *rsc, pe_node_t *next, bool optional)
 {
     pe_action_t *start = NULL;
 
     CRM_ASSERT(rsc);
 
     pe_rsc_trace(rsc, "Scheduling %s start of %s on %s (weight=%d)",
                  (optional? "optional" : "required"), rsc->id,
                  ((next == NULL)? "N/A" : next->details->uname),
                  ((next == NULL)? 0 : next->weight));
     start = start_action(rsc, next, TRUE);
 
     pcmk__order_vs_unfence(rsc, next, start, pe_order_implies_then);
 
     if (pcmk_is_set(start->flags, pe_action_runnable) && !optional) {
         pe__clear_action_flags(start, pe_action_optional);
     }
 
     if (is_expected_node(rsc, next)) {
         /* This could be a problem if the start becomes necessary for other
          * reasons later.
          */
         pe_rsc_trace(rsc,
                      "Start of multiply active resouce %s "
                      "on expected node %s will be a pseudo-action",
                      rsc->id, next->details->uname);
         pe__set_action_flags(start, pe_action_pseudo);
     }
 
     return true;
 }
 
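/*!
 * \internal
 * \brief Schedule actions needed to promote a resource on a node
 *
 * \param[in] rsc       Resource being promoted
 * \param[in] next      Node where resource should be promoted
 * \param[in] optional  Whether actions should be optional
 *
 * \return true (this function cannot fail)
 */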
 static bool
 PromoteRsc(pe_resource_t *rsc, pe_node_t *next, bool optional)
 {
     GList *gIter = NULL;
    bool runnable = true;
     GList *action_list = NULL;
 
     CRM_ASSERT(rsc);
     CRM_CHECK(next != NULL, return false);
 
     pe_rsc_trace(rsc, "%s on %s", rsc->id, next->details->uname);
 
     action_list = pe__resource_actions(rsc, next, RSC_START, TRUE);
 
     for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
         pe_action_t *start = (pe_action_t *) gIter->data;
 
         if (!pcmk_is_set(start->flags, pe_action_runnable)) {
            runnable = false;
         }
     }
     g_list_free(action_list);
 
     if (runnable) {
         pe_action_t *promote = promote_action(rsc, next, optional);
 
         if (is_expected_node(rsc, next)) {
             /* This could be a problem if the promote becomes necessary for
              * other reasons later.
              */
             pe_rsc_trace(rsc,
                          "Promotion of multiply active resouce %s "
                          "on expected node %s will be a pseudo-action",
                          rsc->id, next->details->uname);
             pe__set_action_flags(promote, pe_action_pseudo);
         }
 
         return true;
     }
 
     pe_rsc_debug(rsc, "%s\tPromote %s (canceled)", next->details->uname, rsc->id);
 
     action_list = pe__resource_actions(rsc, next, RSC_PROMOTE, TRUE);
 
     for (gIter = action_list; gIter != NULL; gIter = gIter->next) {
         pe_action_t *promote = (pe_action_t *) gIter->data;
 
         pe__clear_action_flags(promote, pe_action_runnable);
     }
 
     g_list_free(action_list);
     return true;
 }
 
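/*!
 * \internal
 * \brief Schedule actions needed to demote a resource wherever it is active
 *
 * \param[in] rsc       Resource being demoted
 * \param[in] next      Node where resource will be assigned, if any
 * \param[in] optional  Whether actions should be optional
 *
 * \return true (this function cannot fail)
 */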
 static bool
 DemoteRsc(pe_resource_t *rsc, pe_node_t *next, bool optional)
 {
     GList *gIter = NULL;
 
     CRM_ASSERT(rsc);
 
     if (is_expected_node(rsc, next)) {
         pe_rsc_trace(rsc,
                      "Skipping demote of multiply active resource %s "
                      "on expected node %s",
                      rsc->id, next->details->uname);
         return true;
     }
 
     pe_rsc_trace(rsc, "%s", rsc->id);
 
     /* CRM_CHECK(rsc->next_role == RSC_ROLE_UNPROMOTED, return FALSE); */
     for (gIter = rsc->running_on; gIter != NULL; gIter = gIter->next) {
         pe_node_t *current = (pe_node_t *) gIter->data;
 
         pe_rsc_trace(rsc, "%s on %s", rsc->id, next ? next->details->uname : "N/A");
         demote_action(rsc, current, optional);
     }
     return true;
 }
 
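/*!
 * \internal
 * \brief Log an error and assert for a role transition that should never occur
 *
 * \param[in] rsc       Resource with invalid role transition
 * \param[in] next      Node where transition would have occurred
 * \param[in] optional  Whether actions should be optional (unused)
 *
 * \return false
 */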
 static bool
 RoleError(pe_resource_t *rsc, pe_node_t *next, bool optional)
 {
     CRM_ASSERT(rsc);
     crm_err("%s on %s", rsc->id, next ? next->details->uname : "N/A");
     CRM_CHECK(false, return false);
     return false;
 }
 
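/*!
 * \internal
 * \brief Do nothing for a role transition that requires no actions
 *
 * \param[in] rsc       Resource whose role is transitioning (for logging)
 * \param[in] next      Node for transition (unused)
 * \param[in] optional  Whether actions should be optional (unused)
 *
 * \return false
 */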
 static bool
 NullOp(pe_resource_t *rsc, pe_node_t *next, bool optional)
 {
     CRM_ASSERT(rsc);
     pe_rsc_trace(rsc, "%s", rsc->id);
    return false;
 }
 
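/*!
 * \internal
 * \brief Schedule deletion of a resource's history from a node
 *
 * \param[in] rsc       Resource whose history should be deleted
 * \param[in] node      Node to delete history from
 * \param[in] optional  Whether delete action should be optional
 * \param[in] data_set  Cluster working set
 *
 * \return TRUE if a delete action was scheduled, otherwise FALSE
 */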
 gboolean
DeleteRsc(pe_resource_t *rsc, pe_node_t *node, gboolean optional,
          pe_working_set_t *data_set)
 {
     if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         pe_rsc_trace(rsc, "Resource %s not deleted from %s: failed", rsc->id, node->details->uname);
         return FALSE;
 
     } else if (node == NULL) {
         pe_rsc_trace(rsc, "Resource %s not deleted: NULL node", rsc->id);
         return FALSE;
 
     } else if (node->details->unclean || node->details->online == FALSE) {
         pe_rsc_trace(rsc, "Resource %s not deleted from %s: unrunnable", rsc->id,
                      node->details->uname);
         return FALSE;
     }
 
     crm_notice("Removing %s from %s", rsc->id, node->details->uname);
 
     delete_action(rsc, node, optional);
 
     pcmk__order_resource_actions(rsc, RSC_STOP, rsc, RSC_DELETE,
                                  optional? pe_order_implies_then : pe_order_optional);
 
     pcmk__order_resource_actions(rsc, RSC_DELETE, rsc, RSC_START,
                                  optional? pe_order_implies_then : pe_order_optional);
 
     return TRUE;
 }
 
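/*!
 * \internal
 * \brief Add a primitive's clone instance, remote node, and container
 *        meta-attributes to an XML element
 *
 * \param[in] rsc  Resource whose meta-attributes should be added
 * \param[in] xml  XML element to add attributes to
 */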
 void
native_append_meta(pe_resource_t *rsc, xmlNode *xml)
 {
     char *value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_INCARNATION);
     pe_resource_t *parent;
 
     if (value) {
         char *name = NULL;
 
         name = crm_meta_name(XML_RSC_ATTR_INCARNATION);
         crm_xml_add(xml, name, value);
         free(name);
     }
 
     value = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_REMOTE_NODE);
     if (value) {
         char *name = NULL;
 
         name = crm_meta_name(XML_RSC_ATTR_REMOTE_NODE);
         crm_xml_add(xml, name, value);
         free(name);
     }
 
     for (parent = rsc; parent != NULL; parent = parent->parent) {
         if (parent->container) {
             crm_xml_add(xml, CRM_META"_"XML_RSC_ATTR_CONTAINER, parent->container->id);
         }
     }
 }
 
/*!
 * \internal
 * \brief Add a primitive's utilization to a table of utilization values
 *
 * Primitive implementation of resource_alloc_functions_t:add_utilization().
 * Only provisional (not yet assigned) resources contribute utilization.
 *
 * \param[in] rsc          Resource with utilization to add
 * \param[in] orig_rsc     Resource being assigned (for logging only)
 * \param[in] all_rscs     List of all resources (unused by primitives)
 * \param[in] utilization  Table of utilization values to add to
 */
 void
 pcmk__primitive_add_utilization(pe_resource_t *rsc, pe_resource_t *orig_rsc,
                                 GList *all_rscs, GHashTable *utilization)
 {
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return;
     }
 
     pe_rsc_trace(orig_rsc, "%s: Adding primitive %s as colocated utilization",
                  orig_rsc->id, rsc->id);
     pcmk__release_node_capacity(utilization, rsc);
 }
 
 /*!
  * \internal
  * \brief Get epoch time of node's shutdown attribute (or now if none)
  *
  * \param[in] node      Node to check
  * \param[in] data_set  Cluster working set
  *
  * \return Epoch time corresponding to shutdown attribute if set or now if not
  */
 static time_t
 shutdown_time(pe_node_t *node, pe_working_set_t *data_set)
 {
     const char *shutdown = pe_node_attribute_raw(node, XML_CIB_ATTR_SHUTDOWN);
     time_t result = 0;
 
     if (shutdown != NULL) {
         long long result_ll;
 
         if (pcmk__scan_ll(shutdown, &result_ll, 0LL) == pcmk_rc_ok) {
             result = (time_t) result_ll;
         }
     }
     return (result == 0)? get_effective_time(data_set) : result;
 }
 
/*!
 * \internal
 * \brief Apply a shutdown lock to a primitive, if appropriate
 *
 * Primitive implementation of resource_alloc_functions_t:shutdown_lock()
 *
 * \param[in] rsc  Resource to check for shutdown lock
 */
 void
 pcmk__primitive_shutdown_lock(pe_resource_t *rsc)
 {
     const char *class = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
 
     // Fence devices and remote connections can't be locked
     if (pcmk__str_eq(class, PCMK_RESOURCE_CLASS_STONITH, pcmk__str_null_matches)
         || pe__resource_is_remote_conn(rsc, rsc->cluster)) {
         return;
     }
 
     if (rsc->lock_node != NULL) {
         // The lock was obtained from resource history
 
         if (rsc->running_on != NULL) {
             /* The resource was started elsewhere even though it is now
              * considered locked. This shouldn't be possible, but as a
              * failsafe, we don't want to disturb the resource now.
              */
             pe_rsc_info(rsc,
                         "Cancelling shutdown lock because %s is already active",
                         rsc->id);
             pe__clear_resource_history(rsc, rsc->lock_node, rsc->cluster);
             rsc->lock_node = NULL;
             rsc->lock_time = 0;
         }
 
     // Only a resource active on exactly one node can be locked
     } else if (pcmk__list_of_1(rsc->running_on)) {
         pe_node_t *node = rsc->running_on->data;
 
         if (node->details->shutdown) {
             if (node->details->unclean) {
                 pe_rsc_debug(rsc, "Not locking %s to unclean %s for shutdown",
                              rsc->id, node->details->uname);
             } else {
                 rsc->lock_node = node;
                 rsc->lock_time = shutdown_time(node, rsc->cluster);
             }
         }
     }
 
     if (rsc->lock_node == NULL) {
         // No lock needed
         return;
     }
 
     if (rsc->cluster->shutdown_lock > 0) {
         time_t lock_expiration = rsc->lock_time + rsc->cluster->shutdown_lock;
 
         pe_rsc_info(rsc, "Locking %s to %s due to shutdown (expires @%lld)",
                     rsc->id, rsc->lock_node->details->uname,
                     (long long) lock_expiration);
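        /* Schedule a cluster recheck for one second past the lock expiration,
         * so the lock is sure to have expired when the scheduler reruns
         */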
         pe__update_recheck_time(++lock_expiration, rsc->cluster);
     } else {
         pe_rsc_info(rsc, "Locking %s to %s due to shutdown",
                     rsc->id, rsc->lock_node->details->uname);
     }
 
     // If resource is locked to one node, ban it from all other nodes
     for (GList *item = rsc->cluster->nodes; item != NULL; item = item->next) {
         pe_node_t *node = item->data;
 
        if (strcmp(node->details->uname,
                   rsc->lock_node->details->uname) != 0) {
             resource_location(rsc, node, -CRM_SCORE_INFINITY,
                               XML_CONFIG_ATTR_SHUTDOWN_LOCK, rsc->cluster);
         }
     }
 }