diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index 75f99743a2..40812aeb73 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -1,740 +1,741 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PE_INTERNAL__H
 #  define PE_INTERNAL__H
 
 #  include <stdbool.h>
 #  include <stdint.h>
 #  include <string.h>
 #  include <crm/msg_xml.h>
 #  include <crm/pengine/status.h>
 #  include <crm/pengine/remote_internal.h>
 #  include <crm/common/internal.h>
 #  include <crm/common/options_internal.h>
 #  include <crm/common/output_internal.h>
 
 const char *pe__resource_description(const pe_resource_t *rsc, uint32_t show_opts);
 
 enum pe__clone_flags {
     // Whether instances should be started sequentially
     pe__clone_ordered               = (1 << 0),
 
     // Whether promotion scores have been added
     pe__clone_promotion_added       = (1 << 1),
 
     // Whether promotion constraints have been added
     pe__clone_promotion_constrained = (1 << 2),
 };
 
 bool pe__clone_is_ordered(const pe_resource_t *clone);
 int pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag);
 
 
 enum pe__group_flags {
     pe__group_ordered       = (1 << 0), // Members start sequentially
     pe__group_colocated     = (1 << 1), // Members must be on same node
 };
 
 bool pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags);
 pe_resource_t *pe__last_group_member(const pe_resource_t *group);
 
 
 #  define pe_rsc_info(rsc, fmt, args...)  crm_log_tag(LOG_INFO,  rsc ? rsc->id : "<NULL>", fmt, ##args)
 #  define pe_rsc_debug(rsc, fmt, args...) crm_log_tag(LOG_DEBUG, rsc ? rsc->id : "<NULL>", fmt, ##args)
 #  define pe_rsc_trace(rsc, fmt, args...) crm_log_tag(LOG_TRACE, rsc ? rsc->id : "<NULL>", fmt, ##args)
 
 #  define pe_err(fmt...) do {           \
         was_processing_error = TRUE;    \
         pcmk__config_err(fmt);          \
     } while (0)
 
 #  define pe_warn(fmt...) do {          \
         was_processing_warning = TRUE;  \
         pcmk__config_warn(fmt);         \
     } while (0)
 
 #  define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); }
 #  define pe_proc_warn(fmt...) { was_processing_warning = TRUE; crm_warn(fmt); }
 
 #define pe__set_working_set_flags(working_set, flags_to_set) do {           \
         (working_set)->flags = pcmk__set_flags_as(__func__, __LINE__,       \
             LOG_TRACE, "Working set", crm_system_name,                      \
             (working_set)->flags, (flags_to_set), #flags_to_set);           \
     } while (0)
 
 #define pe__clear_working_set_flags(working_set, flags_to_clear) do {       \
         (working_set)->flags = pcmk__clear_flags_as(__func__, __LINE__,     \
             LOG_TRACE, "Working set", crm_system_name,                      \
             (working_set)->flags, (flags_to_clear), #flags_to_clear);       \
     } while (0)
 
 #define pe__set_resource_flags(resource, flags_to_set) do {                 \
         (resource)->flags = pcmk__set_flags_as(__func__, __LINE__,          \
             LOG_TRACE, "Resource", (resource)->id, (resource)->flags,       \
             (flags_to_set), #flags_to_set);                                 \
     } while (0)
 
 #define pe__clear_resource_flags(resource, flags_to_clear) do {             \
         (resource)->flags = pcmk__clear_flags_as(__func__, __LINE__,        \
             LOG_TRACE, "Resource", (resource)->id, (resource)->flags,       \
             (flags_to_clear), #flags_to_clear);                             \
     } while (0)
 
 #define pe__set_action_flags(action, flags_to_set) do {                     \
         (action)->flags = pcmk__set_flags_as(__func__, __LINE__,            \
                                              LOG_TRACE,                     \
                                              "Action", (action)->uuid,      \
                                              (action)->flags,               \
                                              (flags_to_set),                \
                                              #flags_to_set);                \
     } while (0)
 
 #define pe__clear_action_flags(action, flags_to_clear) do {                 \
         (action)->flags = pcmk__clear_flags_as(__func__, __LINE__,          \
                                                LOG_TRACE,                   \
                                                "Action", (action)->uuid,    \
                                                (action)->flags,             \
                                                (flags_to_clear),            \
                                                #flags_to_clear);            \
     } while (0)
 
 #define pe__set_raw_action_flags(action_flags, action_name, flags_to_set) do { \
         action_flags = pcmk__set_flags_as(__func__, __LINE__,               \
                                           LOG_TRACE, "Action", action_name, \
                                           (action_flags),                   \
                                           (flags_to_set), #flags_to_set);   \
     } while (0)
 
 #define pe__clear_raw_action_flags(action_flags, action_name, flags_to_clear) do { \
         action_flags = pcmk__clear_flags_as(__func__, __LINE__,             \
                                             LOG_TRACE,                      \
                                             "Action", action_name,          \
                                             (action_flags),                 \
                                             (flags_to_clear),               \
                                             #flags_to_clear);               \
     } while (0)
 
 #define pe__set_action_flags_as(function, line, action, flags_to_set) do {  \
         (action)->flags = pcmk__set_flags_as((function), (line),            \
                                              LOG_TRACE,                     \
                                              "Action", (action)->uuid,      \
                                              (action)->flags,               \
                                              (flags_to_set),                \
                                              #flags_to_set);                \
     } while (0)
 
 #define pe__clear_action_flags_as(function, line, action, flags_to_clear) do { \
         (action)->flags = pcmk__clear_flags_as((function), (line),          \
                                                LOG_TRACE,                   \
                                                "Action", (action)->uuid,    \
                                                (action)->flags,             \
                                                (flags_to_clear),            \
                                                #flags_to_clear);            \
     } while (0)
 
 #define pe__set_order_flags(order_flags, flags_to_set) do {                 \
         order_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                          "Ordering", "constraint",          \
                                          order_flags, (flags_to_set),       \
                                          #flags_to_set);                    \
     } while (0)
 
 #define pe__clear_order_flags(order_flags, flags_to_clear) do {               \
         order_flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                            "Ordering", "constraint",          \
                                            order_flags, (flags_to_clear),     \
                                            #flags_to_clear);                  \
     } while (0)
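 
 /* Usage sketch (editor's illustration, not part of the upstream header): these
  * wrappers update a flag group and log the change through pcmk__set_flags_as()
  * and pcmk__clear_flags_as(), using the stringified flag names in the trace
  * output. A typical call on a resource being taken out of management might
  * look like:
  *
  *     pe__clear_resource_flags(rsc, pe_rsc_managed);
  *     pe__set_resource_flags(rsc, pe_rsc_block);
  *
  * where "rsc" is assumed to be a pe_resource_t * from the current working set.
  */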
 
 // Some warnings we don't want to print on every transition
 
 enum pe_warn_once_e {
     pe_wo_blind         = (1 << 0),
     pe_wo_restart_type  = (1 << 1),
     pe_wo_role_after    = (1 << 2),
     pe_wo_poweroff      = (1 << 3),
     pe_wo_require_all   = (1 << 4),
     pe_wo_order_score   = (1 << 5),
     pe_wo_neg_threshold = (1 << 6),
     pe_wo_remove_after  = (1 << 7),
     pe_wo_ping_node     = (1 << 8),
     pe_wo_order_inst    = (1 << 9),
     pe_wo_coloc_inst    = (1 << 10),
     pe_wo_group_order   = (1 << 11),
     pe_wo_group_coloc   = (1 << 12),
     pe_wo_upstart       = (1 << 13),
     pe_wo_nagios        = (1 << 14),
 };
 
 extern uint32_t pe_wo;
 
 #define pe_warn_once(pe_wo_bit, fmt...) do {    \
         if (!pcmk_is_set(pe_wo, pe_wo_bit)) {  \
             if (pe_wo_bit == pe_wo_blind) {     \
                 crm_warn(fmt);                  \
             } else {                            \
                 pe_warn(fmt);                   \
             }                                   \
             pe_wo = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,       \
                                       "Warn-once", "logging", pe_wo,        \
                                       (pe_wo_bit), #pe_wo_bit);             \
         }                                       \
     } while (0)
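 
 /* Usage sketch (illustrative only): callers pass one of the enum pe_warn_once_e
  * bits plus a printf-style message, and the warning is logged only the first
  * time that bit is seen rather than on every transition, e.g.:
  *
  *     pe_warn_once(pe_wo_neg_threshold,
  *                  "Support for negative values here is deprecated");
  *
  * The message text is invented for illustration; only the bit names come from
  * the enum above.
  */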
 
 
 typedef struct pe__location_constraint_s {
     char *id;                           // Constraint XML ID
     pe_resource_t *rsc_lh;              // Resource being located
     enum rsc_role_e role_filter;        // Role to locate
     enum pe_discover_e discover_mode;   // Resource discovery
     GList *node_list_rh;              // List of pe_node_t*
 } pe__location_t;
 
 typedef struct pe__order_constraint_s {
     int id;
     uint32_t flags; // Group of enum pe_ordering flags
 
     void *lh_opaque;
     pe_resource_t *lh_rsc;
     pe_action_t *lh_action;
     char *lh_action_task;
 
     void *rh_opaque;
     pe_resource_t *rh_rsc;
     pe_action_t *rh_action;
     char *rh_action_task;
 } pe__ordering_t;
 
 const pe_resource_t *pe__const_top_resource(const pe_resource_t *rsc,
                                             bool include_bundle);
 
 int pe__clone_max(const pe_resource_t *clone);
 int pe__clone_node_max(const pe_resource_t *clone);
 int pe__clone_promoted_max(const pe_resource_t *clone);
 int pe__clone_promoted_node_max(const pe_resource_t *clone);
 void pe__create_clone_notifications(pe_resource_t *clone);
 void pe__free_clone_notification_data(pe_resource_t *clone);
 void pe__create_clone_notif_pseudo_ops(pe_resource_t *clone,
                                        pe_action_t *start, pe_action_t *started,
                                        pe_action_t *stop, pe_action_t *stopped);
 
 
 pe_action_t *pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task,
                                        bool optional, bool runnable);
 
 void pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting,
                                       bool any_demoting);
 
 bool pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node);
 
 void add_hash_param(GHashTable * hash, const char *name, const char *value);
 
 char *native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
                        pe_working_set_t * data_set);
 pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current);
 
 void pe_metadata(pcmk__output_t *out);
 void verify_pe_options(GHashTable * options);
 
 void native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed);
 
 gboolean native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set);
 
 pe_resource_t *native_find_rsc(pe_resource_t *rsc, const char *id, const pe_node_t *node,
                                int flags);
 
 gboolean native_active(pe_resource_t * rsc, gboolean all);
 gboolean group_active(pe_resource_t * rsc, gboolean all);
 gboolean clone_active(pe_resource_t * rsc, gboolean all);
 gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all);
 
 //! \deprecated This function will be removed in a future release
 void native_print(pe_resource_t *rsc, const char *pre_text, long options,
                   void *print_data);
 
 //! \deprecated This function will be removed in a future release
 void group_print(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data);
 
 //! \deprecated This function will be removed in a future release
 void clone_print(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data);
 
 //! \deprecated This function will be removed in a future release
 void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
                       void *print_data);
 
 gchar *pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
                                   const pe_node_t *node, uint32_t show_opts,
                                   const char *target_role, bool show_nodes);
 
 int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list,
                              const char *tag_name, size_t pairs_count, ...);
 char *pe__node_display_name(pe_node_t *node, bool print_detail);
 
 
 // Clone notifications (pe_notif.c)
 void pe__order_notifs_after_fencing(const pe_action_t *action,
                                     pe_resource_t *rsc,
                                     pe_action_t *stonith_op);
 
 
 static inline const char *
 pe__rsc_bool_str(const pe_resource_t *rsc, uint64_t rsc_flag)
 {
     return pcmk__btoa(pcmk_is_set(rsc->flags, rsc_flag));
 }
 
 int pe__clone_xml(pcmk__output_t *out, va_list args);
 int pe__clone_default(pcmk__output_t *out, va_list args);
 int pe__group_xml(pcmk__output_t *out, va_list args);
 int pe__group_default(pcmk__output_t *out, va_list args);
 int pe__bundle_xml(pcmk__output_t *out, va_list args);
 int pe__bundle_html(pcmk__output_t *out, va_list args);
 int pe__bundle_text(pcmk__output_t *out, va_list args);
 int pe__node_html(pcmk__output_t *out, va_list args);
 int pe__node_text(pcmk__output_t *out, va_list args);
 int pe__node_xml(pcmk__output_t *out, va_list args);
 int pe__resource_xml(pcmk__output_t *out, va_list args);
 int pe__resource_html(pcmk__output_t *out, va_list args);
 int pe__resource_text(pcmk__output_t *out, va_list args);
 
 void native_free(pe_resource_t * rsc);
 void group_free(pe_resource_t * rsc);
 void clone_free(pe_resource_t * rsc);
 void pe__free_bundle(pe_resource_t *rsc);
 
 enum rsc_role_e native_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e group_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e clone_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc,
                                           gboolean current);
 
 void pe__count_common(pe_resource_t *rsc);
 void pe__count_bundle(pe_resource_t *rsc);
 
 void common_free(pe_resource_t * rsc);
 
 pe_node_t *pe__copy_node(const pe_node_t *this_node);
 extern time_t get_effective_time(pe_working_set_t * data_set);
 
 /* Failure handling utilities (from failcounts.c) */
 
 // bit flags for fail count handling options
 enum pe_fc_flags_e {
     pe_fc_default   = (1 << 0),
     pe_fc_effective = (1 << 1), // don't count expired failures
     pe_fc_fillers   = (1 << 2), // if container, include filler failures in count
 };
 
 int pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc,
                      time_t *last_failure, uint32_t flags,
                      const xmlNode *xml_op);
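 
 /* Usage sketch (illustrative only): a caller interested in the effective fail
  * count, ignoring expired failures and including container fillers, might do:
  *
  *     time_t last_failure = 0;
  *     int failcount = pe_get_failcount(node, rsc, &last_failure,
  *                                      pe_fc_effective|pe_fc_fillers, NULL);
  *
  * where "node" and "rsc" are assumed to come from the working set being
  * processed.
  */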
 
 pe_action_t *pe__clear_failcount(pe_resource_t *rsc, const pe_node_t *node,
                                  const char *reason,
                                  pe_working_set_t *data_set);
 
 /* Functions for finding/counting a resource's active nodes */
 
 bool pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
                            pe_node_t **active, unsigned int *count_all,
                            unsigned int *count_clean);
 
 pe_node_t *pe__find_active_requires(const pe_resource_t *rsc,
                                     unsigned int *count);
 
 static inline pe_node_t *
 pe__current_node(const pe_resource_t *rsc)
 {
     return (rsc == NULL)? NULL : rsc->fns->active_node(rsc, NULL, NULL);
 }
 
 
 /* Binary-like operators for lists of nodes */
 extern void node_list_exclude(GHashTable * list, GList *list2, gboolean merge_scores);
 
 GHashTable *pe__node_list2table(const GList *list);
 
 static inline gpointer
 pe_hash_table_lookup(GHashTable * hash, gconstpointer key)
 {
     if (hash) {
         return g_hash_table_lookup(hash, key);
     }
     return NULL;
 }
 
 extern pe_action_t *get_pseudo_op(const char *name, pe_working_set_t * data_set);
 extern gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order);
 
 void pe__show_node_weights_as(const char *file, const char *function,
                               int line, bool to_log, const pe_resource_t *rsc,
                               const char *comment, GHashTable *nodes,
                               pe_working_set_t *data_set);
 
 #define pe__show_node_weights(level, rsc, text, nodes, data_set)    \
         pe__show_node_weights_as(__FILE__, __func__, __LINE__,      \
                                  (level), (rsc), (text), (nodes), (data_set))
 
 xmlNode *find_rsc_op_entry(const pe_resource_t *rsc, const char *key);
 
 pe_action_t *custom_action(pe_resource_t *rsc, char *key, const char *task,
                            const pe_node_t *on_node, gboolean optional,
                            gboolean save_action, pe_working_set_t *data_set);
 
 #  define delete_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DELETE, 0)
 #  define delete_action(rsc, node, optional) custom_action(		\
 		rsc, delete_key(rsc), CRMD_ACTION_DELETE, node,		\
 		optional, TRUE, rsc->cluster);
 
 #  define stopped_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOPPED, 0)
 #  define stopped_action(rsc, node, optional) custom_action(		\
 		rsc, stopped_key(rsc), CRMD_ACTION_STOPPED, node,	\
 		optional, TRUE, rsc->cluster);
 
 #  define stop_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOP, 0)
 #  define stop_action(rsc, node, optional) custom_action(			\
 		rsc, stop_key(rsc), CRMD_ACTION_STOP, node,		\
 		optional, TRUE, rsc->cluster);
 
 #  define reload_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_RELOAD_AGENT, 0)
 #  define start_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_START, 0)
 #  define start_action(rsc, node, optional) custom_action(		\
 		rsc, start_key(rsc), CRMD_ACTION_START, node,		\
 		optional, TRUE, rsc->cluster)
 
 #  define started_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STARTED, 0)
 #  define started_action(rsc, node, optional) custom_action(		\
 		rsc, started_key(rsc), CRMD_ACTION_STARTED, node,	\
 		optional, TRUE, rsc->cluster)
 
 #  define promote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTE, 0)
 #  define promote_action(rsc, node, optional) custom_action(		\
 		rsc, promote_key(rsc), CRMD_ACTION_PROMOTE, node,	\
 		optional, TRUE, rsc->cluster)
 
 #  define promoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTED, 0)
 #  define promoted_action(rsc, node, optional) custom_action(		\
 		rsc, promoted_key(rsc), CRMD_ACTION_PROMOTED, node,	\
 		optional, TRUE, rsc->cluster)
 
 #  define demote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTE, 0)
 #  define demote_action(rsc, node, optional) custom_action(		\
 		rsc, demote_key(rsc), CRMD_ACTION_DEMOTE, node,		\
 		optional, TRUE, rsc->cluster)
 
 #  define demoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTED, 0)
 #  define demoted_action(rsc, node, optional) custom_action(		\
 		rsc, demoted_key(rsc), CRMD_ACTION_DEMOTED, node,	\
 		optional, TRUE, rsc->cluster)
 
 extern int pe_get_configured_timeout(pe_resource_t *rsc, const char *action,
                                      pe_working_set_t *data_set);
 
 pe_action_t *find_first_action(const GList *input, const char *uuid,
                                const char *task, const pe_node_t *on_node);
 
 enum action_tasks get_complex_task(const pe_resource_t *rsc, const char *name);
 
 extern GList *find_actions(GList *input, const char *key, const pe_node_t *on_node);
 GList *find_actions_exact(GList *input, const char *key,
                           const pe_node_t *on_node);
 GList *pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
                             const char *task, bool require_node);
 
 extern void pe_free_action(pe_action_t * action);
 
 void resource_location(pe_resource_t *rsc, const pe_node_t *node, int score,
                        const char *tag, pe_working_set_t *data_set);
 
 extern int pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b,
                            bool same_node_default);
 extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
 gboolean get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role);
 void pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role,
                        const char *why);
 
 pe_resource_t *find_clone_instance(const pe_resource_t *rsc,
                                    const char *sub_id);
 
 extern void destroy_ticket(gpointer data);
 extern pe_ticket_t *ticket_new(const char *ticket_id, pe_working_set_t * data_set);
 
 // Functions for manipulating resource names
 const char *pe_base_name_end(const char *id);
 char *clone_strip(const char *last_rsc_id);
 char *clone_zero(const char *last_rsc_id);
 
 static inline bool
 pe_base_name_eq(const pe_resource_t *rsc, const char *id)
 {
     if (id && rsc && rsc->id) {
         // Number of characters in rsc->id before any clone suffix
         size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1;
 
         return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len);
     }
     return false;
 }
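 
 /* Sketch of how these helpers relate (illustrative; the IDs are invented):
  * clone instance IDs carry a ":<instance>" suffix, so for "myclone:2" one
  * would expect roughly:
  *
  *     char *base = clone_strip("myclone:2");       // "myclone"
  *     char *zero = clone_zero("myclone:2");        // "myclone:0"
  *     bool same = pe_base_name_eq(rsc, "myclone"); // true if rsc is an instance
  *
  *     free(base);
  *     free(zero);
  */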
 
 int pe__target_rc_from_xml(const xmlNode *xml_op);
 
 gint pe__cmp_node_name(gconstpointer a, gconstpointer b);
 bool is_set_recursive(const pe_resource_t *rsc, long long flag, bool any);
 
 enum rsc_digest_cmp_val {
     /*! Digests are the same */
     RSC_DIGEST_MATCH = 0,
     /*! Params that require a restart changed */
     RSC_DIGEST_RESTART,
     /*! Some parameter changed.  */
     RSC_DIGEST_ALL,
     /*! rsc op didn't have a digest associated with it, so
      *  it is unknown if parameters changed or not. */
     RSC_DIGEST_UNKNOWN,
 };
 
 typedef struct op_digest_cache_s {
     enum rsc_digest_cmp_val rc;
     xmlNode *params_all;
     xmlNode *params_secure;
     xmlNode *params_restart;
     char *digest_all_calc;
     char *digest_secure_calc;
     char *digest_restart_calc;
 } op_digest_cache_t;
 
 op_digest_cache_t *pe__calculate_digests(pe_resource_t *rsc, const char *task,
                                          guint *interval_ms,
                                          const pe_node_t *node,
                                          const xmlNode *xml_op,
                                          GHashTable *overrides,
                                          bool calc_secure,
                                          pe_working_set_t *data_set);
 
 void pe__free_digests(gpointer ptr);
 
 op_digest_cache_t *rsc_action_digest_cmp(pe_resource_t *rsc,
                                          const xmlNode *xml_op,
                                          pe_node_t *node,
                                          pe_working_set_t *data_set);
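 
 /* Usage sketch (illustrative only): the comparison result is typically
  * inspected via the rc member, e.g.:
  *
  *     op_digest_cache_t *digests = rsc_action_digest_cmp(rsc, xml_op, node,
  *                                                        data_set);
  *
  *     if (digests->rc == RSC_DIGEST_RESTART) {
  *         // restart-required parameters changed since the recorded operation
  *     }
  *
  * "rsc", "xml_op", "node" and "data_set" are assumed to be supplied by the
  * caller's history-unpacking code.
  */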
 
 pe_action_t *pe_fence_op(pe_node_t *node, const char *op, bool optional,
                          const char *reason, bool priority_delay,
                          pe_working_set_t *data_set);
 void trigger_unfencing(pe_resource_t *rsc, pe_node_t *node,
                        const char *reason, pe_action_t *dependency,
                        pe_working_set_t *data_set);
 
 char *pe__action2reason(const pe_action_t *action, enum pe_action_flags flag);
 void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite);
 void pe__add_action_expected_result(pe_action_t *action, int expected_result);
 
 void pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
 void pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
 void pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag);
 
 gboolean add_tag_ref(GHashTable * tags, const char * tag_name,  const char * obj_ref);
 
 //! \deprecated This function will be removed in a future release
 void print_rscs_brief(GList *rsc_list, const char * pre_text, long options,
                       void * print_data, gboolean print_all);
 int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, unsigned int options);
 void pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, const char *reason, bool priority_delay);
 
 pe_node_t *pe_create_node(const char *id, const char *uname, const char *type,
                           const char *score, pe_working_set_t * data_set);
 
 //! \deprecated This function will be removed in a future release
 void common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
                   const pe_node_t *node, long options, void *print_data);
 int pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc,
                            const char *name, const pe_node_t *node,
                            unsigned int options);
 int pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
                            const char *name, const pe_node_t *node,
                            unsigned int options);
 
 //! A single instance of a bundle
 typedef struct {
     int offset;                 //!< 0-origin index of this instance in bundle
     char *ipaddr;               //!< IP address associated with this instance
     pe_node_t *node;            //!< Node created for this instance
     pe_resource_t *ip;          //!< IP address resource for ipaddr
     pe_resource_t *child;       //!< Instance of bundled resource
     pe_resource_t *container;   //!< Container associated with this instance
     pe_resource_t *remote;      //!< Pacemaker Remote connection into container
 } pe__bundle_replica_t;
 
 GList *pe__bundle_containers(const pe_resource_t *bundle);
 
 int pe__bundle_max(const pe_resource_t *rsc);
 bool pe__node_is_bundle_instance(const pe_resource_t *bundle,
                                  const pe_node_t *node);
 pe_resource_t *pe__bundled_resource(const pe_resource_t *rsc);
+const pe_resource_t *pe__get_rsc_in_container(const pe_resource_t *instance);
 void pe__foreach_bundle_replica(const pe_resource_t *bundle,
                                 bool (*fn)(pe__bundle_replica_t *, void *),
                                 void *user_data);
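 
 /* Callback sketch (hypothetical helper; assumes iteration continues while the
  * callback returns true):
  *
  *     static bool
  *     count_active_containers(pe__bundle_replica_t *replica, void *user_data)
  *     {
  *         unsigned int *count = user_data;
  *
  *         if ((replica->container != NULL)
  *             && (replica->container->running_on != NULL)) {
  *             (*count)++;
  *         }
  *         return true;
  *     }
  *
  *     // unsigned int n = 0;
  *     // pe__foreach_bundle_replica(bundle, count_active_containers, &n);
  */
 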
 pe_resource_t *pe__find_bundle_replica(const pe_resource_t *bundle,
                                        const pe_node_t *node);
 bool pe__bundle_needs_remote_name(pe_resource_t *rsc);
 const char *pe__add_bundle_remote_name(pe_resource_t *rsc,
                                        pe_working_set_t *data_set,
                                        xmlNode *xml, const char *field);
 const char *pe_node_attribute_calculated(const pe_node_t *node,
                                          const char *name,
                                          const pe_resource_t *rsc);
 const char *pe_node_attribute_raw(const pe_node_t *node, const char *name);
 bool pe__is_universal_clone(const pe_resource_t *rsc,
                             const pe_working_set_t *data_set);
 void pe__add_param_check(const xmlNode *rsc_op, pe_resource_t *rsc,
                          pe_node_t *node, enum pe_check_parameters,
                          pe_working_set_t *data_set);
 void pe__foreach_param_check(pe_working_set_t *data_set,
                              void (*cb)(pe_resource_t*, pe_node_t*,
                                         const xmlNode*,
                                         enum pe_check_parameters));
 void pe__free_param_checks(pe_working_set_t *data_set);
 
 bool pe__shutdown_requested(const pe_node_t *node);
 void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set);
 
 /*!
  * \internal
  * \brief Register xml formatting message functions.
  *
  * \param[in,out] out  Output object to register messages with
  */
 void pe__register_messages(pcmk__output_t *out);
 
 void pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
                                 const pe_rule_eval_data_t *rule_data,
                                 GHashTable *hash, const char *always_first,
                                 gboolean overwrite, pe_working_set_t *data_set);
 
 bool pe__resource_is_disabled(const pe_resource_t *rsc);
 pe_action_t *pe__clear_resource_history(pe_resource_t *rsc,
                                         const pe_node_t *node,
                                         pe_working_set_t *data_set);
 
 GList *pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name);
 GList *pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name);
 bool pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc, const char *tag);
 bool pe__uname_has_tag(pe_working_set_t *data_set, const char *node, const char *tag);
 
 bool pe__rsc_running_on_only(const pe_resource_t *rsc, const pe_node_t *node);
 bool pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list);
 GList *pe__filter_rsc_list(GList *rscs, GList *filter);
 GList * pe__build_node_name_list(pe_working_set_t *data_set, const char *s);
 GList * pe__build_rsc_list(pe_working_set_t *data_set, const char *s);
 
 bool pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node);
 
 gboolean pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                                 gboolean check_parent);
 gboolean pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                                gboolean check_parent);
 gboolean pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                                gboolean check_parent);
 gboolean pe__native_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                                 gboolean check_parent);
 
 xmlNode *pe__failed_probe_for_rsc(const pe_resource_t *rsc, const char *name);
 
 const char *pe__clone_child_id(const pe_resource_t *rsc);
 
 int pe__sum_node_health_scores(const pe_node_t *node, int base_health);
 int pe__node_health(pe_node_t *node);
 
 static inline enum pcmk__health_strategy
 pe__health_strategy(pe_working_set_t *data_set)
 {
     return pcmk__parse_health_strategy(pe_pref(data_set->config_hash,
                                                PCMK__OPT_NODE_HEALTH_STRATEGY));
 }
 
 static inline int
 pe__health_score(const char *option, pe_working_set_t *data_set)
 {
     return char2score(pe_pref(data_set->config_hash, option));
 }
 
 /*!
  * \internal
  * \brief Return a string suitable for logging as a node name
  *
  * \param[in] node  Node to return a node name string for
  *
  * \return Node name if available, otherwise node ID if available,
  *         otherwise "unspecified node" if node is NULL or "unidentified node"
  *         if node has neither a name nor ID.
  */
 static inline const char *
 pe__node_name(const pe_node_t *node)
 {
     if (node == NULL) {
         return "unspecified node";
 
     } else if (node->details->uname != NULL) {
         return node->details->uname;
 
     } else if (node->details->id != NULL) {
         return node->details->id;
 
     } else {
         return "unidentified node";
     }
 }
 
 /*!
  * \internal
  * \brief Check whether two node objects refer to the same node
  *
  * \param[in] node1  First node object to compare
  * \param[in] node2  Second node object to compare
  *
  * \return true if \p node1 and \p node2 refer to the same node
  */
 static inline bool
 pe__same_node(const pe_node_t *node1, const pe_node_t *node2)
 {
     return (node1 != NULL) && (node2 != NULL)
            && (node1->details == node2->details);
 }
 
 /*!
  * \internal
  * \brief Get the operation key from an action history entry
  *
  * \param[in] xml  Action history entry
  *
  * \return Entry's operation key
  */
 static inline const char *
 pe__xe_history_key(const xmlNode *xml)
 {
     if (xml == NULL) {
         return NULL;
     } else {
         /* @COMPAT Pacemaker <= 1.1.5 did not add the key, and used the ID
          * instead. Checking for that allows us to process old saved CIBs,
          * including some regression tests.
          */
         const char *key = crm_element_value(xml, XML_LRM_ATTR_TASK_KEY);
 
         return pcmk__str_empty(key)? ID(xml) : key;
     }
 }
 
 #endif
diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h
index 8af35958fb..e335c1a63a 100644
--- a/lib/pacemaker/libpacemaker_private.h
+++ b/lib/pacemaker/libpacemaker_private.h
@@ -1,1016 +1,1013 @@
 /*
  * Copyright 2021-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__LIBPACEMAKER_PRIVATE__H
 #  define PCMK__LIBPACEMAKER_PRIVATE__H
 
 /* This header is for the sole use of libpacemaker, so that functions can be
  * declared with G_GNUC_INTERNAL for efficiency.
  */
 
 #include <crm/pengine/pe_types.h> // pe_action_t, pe_node_t, pe_working_set_t
 
 // Flags to modify the behavior of add_colocated_node_scores()
 enum pcmk__coloc_select {
     // With no other flags, apply all "with this" colocations
     pcmk__coloc_select_default      = 0,
 
     // Apply "this with" colocations instead of "with this" colocations
     pcmk__coloc_select_this_with    = (1 << 0),
 
     // Apply only colocations with non-negative scores
     pcmk__coloc_select_nonnegative  = (1 << 1),
 
     // Apply only colocations with at least one matching node
     pcmk__coloc_select_active       = (1 << 2),
 };
 
 // Flags the update_ordered_actions() method can return
 enum pcmk__updated {
     pcmk__updated_none      = 0,        // Nothing changed
     pcmk__updated_first     = (1 << 0), // First action was updated
     pcmk__updated_then      = (1 << 1), // Then action was updated
 };
 
 #define pcmk__set_updated_flags(au_flags, action, flags_to_set) do {        \
         au_flags = pcmk__set_flags_as(__func__, __LINE__,                   \
                                       LOG_TRACE, "Action update",           \
                                       (action)->uuid, au_flags,             \
                                       (flags_to_set), #flags_to_set);       \
     } while (0)
 
 #define pcmk__clear_updated_flags(au_flags, action, flags_to_clear) do {    \
         au_flags = pcmk__clear_flags_as(__func__, __LINE__,                 \
                                         LOG_TRACE, "Action update",         \
                                         (action)->uuid, au_flags,           \
                                         (flags_to_clear), #flags_to_clear); \
     } while (0)
 
 // Resource allocation methods
 struct resource_alloc_functions_s {
     /*!
      * \internal
      * \brief Assign a resource to a node
      *
      * \param[in,out] rsc     Resource to assign to a node
      * \param[in]     prefer  Node to prefer, if all else is equal
      *
      * \return Node that \p rsc is assigned to, if assigned entirely to one node
      */
     pe_node_t *(*assign)(pe_resource_t *rsc, const pe_node_t *prefer);
 
     /*!
      * \internal
      * \brief Create all actions needed for a given resource
      *
      * \param[in,out] rsc  Resource to create actions for
      */
     void (*create_actions)(pe_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Schedule any probes needed for a resource on a node
      *
      * \param[in,out] rsc   Resource to create probe for
      * \param[in,out] node  Node to create probe on
      *
      * \return true if any probe was created, otherwise false
      */
     bool (*create_probe)(pe_resource_t *rsc, pe_node_t *node);
 
     /*!
      * \internal
      * \brief Create implicit constraints needed for a resource
      *
      * \param[in,out] rsc  Resource to create implicit constraints for
      */
     void (*internal_constraints)(pe_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Apply a colocation's score to node weights or resource priority
      *
      * Given a colocation constraint, apply its score to the dependent's
      * allowed node weights (if we are still placing resources) or priority (if
      * we are choosing promotable clone instance roles).
      *
      * \param[in,out] dependent      Dependent resource in colocation
      * \param[in]     primary        Primary resource in colocation
      * \param[in]     colocation     Colocation constraint to apply
      * \param[in]     for_dependent  true if called on behalf of dependent
      */
     void (*apply_coloc_score) (pe_resource_t *dependent,
                                const pe_resource_t *primary,
                                const pcmk__colocation_t *colocation,
                                bool for_dependent);
 
     /*!
      * \internal
      * \brief Create list of all resources in colocations with a given resource
      *
      * Given a resource, create a list of all resources involved in mandatory
      * colocations with it, whether directly or indirectly via chained colocations.
      *
      * \param[in]     rsc             Resource to add to colocated list
      * \param[in]     orig_rsc        Resource originally requested
      * \param[in,out] colocated_rscs  Existing list
      *
      * \return List of given resource and all resources involved in colocations
      *
      * \note This function is recursive; top-level callers should pass NULL as
      *       \p colocated_rscs and \p orig_rsc, and the desired resource as
      *       \p rsc. The recursive calls will use other values.
      */
     GList *(*colocated_resources)(const pe_resource_t *rsc,
                                   const pe_resource_t *orig_rsc,
                                   GList *colocated_rscs);
 
     /*!
      * \internal
      * \brief Add colocations affecting a resource as primary to a list
      *
      * Given a resource being assigned (\p orig_rsc) and a resource somewhere in
      * its chain of ancestors (\p rsc, which may be \p orig_rsc), get
      * colocations that affect the ancestor as primary and should affect the
      * resource, and add them to a given list.
      *
      * \param[in]     rsc       Resource whose colocations should be added
      * \param[in]     orig_rsc  Affected resource (\p rsc or a descendant)
      * \param[in,out] list      List of colocations to add to
      *
      * \note All arguments should be non-NULL.
      * \note The pcmk__with_this_colocations() wrapper should usually be used
      *       instead of using this method directly.
      */
     void (*with_this_colocations)(const pe_resource_t *rsc,
                                   const pe_resource_t *orig_rsc, GList **list);
 
     /*!
      * \internal
      * \brief Add colocations affecting a resource as dependent to a list
      *
      * Given a resource being assigned (\p orig_rsc) and a resource somewhere in
      * its chain of ancestors (\p rsc, which may be \p orig_rsc), get
      * colocations that affect the ancestor as dependent and should affect the
      * resource, and add them to a given list.
      *
      * \param[in]     rsc       Resource whose colocations should be added
      * \param[in]     orig_rsc  Affected resource (\p rsc or a descendant)
      * \param[in,out] list      List of colocations to add to
      *
      * \note All arguments should be non-NULL.
      * \note The pcmk__this_with_colocations() wrapper should usually be used
      *       instead of using this method directly.
      */
     void (*this_with_colocations)(const pe_resource_t *rsc,
                                   const pe_resource_t *orig_rsc, GList **list);
 
     /*!
      * \internal
      * \brief Update nodes with scores of colocated resources' nodes
      *
      * Given a table of nodes and a resource, update the nodes' scores with the
      * scores of the best nodes matching the attribute used for each of the
      * resource's relevant colocations.
      *
      * \param[in,out] rsc      Resource to check colocations for
      * \param[in]     log_id   Resource ID to use in logs (if NULL, use \p rsc ID)
      * \param[in,out] nodes    Nodes to update
      * \param[in]     attr     Colocation attribute (NULL to use default)
      * \param[in]     factor   Incorporate scores multiplied by this factor
      * \param[in]     flags    Bitmask of enum pcmk__coloc_select values
      *
      * \note The caller remains responsible for freeing \p *nodes.
      */
     void (*add_colocated_node_scores)(pe_resource_t *rsc, const char *log_id,
                                       GHashTable **nodes, const char *attr,
                                       float factor, uint32_t flags);
 
     /*!
      * \internal
      * \brief Apply a location constraint to a resource's allowed node scores
      *
      * \param[in,out] rsc       Resource to apply constraint to
      * \param[in,out] location  Location constraint to apply
      */
     void (*apply_location)(pe_resource_t *rsc, pe__location_t *location);
 
     /*!
      * \internal
      * \brief Return action flags for a given resource action
      *
      * \param[in,out] action  Action to get flags for
      * \param[in]     node    If not NULL, limit effects to this node
      *
      * \return Flags appropriate to \p action on \p node
      * \note For primitives, this will be the same as action->flags regardless
      *       of node. For collective resources, the flags can differ due to
      *       multiple instances possibly being involved.
      */
     enum pe_action_flags (*action_flags)(pe_action_t *action,
                                          const pe_node_t *node);
 
     /*!
      * \internal
      * \brief Update two actions according to an ordering between them
      *
      * Given information about an ordering of two actions, update the actions'
      * flags (and runnable_before members if appropriate) according to the
      * ordering. Effects may cascade to other orderings involving the actions
      * as well.
      *
      * \param[in,out] first     'First' action in an ordering
      * \param[in,out] then      'Then' action in an ordering
      * \param[in]     node      If not NULL, limit scope of ordering to this
      *                          node (only used when interleaving instances)
      * \param[in]     flags     Action flags for \p first for ordering purposes
      * \param[in]     filter    Action flags to limit scope of certain updates
      *                          (may include pe_action_optional to affect only
      *                          mandatory actions, and pe_action_runnable to
      *                          affect only runnable actions)
      * \param[in]     type      Group of enum pe_ordering flags to apply
      * \param[in,out] data_set  Cluster working set
      *
      * \return Group of enum pcmk__updated flags indicating what was updated
      */
     uint32_t (*update_ordered_actions)(pe_action_t *first, pe_action_t *then,
                                        const pe_node_t *node, uint32_t flags,
                                        uint32_t filter, uint32_t type,
                                        pe_working_set_t *data_set);
 
     void (*output_actions)(pe_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Add a resource's actions to the transition graph
      *
      * \param[in,out] rsc  Resource whose actions should be added
      */
     void (*add_actions_to_graph)(pe_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Add meta-attributes relevant to transition graph actions to XML
      *
      * If a given resource supports variant-specific meta-attributes that are
      * needed for transition graph actions, add them to a given XML element.
      *
      * \param[in]     rsc  Resource whose meta-attributes should be added
      * \param[in,out] xml  Transition graph action attributes XML to add to
      */
     void (*add_graph_meta)(const pe_resource_t *rsc, xmlNode *xml);
 
     /*!
      * \internal
      * \brief Add a resource's utilization to a table of utilization values
      *
      * This function is used when summing the utilization of a resource and all
      * resources colocated with it, to determine whether a node has sufficient
      * capacity. Given a resource and a table of utilization values, it will add
      * the resource's utilization to the existing values, if the resource has
      * not yet been allocated to a node.
      *
      * \param[in]     rsc          Resource with utilization to add
      * \param[in]     orig_rsc     Resource being allocated (for logging only)
      * \param[in]     all_rscs     List of all resources that will be summed
      * \param[in,out] utilization  Table of utilization values to add to
      */
     void (*add_utilization)(const pe_resource_t *rsc,
                             const pe_resource_t *orig_rsc, GList *all_rscs,
                             GHashTable *utilization);
 
     /*!
      * \internal
      * \brief Apply a shutdown lock for a resource, if appropriate
      *
      * \param[in,out] rsc       Resource to check for shutdown lock
      */
     void (*shutdown_lock)(pe_resource_t *rsc);
 };
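 
 /* Dispatch sketch (editor's illustration): these methods are normally reached
  * through a resource's method table rather than called directly, roughly:
  *
  *     pe_node_t *chosen = rsc->cmds->assign(rsc, NULL);
  *
  *     rsc->cmds->create_actions(rsc);
  *     rsc->cmds->internal_constraints(rsc);
  *
  * assuming rsc->cmds points at the resource_alloc_functions_t appropriate for
  * the resource's variant.
  */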
 
 // Actions (pcmk_sched_actions.c)
 
 G_GNUC_INTERNAL
 void pcmk__update_action_for_orderings(pe_action_t *action,
                                        pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then,
                                       const pe_node_t *node, uint32_t flags,
                                       uint32_t filter, uint32_t type,
                                       pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__log_action(const char *pre_text, const pe_action_t *action,
                       bool details);
 
 G_GNUC_INTERNAL
 pe_action_t *pcmk__new_cancel_action(pe_resource_t *rsc, const char *name,
                                      guint interval_ms, const pe_node_t *node);
 
 G_GNUC_INTERNAL
 pe_action_t *pcmk__new_shutdown_action(pe_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__action_locks_rsc_to_node(const pe_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__deduplicate_action_inputs(pe_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__output_actions(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
                                const xmlNode *xml_op);
 
 G_GNUC_INTERNAL
 void pcmk__handle_rsc_config_changes(pe_working_set_t *data_set);
 
 
 // Recurring actions (pcmk_sched_recurring.c)
 
 G_GNUC_INTERNAL
 void pcmk__create_recurring_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__schedule_cancel(pe_resource_t *rsc, const char *call_id,
                            const char *task, guint interval_ms,
                            const pe_node_t *node, const char *reason);
 
 G_GNUC_INTERNAL
 void pcmk__reschedule_recurring(pe_resource_t *rsc, const char *task,
                                 guint interval_ms, pe_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__action_is_recurring(const pe_action_t *action);
 
 
 // Producing transition graphs (pcmk_graph_producer.c)
 
 G_GNUC_INTERNAL
 bool pcmk__graph_has_loop(const pe_action_t *init_action,
                           const pe_action_t *action,
                           pe_action_wrapper_t *input);
 
 G_GNUC_INTERNAL
 void pcmk__add_rsc_actions_to_graph(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__create_graph(pe_working_set_t *data_set);
 
 
 // Fencing (pcmk_sched_fencing.c)
 
 G_GNUC_INTERNAL
 void pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__order_vs_unfence(const pe_resource_t *rsc, pe_node_t *node,
                             pe_action_t *action, enum pe_ordering order);
 
 G_GNUC_INTERNAL
 void pcmk__fence_guest(pe_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__node_unfenced(const pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data);
 
 
 // Injected scheduler inputs (pcmk_sched_injections.c)
 
 void pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
                                   const pcmk_injections_t *injections);
 
 
 // Constraints of any type (pcmk_sched_constraints.c)
 
 G_GNUC_INTERNAL
 pe_resource_t *pcmk__find_constraint_resource(GList *rsc_list, const char *id);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__expand_tags_in_sets(xmlNode *xml_obj,
                                    const pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__valid_resource_or_tag(const pe_working_set_t *data_set,
                                  const char *id, pe_resource_t **rsc,
                                  pe_tag_t **tag);
 
 G_GNUC_INTERNAL
 bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
                       bool convert_rsc, const pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__create_internal_constraints(pe_working_set_t *data_set);
 
 
 // Location constraints
 
 G_GNUC_INTERNAL
 void pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 pe__location_t *pcmk__new_location(const char *id, pe_resource_t *rsc,
                                    int node_weight, const char *discover_mode,
                                    pe_node_t *node,
                                    pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__apply_locations(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__apply_location(pe_resource_t *rsc, pe__location_t *constraint);
 
 
 // Colocation constraints (pcmk_sched_colocation.c)
 
 enum pcmk__coloc_affects {
     pcmk__coloc_affects_nothing = 0,
     pcmk__coloc_affects_location,
     pcmk__coloc_affects_role,
 };
 
 G_GNUC_INTERNAL
 enum pcmk__coloc_affects pcmk__colocation_affects(const pe_resource_t *dependent,
                                                   const pe_resource_t *primary,
                                                   const pcmk__colocation_t *colocation,
                                                   bool preview);
 
 G_GNUC_INTERNAL
 void pcmk__apply_coloc_to_weights(pe_resource_t *dependent,
                                   const pe_resource_t *primary,
                                   const pcmk__colocation_t *colocation);
 
 G_GNUC_INTERNAL
 void pcmk__apply_coloc_to_priority(pe_resource_t *dependent,
                                    const pe_resource_t *primary,
                                    const pcmk__colocation_t *colocation);
 
 G_GNUC_INTERNAL
 void pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
                                      GHashTable **nodes, const char *attr,
                                      float factor, uint32_t flags);
 
 G_GNUC_INTERNAL
 void pcmk__add_dependent_scores(gpointer data, gpointer user_data);
 
 G_GNUC_INTERNAL
 void pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation);
 
 G_GNUC_INTERNAL
 void pcmk__add_this_with_list(GList **list, GList *addition);
 
 G_GNUC_INTERNAL
 void pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation);
 
 G_GNUC_INTERNAL
 void pcmk__add_with_this_list(GList **list, GList *addition);
 
 G_GNUC_INTERNAL
 void pcmk__new_colocation(const char *id, const char *node_attr, int score,
                           pe_resource_t *dependent, pe_resource_t *primary,
                           const char *dependent_role, const char *primary_role,
                           bool influence, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__block_colocation_dependents(pe_action_t *action,
                                        pe_working_set_t *data_set);
 
 /*!
  * \internal
  * \brief Check whether colocation's dependent preferences should be considered
  *
  * \param[in] colocation  Colocation constraint
  * \param[in] rsc         Primary instance (normally colocation->primary,
  *                        which is what a NULL argument is treated as; for
  *                        clones or bundles with multiple instances, this can
  *                        be a particular instance)
  *
  * \return true if colocation influence should be effective, otherwise false
  */
 static inline bool
 pcmk__colocation_has_influence(const pcmk__colocation_t *colocation,
                                const pe_resource_t *rsc)
 {
     if (rsc == NULL) {
         rsc = colocation->primary;
     }
 
     /* A bundle replica colocates its remote connection with its container,
      * using a finite score so that the container can run on Pacemaker Remote
      * nodes.
      *
      * Moving a connection is lightweight and does not interrupt the service,
      * while moving a container is heavyweight and does interrupt the service,
      * so don't move a clean, active container based solely on the preferences
      * of its connection.
      *
      * This also avoids problematic scenarios where two containers want to
      * perpetually swap places.
      */
     if (pcmk_is_set(colocation->dependent->flags, pe_rsc_allow_remote_remotes)
         && !pcmk_is_set(rsc->flags, pe_rsc_failed)
         && pcmk__list_of_1(rsc->running_on)) {
         return false;
     }
 
     /* The dependent in a colocation influences the primary's location
      * if the influence option is true or the primary is not yet active.
      */
     return colocation->influence || (rsc->running_on == NULL);
 }
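 
 /* Usage sketch (illustrative only): callers walking a list of
  * pcmk__colocation_t entries can use this to skip colocations whose dependent
  * preferences should not move an already-active primary, e.g.:
  *
  *     for (GList *iter = colocations; iter != NULL; iter = iter->next) {
  *         pcmk__colocation_t *colocation = iter->data;
  *
  *         if (!pcmk__colocation_has_influence(colocation, NULL)) {
  *             continue; // dependent's preferences are ignored here
  *         }
  *         // ... apply the colocation ...
  *     }
  *
  * The "colocations" list is assumed to have been gathered by the caller.
  */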
 
 
 // Ordering constraints (pcmk_sched_ordering.c)
 
 G_GNUC_INTERNAL
 void pcmk__new_ordering(pe_resource_t *first_rsc, char *first_task,
                         pe_action_t *first_action, pe_resource_t *then_rsc,
                         char *then_task, pe_action_t *then_action,
                         uint32_t flags, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__disable_invalid_orderings(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__order_stops_before_shutdown(pe_node_t *node,
                                        pe_action_t *shutdown_op);
 
 G_GNUC_INTERNAL
 void pcmk__apply_orderings(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__order_after_each(pe_action_t *after, GList *list);
 
 
 /*!
  * \internal
  * \brief Create a new ordering between two resource actions
  *
  * \param[in,out] first_rsc   Resource for 'first' action
  * \param[in,out] first_task  Action key for 'first' action
  * \param[in]     then_rsc    Resource for 'then' action
  * \param[in,out] then_task   Action key for 'then' action
  * \param[in]     flags       Bitmask of enum pe_ordering flags
  */
 #define pcmk__order_resource_actions(first_rsc, first_task,                 \
                                      then_rsc, then_task, flags)            \
     pcmk__new_ordering((first_rsc),                                         \
                        pcmk__op_key((first_rsc)->id, (first_task), 0),      \
                        NULL,                                                \
                        (then_rsc),                                          \
                        pcmk__op_key((then_rsc)->id, (then_task), 0),        \
                        NULL, (flags), (first_rsc)->cluster)
 
 #define pcmk__order_starts(rsc1, rsc2, flags)                \
     pcmk__order_resource_actions((rsc1), CRMD_ACTION_START,  \
                                  (rsc2), CRMD_ACTION_START, (flags))
 
 #define pcmk__order_stops(rsc1, rsc2, flags)                 \
     pcmk__order_resource_actions((rsc1), CRMD_ACTION_STOP,   \
                                  (rsc2), CRMD_ACTION_STOP, (flags))
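 
 /* Usage sketch (illustrative only): with these wrappers, ordering two
  * resources' stops and starts might be written as:
  *
  *     pcmk__order_stops(rsc1, rsc2, pe_order_optional);
  *     pcmk__order_starts(rsc1, rsc2, pe_order_optional);
  *
  * pe_order_optional is one of the enum pe_ordering flags accepted by
  * pcmk__new_ordering(); rsc1 and rsc2 stand for any two pe_resource_t
  * pointers with valid IDs.
  */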
 
 
 // Ticket constraints (pcmk_sched_tickets.c)
 
 G_GNUC_INTERNAL
 void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 
 // Promotable clone resources (pcmk_sched_promotable.c)
 
 G_GNUC_INTERNAL
 void pcmk__add_promotion_scores(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__require_promotion_tickets(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__set_instance_roles(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__create_promotable_actions(pe_resource_t *clone);
 
 G_GNUC_INTERNAL
 void pcmk__promotable_restart_ordering(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__order_promotable_instances(pe_resource_t *clone);
 
 G_GNUC_INTERNAL
 void pcmk__update_dependent_with_promotable(const pe_resource_t *primary,
                                             pe_resource_t *dependent,
                                             const pcmk__colocation_t *colocation);
 
 G_GNUC_INTERNAL
 void pcmk__update_promotable_dependent_priority(const pe_resource_t *primary,
                                                 pe_resource_t *dependent,
                                                 const pcmk__colocation_t *colocation);
 
 
 // Pacemaker Remote nodes (pcmk_sched_remote.c)
 
 G_GNUC_INTERNAL
 bool pcmk__is_failed_remote_node(const pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__order_remote_connection_actions(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__rsc_corresponds_to_guest(const pe_resource_t *rsc,
                                     const pe_node_t *node);
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__connection_host_for_action(const pe_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params);
 
 G_GNUC_INTERNAL
 void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pe_action_t *action);
 
 
 // Primitives (pcmk_sched_primitive.c)
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_create_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_internal_constraints(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 enum pe_action_flags pcmk__primitive_action_flags(pe_action_t *action,
                                                   const pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
                                        const pe_resource_t *primary,
                                        const pcmk__colocation_t *colocation,
                                        bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__with_primitive_colocations(const pe_resource_t *rsc,
                                       const pe_resource_t *orig_rsc,
                                       GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_with_colocations(const pe_resource_t *rsc,
                                       const pe_resource_t *orig_rsc,
                                       GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node,
                             bool optional);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_add_utilization(const pe_resource_t *rsc,
                                      const pe_resource_t *orig_rsc,
                                      GList *all_rscs, GHashTable *utilization);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_shutdown_lock(pe_resource_t *rsc);
 
 
 // Groups (pcmk_sched_group.c)
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer);
 
 G_GNUC_INTERNAL
 void pcmk__group_create_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__group_internal_constraints(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__group_apply_coloc_score(pe_resource_t *dependent,
                                    const pe_resource_t *primary,
                                    const pcmk__colocation_t *colocation,
                                    bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__with_group_colocations(const pe_resource_t *rsc,
                                   const pe_resource_t *orig_rsc, GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__group_with_colocations(const pe_resource_t *rsc,
                                   const pe_resource_t *orig_rsc, GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__group_add_colocated_node_scores(pe_resource_t *rsc,
                                            const char *log_id,
                                            GHashTable **nodes, const char *attr,
                                            float factor, uint32_t flags);
 
 G_GNUC_INTERNAL
 void pcmk__group_apply_location(pe_resource_t *rsc, pe__location_t *location);
 
 G_GNUC_INTERNAL
 enum pe_action_flags pcmk__group_action_flags(pe_action_t *action,
                                               const pe_node_t *node);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__group_update_ordered_actions(pe_action_t *first,
                                             pe_action_t *then,
                                             const pe_node_t *node,
                                             uint32_t flags, uint32_t filter,
                                             uint32_t type,
                                             pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 GList *pcmk__group_colocated_resources(const pe_resource_t *rsc,
                                        const pe_resource_t *orig_rsc,
                                        GList *colocated_rscs);
 
 G_GNUC_INTERNAL
 void pcmk__group_add_utilization(const pe_resource_t *rsc,
                                  const pe_resource_t *orig_rsc, GList *all_rscs,
                                  GHashTable *utilization);
 
 G_GNUC_INTERNAL
 void pcmk__group_shutdown_lock(pe_resource_t *rsc);
 
 
 // Clones (pcmk_sched_clone.c)
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer);
 
 G_GNUC_INTERNAL
 void pcmk__clone_create_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__clone_create_probe(pe_resource_t *rsc, pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__clone_internal_constraints(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__clone_apply_coloc_score(pe_resource_t *dependent,
                                    const pe_resource_t *primary,
                                    const pcmk__colocation_t *colocation,
                                    bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__with_clone_colocations(const pe_resource_t *rsc,
                                   const pe_resource_t *orig_rsc, GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__clone_with_colocations(const pe_resource_t *rsc,
                                   const pe_resource_t *orig_rsc, GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__clone_apply_location(pe_resource_t *rsc, pe__location_t *constraint);
 
 G_GNUC_INTERNAL
 enum pe_action_flags pcmk__clone_action_flags(pe_action_t *action,
                                               const pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__clone_add_actions_to_graph(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__clone_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml);
 
 G_GNUC_INTERNAL
 void pcmk__clone_add_utilization(const pe_resource_t *rsc,
                                  const pe_resource_t *orig_rsc,
                                  GList *all_rscs, GHashTable *utilization);
 
 G_GNUC_INTERNAL
 void pcmk__clone_shutdown_lock(pe_resource_t *rsc);
 
 // Bundles (pcmk_sched_bundle.c)
 
-G_GNUC_INTERNAL
-const pe_resource_t *pcmk__get_rsc_in_container(const pe_resource_t *instance);
-
 G_GNUC_INTERNAL
 void pcmk__bundle_apply_coloc_score(pe_resource_t *dependent,
                                     const pe_resource_t *primary,
                                     const pcmk__colocation_t *colocation,
                                     bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__with_bundle_colocations(const pe_resource_t *rsc,
                                    const pe_resource_t *orig_rsc, GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_with_colocations(const pe_resource_t *rsc,
                                    const pe_resource_t *orig_rsc, GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__output_bundle_actions(pe_resource_t *rsc);
 
 
 // Clone instances or bundle replica containers (pcmk_sched_instances.c)
 
 G_GNUC_INTERNAL
 void pcmk__assign_instances(pe_resource_t *collective, GList *instances,
                             int max_total, int max_per_node);
 
 G_GNUC_INTERNAL
 void pcmk__create_instance_actions(pe_resource_t *rsc, GList *instances);
 
 G_GNUC_INTERNAL
 bool pcmk__instance_matches(const pe_resource_t *instance,
                             const pe_node_t *node, enum rsc_role_e role,
                             bool current);
 
 G_GNUC_INTERNAL
 pe_resource_t *pcmk__find_compatible_instance(const pe_resource_t *match_rsc,
                                               const pe_resource_t *rsc,
                                               enum rsc_role_e role,
                                               bool current);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__instance_update_ordered_actions(pe_action_t *first,
                                                pe_action_t *then,
                                                const pe_node_t *node,
                                                uint32_t flags, uint32_t filter,
                                                uint32_t type,
                                                pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 enum pe_action_flags pcmk__collective_action_flags(pe_action_t *action,
                                                    const GList *instances,
                                                    const pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__add_collective_constraints(GList **list,
                                       const pe_resource_t *instance,
                                       const pe_resource_t *collective,
                                       bool with_this);
 
 
 // Injections (pcmk_injections.c)
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_node_state_change(cib_t *cib_conn, const char *node,
                                         bool up);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node,
                                        const char *resource,
                                        const char *lrm_name,
                                        const char *rclass,
                                        const char *rtype,
                                        const char *rprovider);
 
 G_GNUC_INTERNAL
 void pcmk__inject_failcount(pcmk__output_t *out, xmlNode *cib_node,
                             const char *resource, const char *task,
                             guint interval_ms, int rc);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_action_result(xmlNode *cib_resource,
                                     lrmd_event_data_t *op, int target_rc);
 
 
 // Nodes (pcmk_sched_nodes.c)
 
 G_GNUC_INTERNAL
 bool pcmk__node_available(const pe_node_t *node, bool consider_score,
                           bool consider_guest);
 
 G_GNUC_INTERNAL
 bool pcmk__any_node_available(GHashTable *nodes);
 
 G_GNUC_INTERNAL
 GHashTable *pcmk__copy_node_table(GHashTable *nodes);
 
 G_GNUC_INTERNAL
 GList *pcmk__sort_nodes(GList *nodes, pe_node_t *active_node);
 
 G_GNUC_INTERNAL
 void pcmk__apply_node_health(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__top_allowed_node(const pe_resource_t *rsc,
                                   const pe_node_t *node);
 
 
 // Functions applying to more than one variant (pcmk_sched_resource.c)
 
 G_GNUC_INTERNAL
 void pcmk__set_allocation_methods(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
                              const xmlNode *rsc_entry, bool active_on_node);
 
 G_GNUC_INTERNAL
 GList *pcmk__rscs_matching_id(const char *id, const pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 GList *pcmk__colocated_resources(const pe_resource_t *rsc,
                                  const pe_resource_t *orig_rsc,
                                  GList *colocated_rscs);
 
 G_GNUC_INTERNAL
 void pcmk__noop_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml);
 
 G_GNUC_INTERNAL
 void pcmk__output_resource_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__finalize_assignment(pe_resource_t *rsc, pe_node_t *chosen,
                                bool force);
 
 G_GNUC_INTERNAL
 bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force);
 
 G_GNUC_INTERNAL
 void pcmk__unassign_resource(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__threshold_reached(pe_resource_t *rsc, const pe_node_t *node,
                              pe_resource_t **failed);
 
 G_GNUC_INTERNAL
 void pcmk__sort_resources(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 gint pcmk__cmp_instance(gconstpointer a, gconstpointer b);
 
 G_GNUC_INTERNAL
 gint pcmk__cmp_instance_number(gconstpointer a, gconstpointer b);
 
 
 // Functions related to probes (pcmk_sched_probes.c)
 
 G_GNUC_INTERNAL
 bool pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__order_probes(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__probe_resource_list(GList *rscs, pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__schedule_probes(pe_working_set_t *data_set);
 
 
 // Functions related to live migration (pcmk_sched_migration.c)
 
 void pcmk__create_migration_actions(pe_resource_t *rsc,
                                     const pe_node_t *current);
 
 void pcmk__abort_dangling_migration(void *data, void *user_data);
 
 bool pcmk__rsc_can_migrate(const pe_resource_t *rsc, const pe_node_t *current);
 
 void pcmk__order_migration_equivalents(pe__ordering_t *order);
 
 
 // Functions related to node utilization (pcmk_sched_utilization.c)
 
 G_GNUC_INTERNAL
 int pcmk__compare_node_capacities(const pe_node_t *node1,
                                   const pe_node_t *node2);
 
 G_GNUC_INTERNAL
 void pcmk__consume_node_capacity(GHashTable *current_utilization,
                                  const pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__release_node_capacity(GHashTable *current_utilization,
                                  const pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 const pe_node_t *pcmk__ban_insufficient_capacity(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__create_utilization_constraints(pe_resource_t *rsc,
                                           const GList *allowed_nodes);
 
 G_GNUC_INTERNAL
 void pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set);
 
 #endif // PCMK__LIBPACEMAKER_PRIVATE__H
diff --git a/lib/pacemaker/pcmk_sched_bundle.c b/lib/pacemaker/pcmk_sched_bundle.c
index c38ad0081d..cbb1ed0b75 100644
--- a/lib/pacemaker/pcmk_sched_bundle.c
+++ b/lib/pacemaker/pcmk_sched_bundle.c
@@ -1,946 +1,916 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdbool.h>
 
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 #define PE__VARIANT_BUNDLE 1
 #include <lib/pengine/variant.h>
 
 /*!
  * \internal
  * \brief Assign a single bundle replica's resources (other than container)
  *
  * \param[in,out] replica    Replica to assign
  * \param[in]     user_data  Preferred node, if any
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 assign_replica(pe__bundle_replica_t *replica, void *user_data)
 {
     pe_node_t *container_host = NULL;
     const pe_node_t *prefer = user_data;
     const pe_resource_t *bundle = pe__const_top_resource(replica->container,
                                                          true);
 
     if (replica->ip != NULL) {
         pe_rsc_trace(bundle, "Assigning bundle %s IP %s",
                      bundle->id, replica->ip->id);
         replica->ip->cmds->assign(replica->ip, prefer);
     }
 
     container_host = replica->container->allocated_to;
     if (replica->remote != NULL) {
         if (pe__is_guest_or_remote_node(container_host)) {
             /* REMOTE_CONTAINER_HACK: "Nested" connection resources must be on
              * the same host because Pacemaker Remote only supports a single
              * active connection.
              */
             pcmk__new_colocation("child-remote-with-docker-remote", NULL,
                                  INFINITY, replica->remote,
                                  container_host->details->remote_rsc, NULL,
                                  NULL, true, bundle->cluster);
         }
         pe_rsc_trace(bundle, "Assigning bundle %s connection %s",
                      bundle->id, replica->remote->id);
         replica->remote->cmds->assign(replica->remote, prefer);
     }
 
     if (replica->child != NULL) {
         pe_node_t *node = NULL;
         GHashTableIter iter;
 
         g_hash_table_iter_init(&iter, replica->child->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
             if (!pe__same_node(node, replica->node)) {
                 node->weight = -INFINITY;
             } else if (!pcmk__threshold_reached(replica->child, node, NULL)) {
                 node->weight = INFINITY;
             }
         }
 
         pe__set_resource_flags(replica->child->parent, pe_rsc_allocating);
         pe_rsc_trace(bundle, "Assigning bundle %s replica child %s",
                      bundle->id, replica->child->id);
         replica->child->cmds->assign(replica->child, replica->node);
         pe__clear_resource_flags(replica->child->parent, pe_rsc_allocating);
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Assign a bundle resource to a node
  *
  * \param[in,out] rsc     Resource to assign to a node
  * \param[in]     prefer  Node to prefer, if all else is equal
  *
  * \return Node that \p rsc is assigned to, if assigned entirely to one node
  */
 pe_node_t *
 pcmk__bundle_allocate(pe_resource_t *rsc, const pe_node_t *prefer)
 {
     GList *containers = NULL;
     pe_resource_t *bundled_resource = NULL;
 
     CRM_CHECK(rsc != NULL, return NULL);
 
     pe__set_resource_flags(rsc, pe_rsc_allocating);
     containers = pe__bundle_containers(rsc);
 
     pe__show_node_weights(!pcmk_is_set(rsc->cluster->flags, pe_flag_show_scores),
                           rsc, __func__, rsc->allowed_nodes, rsc->cluster);
 
     containers = g_list_sort(containers, pcmk__cmp_instance);
     pcmk__assign_instances(rsc, containers, pe__bundle_max(rsc),
                            rsc->fns->max_per_node(rsc));
     g_list_free(containers);
 
     pe__foreach_bundle_replica(rsc, assign_replica, (void *) prefer);
 
     bundled_resource = pe__bundled_resource(rsc);
     if (bundled_resource != NULL) {
         pe_node_t *node = NULL;
         GHashTableIter iter;
         g_hash_table_iter_init(&iter, bundled_resource->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (gpointer *) & node)) {
             if (pe__node_is_bundle_instance(rsc, node)) {
                 node->weight = 0;
             } else {
                 node->weight = -INFINITY;
             }
         }
         pe_rsc_trace(rsc, "Allocating bundle %s child %s",
                      rsc->id, bundled_resource->id);
         bundled_resource->cmds->assign(bundled_resource, prefer);
     }
 
     pe__clear_resource_flags(rsc, pe_rsc_allocating|pe_rsc_provisional);
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Create actions for a bundle replica's resources (other than container)
  *
  * \param[in,out] replica    Replica to create actions for
  * \param[in]     user_data  Unused
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 create_replica_actions(pe__bundle_replica_t *replica, void *user_data)
 {
     if (replica->ip != NULL) {
         replica->ip->cmds->create_actions(replica->ip);
     }
     if (replica->container != NULL) {
         replica->container->cmds->create_actions(replica->container);
     }
     if (replica->remote != NULL) {
         replica->remote->cmds->create_actions(replica->remote);
     }
     return true;
 }
 
 void
 pcmk__bundle_create_actions(pe_resource_t *rsc)
 {
     pe_action_t *action = NULL;
     GList *containers = NULL;
     pe_resource_t *bundled_resource = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     pe__foreach_bundle_replica(rsc, create_replica_actions, NULL);
 
     containers = pe__bundle_containers(rsc);
     pcmk__create_instance_actions(rsc, containers);
 
     bundled_resource = pe__bundled_resource(rsc);
     if (bundled_resource != NULL) {
         bundled_resource->cmds->create_actions(bundled_resource);
 
         if (pcmk_is_set(bundled_resource->flags, pe_rsc_promotable)) {
             /* promote */
             pe__new_rsc_pseudo_action(rsc, RSC_PROMOTE, true, true);
             action = pe__new_rsc_pseudo_action(rsc, RSC_PROMOTED, true, true);
             action->priority = INFINITY;
 
             /* demote */
             pe__new_rsc_pseudo_action(rsc, RSC_DEMOTE, true, true);
             action = pe__new_rsc_pseudo_action(rsc, RSC_DEMOTED, true, true);
             action->priority = INFINITY;
         }
     }
 
     g_list_free(containers);
 }
 
 /*!
  * \internal
  * \brief Create internal constraints for a bundle replica's resources
  *
  * \param[in,out] replica    Replica to create internal constraints for
  * \param[in,out] user_data  Replica's parent bundle
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 replica_internal_constraints(pe__bundle_replica_t *replica, void *user_data)
 {
     pe_resource_t *bundle = user_data;
 
     replica->container->cmds->internal_constraints(replica->container);
 
     // Start bundle -> start replica container
     pcmk__order_starts(bundle, replica->container,
                        pe_order_runnable_left|pe_order_implies_first_printed);
 
     // Stop bundle -> stop replica child and container
     if (replica->child != NULL) {
         pcmk__order_stops(bundle, replica->child,
                           pe_order_implies_first_printed);
     }
     pcmk__order_stops(bundle, replica->container,
                       pe_order_implies_first_printed);
 
     // Start replica container -> bundle is started
     pcmk__order_resource_actions(replica->container, RSC_START, bundle,
                                  RSC_STARTED,
                                  pe_order_implies_then_printed);
 
     // Stop replica container -> bundle is stopped
     pcmk__order_resource_actions(replica->container, RSC_STOP, bundle,
                                  RSC_STOPPED,
                                  pe_order_implies_then_printed);
 
     if (replica->ip != NULL) {
         replica->ip->cmds->internal_constraints(replica->ip);
 
         // Replica IP address -> replica container (symmetric)
         pcmk__order_starts(replica->ip, replica->container,
                            pe_order_runnable_left|pe_order_preserve);
         pcmk__order_stops(replica->container, replica->ip,
                           pe_order_implies_first|pe_order_preserve);
 
         pcmk__new_colocation("ip-with-docker", NULL, INFINITY, replica->ip,
                              replica->container, NULL, NULL, true,
                              bundle->cluster);
     }
 
     if (replica->remote != NULL) {
         /* This handles ordering and colocating remote relative to container
          * (via "resource-with-container"). Since IP is also ordered and
          * colocated relative to the container, we don't need to do anything
          * explicit here with IP.
          */
         replica->remote->cmds->internal_constraints(replica->remote);
     }
 
     if (replica->child != NULL) {
         CRM_ASSERT(replica->remote != NULL);
         // "Start remote then child" is implicit in scheduler's remote logic
     }
     return true;
 }
 
 void
 pcmk__bundle_internal_constraints(pe_resource_t *rsc)
 {
     pe_resource_t *bundled_resource = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     bundled_resource = pe__bundled_resource(rsc);
     if (bundled_resource != NULL) {
         pcmk__order_resource_actions(rsc, RSC_START, bundled_resource,
                                      RSC_START, pe_order_implies_first_printed);
         pcmk__order_resource_actions(rsc, RSC_STOP, bundled_resource, RSC_STOP,
                                      pe_order_implies_first_printed);
 
         if (bundled_resource->children != NULL) {
             pcmk__order_resource_actions(bundled_resource, RSC_STARTED, rsc,
                                          RSC_STARTED,
                                          pe_order_implies_then_printed);
             pcmk__order_resource_actions(bundled_resource, RSC_STOPPED, rsc,
                                          RSC_STOPPED,
                                          pe_order_implies_then_printed);
         } else {
             pcmk__order_resource_actions(bundled_resource, RSC_START, rsc,
                                          RSC_STARTED,
                                          pe_order_implies_then_printed);
             pcmk__order_resource_actions(bundled_resource, RSC_STOP, rsc,
                                          RSC_STOPPED,
                                          pe_order_implies_then_printed);
         }
     }
 
     pe__foreach_bundle_replica(rsc, replica_internal_constraints, (void *) rsc);
 
     if (bundled_resource != NULL) {
         bundled_resource->cmds->internal_constraints(bundled_resource);
         if (pcmk_is_set(bundled_resource->flags, pe_rsc_promotable)) {
             pcmk__promotable_restart_ordering(rsc);
 
             /* child demoted before global demoted */
             pcmk__order_resource_actions(bundled_resource, RSC_DEMOTED, rsc,
                                          RSC_DEMOTED,
                                          pe_order_implies_then_printed);
 
             /* global demote before child demote */
             pcmk__order_resource_actions(rsc, RSC_DEMOTE, bundled_resource,
                                          RSC_DEMOTE,
                                          pe_order_implies_first_printed);
 
             /* child promoted before global promoted */
             pcmk__order_resource_actions(bundled_resource, RSC_PROMOTED, rsc,
                                          RSC_PROMOTED,
                                          pe_order_implies_then_printed);
 
             /* global promote before child promote */
             pcmk__order_resource_actions(rsc, RSC_PROMOTE, bundled_resource,
                                          RSC_PROMOTE,
                                          pe_order_implies_first_printed);
         }
     }
 }
 
 struct match_data {
     const pe_node_t *node;     // Node to compare against replica
     pe_resource_t *container;  // Replica container corresponding to node
 };
 
 /*!
  * \internal
  * \brief Check whether a replica container is assigned to a given node
  *
  * \param[in,out] replica    Replica to check
  * \param[in,out] user_data  struct match_data with node to compare against
  *
  * \return true if the replica does not match (to indicate further replicas
  *         should be processed), otherwise false
  */
 static bool
 match_replica_container(pe__bundle_replica_t *replica, void *user_data)
 {
     struct match_data *match_data = user_data;
 
     if (pcmk__instance_matches(replica->container, match_data->node,
                                RSC_ROLE_UNKNOWN, false)) {
         match_data->container = replica->container;
         return false; // Match found, don't bother searching further replicas
     }
     return true; // No match, keep searching
 }
 
 static pe_resource_t *
 compatible_replica_for_node(const pe_resource_t *rsc_lh,
                             const pe_node_t *candidate,
                             const pe_resource_t *rsc)
 {
     struct match_data match_data = { candidate, NULL };
 
     CRM_CHECK(candidate != NULL, return NULL);
 
     crm_trace("Looking for compatible child from %s for %s on %s",
               rsc_lh->id, rsc->id, pe__node_name(candidate));
     pe__foreach_bundle_replica(rsc, match_replica_container, &match_data);
     if (match_data.container == NULL) {
         pe_rsc_trace(rsc, "Can't pair %s with %s", rsc_lh->id, rsc->id);
     } else {
         pe_rsc_trace(rsc, "Pairing %s with %s on %s",
                      rsc_lh->id, match_data.container->id,
                      pe__node_name(candidate));
     }
     return match_data.container;
 }
 
 static pe_resource_t *
 compatible_replica(const pe_resource_t *rsc_lh, const pe_resource_t *rsc,
                    pe_working_set_t *data_set)
 {
     GList *scratch = NULL;
     pe_resource_t *pair = NULL;
     pe_node_t *active_node_lh = NULL;
 
     active_node_lh = rsc_lh->fns->location(rsc_lh, NULL, 0);
     if (active_node_lh) {
         return compatible_replica_for_node(rsc_lh, active_node_lh, rsc);
     }
 
     scratch = g_hash_table_get_values(rsc_lh->allowed_nodes);
     scratch = pcmk__sort_nodes(scratch, NULL);
 
     for (GList *gIter = scratch; gIter != NULL; gIter = gIter->next) {
         pe_node_t *node = (pe_node_t *) gIter->data;
 
         pair = compatible_replica_for_node(rsc_lh, node, rsc);
         if (pair) {
             goto done;
         }
     }
 
     pe_rsc_debug(rsc, "Can't pair %s with %s", rsc_lh->id, (rsc? rsc->id : "none"));
   done:
     g_list_free(scratch);
     return pair;
 }
 
 struct coloc_data {
     const pcmk__colocation_t *colocation;
     pe_resource_t *dependent;
     GList *container_hosts;
 };
 
 /*!
  * \internal
  * \brief Apply a colocation score to replica node weights or resource priority
  *
  * \param[in,out] replica    Replica to apply colocation score to
  * \param[in]     user_data  struct coloc_data for colocation being applied
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 replica_apply_coloc_score(pe__bundle_replica_t *replica, void *user_data)
 {
     struct coloc_data *coloc_data = user_data;
     pe_node_t *chosen = NULL;
 
     if (coloc_data->colocation->score < INFINITY) {
         replica->container->cmds->apply_coloc_score(coloc_data->dependent,
                                                     replica->container,
                                                     coloc_data->colocation,
                                                     false);
         return true;
     }
 
     chosen = replica->container->fns->location(replica->container, NULL, 0);
     if ((chosen == NULL)
         || is_set_recursive(replica->container, pe_rsc_block, true)) {
         return true;
     }
 
     if ((coloc_data->colocation->primary_role >= RSC_ROLE_PROMOTED)
         && ((replica->child == NULL)
             || (replica->child->next_role < RSC_ROLE_PROMOTED))) {
         return true;
     }
 
     pe_rsc_trace(pe__const_top_resource(replica->container, true),
                  "Allowing mandatory colocation %s using %s @%d",
                  coloc_data->colocation->id, pe__node_name(chosen),
                  chosen->weight);
     coloc_data->container_hosts = g_list_prepend(coloc_data->container_hosts,
                                                  chosen);
     return true;
 }
 
 /*!
  * \internal
  * \brief Apply a colocation's score to node weights or resource priority
  *
  * Given a colocation constraint, apply its score to the dependent's
  * allowed node weights (if we are still placing resources) or priority (if
  * we are choosing promotable clone instance roles).
  *
  * \param[in,out] dependent      Dependent resource in colocation
  * \param[in]     primary        Primary resource in colocation
  * \param[in]     colocation     Colocation constraint to apply
  * \param[in]     for_dependent  true if called on behalf of dependent
  */
 void
 pcmk__bundle_apply_coloc_score(pe_resource_t *dependent,
                                const pe_resource_t *primary,
                                const pcmk__colocation_t *colocation,
                                bool for_dependent)
 {
     struct coloc_data coloc_data = { colocation, dependent, NULL };
 
     /* This should never be called for the bundle itself as a dependent.
      * Instead, we add its colocation constraints to its replicas and call
      * apply_coloc_score() for the replicas as dependents.
      */
     CRM_ASSERT(!for_dependent);
 
     CRM_CHECK((colocation != NULL) && (dependent != NULL) && (primary != NULL),
               return);
     CRM_ASSERT(dependent->variant == pe_native);
 
     if (pcmk_is_set(primary->flags, pe_rsc_provisional)) {
         pe_rsc_trace(primary, "%s is still provisional", primary->id);
         return;
 
     } else if (colocation->dependent->variant > pe_group) {
         pe_resource_t *primary_replica = compatible_replica(dependent, primary,
                                                             dependent->cluster);
 
         if (primary_replica) {
             pe_rsc_debug(primary, "Pairing %s with %s",
                          dependent->id, primary_replica->id);
             dependent->cmds->apply_coloc_score(dependent, primary_replica,
                                                colocation, true);
 
         } else if (colocation->score >= INFINITY) {
             crm_notice("Cannot pair %s with instance of %s",
                        dependent->id, primary->id);
             pcmk__assign_resource(dependent, NULL, true);
 
         } else {
             pe_rsc_debug(primary, "Cannot pair %s with instance of %s",
                          dependent->id, primary->id);
         }
 
         return;
     }
 
     pe_rsc_trace(primary, "Processing constraint %s: %s -> %s %d",
                  colocation->id, dependent->id, primary->id, colocation->score);
 
     pe__foreach_bundle_replica(primary, replica_apply_coloc_score,
                                (void *) &coloc_data);
 
     if (colocation->score >= INFINITY) {
         node_list_exclude(dependent->allowed_nodes, coloc_data.container_hosts,
                           FALSE);
     }
     g_list_free(coloc_data.container_hosts);
 }
 
 // Bundle implementation of resource_alloc_functions_t:with_this_colocations()
 void
 pcmk__with_bundle_colocations(const pe_resource_t *rsc,
                               const pe_resource_t *orig_rsc, GList **list)
 {
     CRM_CHECK((rsc != NULL) && (rsc->variant == pe_container)
               && (orig_rsc != NULL) && (list != NULL),
               return);
 
     if (rsc == orig_rsc) { // Colocations are wanted for bundle itself
         pcmk__add_with_this_list(list, rsc->rsc_cons_lhs);
 
     // Only the bundle replicas' containers get the bundle's constraints
     } else if (pcmk_is_set(orig_rsc->flags, pe_rsc_replica_container)) {
         pcmk__add_collective_constraints(list, orig_rsc, rsc, true);
     }
 }
 
 // Bundle implementation of resource_alloc_functions_t:this_with_colocations()
 void
 pcmk__bundle_with_colocations(const pe_resource_t *rsc,
                               const pe_resource_t *orig_rsc, GList **list)
 {
     CRM_CHECK((rsc != NULL) && (rsc->variant == pe_container)
               && (orig_rsc != NULL) && (list != NULL),
               return);
 
     if (rsc == orig_rsc) { // Colocations are wanted for bundle itself
         pcmk__add_this_with_list(list, rsc->rsc_cons);
 
     // Only the bundle replicas' containers get the bundle's constraints
     } else if (pcmk_is_set(orig_rsc->flags, pe_rsc_replica_container)) {
         pcmk__add_collective_constraints(list, orig_rsc, rsc, false);
     }
 }
 
 enum pe_action_flags
 pcmk__bundle_action_flags(pe_action_t *action, const pe_node_t *node)
 {
     GList *containers = NULL;
     enum pe_action_flags flags = 0;
     pe_resource_t *bundled_resource = pe__bundled_resource(action->rsc);
 
     if (bundled_resource != NULL) {
         enum action_tasks task = get_complex_task(bundled_resource,
                                                   action->task);
 
         switch(task) {
             case no_action:
             case action_notify:
             case action_notified:
             case action_promote:
             case action_promoted:
             case action_demote:
             case action_demoted:
                 return pcmk__collective_action_flags(action,
                                                      bundled_resource->children,
                                                      node);
             default:
                 break;
         }
     }
 
     containers = pe__bundle_containers(action->rsc);
     flags = pcmk__collective_action_flags(action, containers, node);
     g_list_free(containers);
     return flags;
 }
 
-/*!
- * \internal
- * \brief Get containerized resource corresponding to a given bundle container
- *
- * \param[in] instance  Collective instance that might be a bundle container
- *
- * \return Bundled resource instance inside \p instance if it is a bundle
- *         container instance, otherwise NULL
- */
-const pe_resource_t *
-pcmk__get_rsc_in_container(const pe_resource_t *instance)
-{
-    const pe__bundle_variant_data_t *data = NULL;
-    const pe_resource_t *top = pe__const_top_resource(instance, true);
-
-    if ((top == NULL) || (top->variant != pe_container)) {
-        return NULL;
-    }
-    get_bundle_variant_data(data, top);
-
-    for (const GList *iter = data->replicas; iter != NULL; iter = iter->next) {
-        const pe__bundle_replica_t *replica = iter->data;
-
-        if (instance == replica->container) {
-            return replica->child;
-        }
-    }
-    return NULL;
-}
-
 /*!
  * \internal
  * \brief Apply a location constraint to a bundle replica
  *
  * \param[in,out] replica    Replica to apply constraint to
  * \param[in,out] user_data  Location constraint to apply
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 apply_location_to_replica(pe__bundle_replica_t *replica, void *user_data)
 {
     pe__location_t *location = user_data;
 
     if (replica->container != NULL) {
         replica->container->cmds->apply_location(replica->container, location);
     }
     if (replica->ip != NULL) {
         replica->ip->cmds->apply_location(replica->ip, location);
     }
     return true;
 }
 
 void
 pcmk__bundle_rsc_location(pe_resource_t *rsc, pe__location_t *constraint)
 {
     pe_resource_t *bundled_resource = NULL;
 
     pcmk__apply_location(rsc, constraint);
     pe__foreach_bundle_replica(rsc, apply_location_to_replica, constraint);
 
     bundled_resource = pe__bundled_resource(rsc);
     if ((bundled_resource != NULL)
         && ((constraint->role_filter == RSC_ROLE_UNPROMOTED)
             || (constraint->role_filter == RSC_ROLE_PROMOTED))) {
         bundled_resource->cmds->apply_location(bundled_resource,
                                                constraint);
         bundled_resource->rsc_location = g_list_prepend(bundled_resource->rsc_location,
                                                         constraint);
     }
 }
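
 /* Illustrative CIB snippet (hypothetical id): the XPath below is meant to
  * match the connection resource's address parameter, e.g. an nvpair such as
  *
  *     <nvpair id="..." name="addr" value="#uname"/>
  *
  * assuming XML_RSC_ATTR_REMOTE_RA_ADDR expands to "addr"; the "#uname"
  * placeholder is later replaced with the container's host (see
  * add_replica_actions_to_graph() below).
  */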
 
 #define XPATH_REMOTE "//nvpair[@name='" XML_RSC_ATTR_REMOTE_RA_ADDR "']"
 
 /*!
  * \internal
  * \brief Add a bundle replica's actions to transition graph
  *
  * \param[in,out] replica    Replica to add to graph
  * \param[in]     user_data  Unused
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 add_replica_actions_to_graph(pe__bundle_replica_t *replica, void *user_data)
 {
     if ((replica->remote != NULL) && (replica->container != NULL)
         && pe__bundle_needs_remote_name(replica->remote)) {
 
         /* REMOTE_CONTAINER_HACK: Allow remote nodes to run containers that
          * run pacemaker-remoted inside, without needing a separate IP for
          * the container. This is done by configuring the inner remote's
          * connection host as the magic string "#uname", then
          * replacing it with the underlying host when needed.
          */
         xmlNode *nvpair = get_xpath_object(XPATH_REMOTE, replica->remote->xml,
                                            LOG_ERR);
         const char *calculated_addr = NULL;
 
         // Replace the value in replica->remote->xml (if appropriate)
         calculated_addr = pe__add_bundle_remote_name(replica->remote,
                                                      replica->remote->cluster,
                                                      nvpair, "value");
         if (calculated_addr != NULL) {
             /* Since this is for the bundle as a resource, and not any
              * particular action, replace the value in the default
              * parameters (not evaluated for node). create_graph_action()
              * will grab it from there to replace it in node-evaluated
              * parameters.
              */
             GHashTable *params = pe_rsc_params(replica->remote,
                                                NULL, replica->remote->cluster);
 
             g_hash_table_replace(params,
                                  strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
                                  strdup(calculated_addr));
         } else {
             /* The only way to get here is if the remote connection is
              * neither currently running nor scheduled to run. That means we
              * won't be doing any operations that require addr (only start
              * requires it; we additionally use it to compare digests when
              * unpacking status, promote, and migrate_from history, but
              * that's already happened by this point).
              */
             crm_info("Unable to determine address for bundle %s remote connection",
                      pe__const_top_resource(replica->remote, true)->id);
         }
     }
     if (replica->ip != NULL) {
         replica->ip->cmds->add_actions_to_graph(replica->ip);
     }
     if (replica->container != NULL) {
         replica->container->cmds->add_actions_to_graph(replica->container);
     }
     if (replica->remote != NULL) {
         replica->remote->cmds->add_actions_to_graph(replica->remote);
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Add a resource's actions to the transition graph
  *
  * \param[in,out] rsc  Resource whose actions should be added
  */
 void
 pcmk__bundle_expand(pe_resource_t *rsc)
 {
     pe_resource_t *bundled_resource = NULL;
 
     CRM_CHECK(rsc != NULL, return);
 
     bundled_resource = pe__bundled_resource(rsc);
     if (bundled_resource != NULL) {
         bundled_resource->cmds->add_actions_to_graph(bundled_resource);
     }
     pe__foreach_bundle_replica(rsc, add_replica_actions_to_graph, NULL);
 }
 
 struct probe_data {
     pe_resource_t *bundle;  // Bundle being probed
     pe_node_t *node;        // Node to create probes on
     bool any_created;       // Whether any probes have been created
 };
 
 /*!
  * \internal
  * \brief Order a bundle replica's start after another replica's probe
  *
  * \param[in,out] replica    Replica to order start for
  * \param[in]     user_data  Replica with probe to order after
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 order_replica_start_after(pe__bundle_replica_t *replica, void *user_data)
 {
     pe__bundle_replica_t *probed_replica = user_data;
 
     if ((replica == probed_replica) || (replica->container == NULL)) {
         return true;
     }
     pcmk__new_ordering(probed_replica->container,
                        pcmk__op_key(probed_replica->container->id, RSC_STATUS,
                                     0),
                        NULL, replica->container,
                        pcmk__op_key(replica->container->id, RSC_START, 0), NULL,
                        pe_order_optional|pe_order_same_node,
                        replica->container->cluster);
     return true;
 }
 
 /*!
  * \internal
  * \brief Create probes for a bundle replica's resources
  *
  * \param[in,out] replica    Replica to create probes for
  * \param[in]     user_data  struct probe_data
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 create_replica_probes(pe__bundle_replica_t *replica, void *user_data)
 {
     struct probe_data *probe_data = user_data;
 
     if ((replica->ip != NULL)
         && replica->ip->cmds->create_probe(replica->ip, probe_data->node)) {
         probe_data->any_created = true;
     }
     if ((replica->child != NULL)
         && pe__same_node(probe_data->node, replica->node)
         && replica->child->cmds->create_probe(replica->child, probe_data->node)) {
         probe_data->any_created = true;
     }
     if ((replica->container != NULL)
         && replica->container->cmds->create_probe(replica->container,
                                                   probe_data->node)) {
         probe_data->any_created = true;
 
         /* If we're limited to one replica per host (probably because no IP
          * range was given), we don't want any peer containers starting until
          * we've established that no other copies are already running.
          *
          * Partly this is to ensure that the maximum number of replicas per
          * host is observed, but also to ensure that the containers don't
          * fail to start because the necessary port mappings (which won't
          * include an IP for uniqueness) are already taken.
          */
         if (probe_data->bundle->fns->max_per_node(probe_data->bundle) == 1) {
             pe__foreach_bundle_replica(probe_data->bundle,
                                        order_replica_start_after, replica);
         }
     }
     if ((replica->container != NULL) && (replica->remote != NULL)
         && replica->remote->cmds->create_probe(replica->remote,
                                                probe_data->node)) {
         /* Do not probe the remote resource until we know where the container is
          * running. This is required for REMOTE_CONTAINER_HACK to correctly
          * probe remote resources.
          */
         char *probe_uuid = pcmk__op_key(replica->remote->id, RSC_STATUS, 0);
         pe_action_t *probe = find_first_action(replica->remote->actions,
                                                probe_uuid, NULL,
                                                probe_data->node);
 
         free(probe_uuid);
         if (probe != NULL) {
             probe_data->any_created = true;
             crm_trace("Ordering %s probe on %s",
                       replica->remote->id, pe__node_name(probe_data->node));
             pcmk__new_ordering(replica->container,
                                pcmk__op_key(replica->container->id, RSC_START,
                                             0),
                                NULL, replica->remote, NULL, probe,
                                pe_order_probe, probe_data->bundle->cluster);
         }
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Schedule any probes needed for a resource on a node
  *
  * \param[in,out] rsc   Resource to create probe for
  * \param[in,out] node  Node to create probe on
  *
  * \return true if any probe was created, otherwise false
  */
 bool
 pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node)
 {
     struct probe_data probe_data = { rsc, node, false };
 
     CRM_CHECK(rsc != NULL, return false);
     pe__foreach_bundle_replica(rsc, create_replica_probes, &probe_data);
     return probe_data.any_created;
 }
 
 /*!
  * \internal
  * \brief Output actions for one bundle replica
  *
  * \param[in,out] replica    Replica to output actions for
  * \param[in]     user_data  Unused
  *
  * \return true (to indicate that any further replicas should be processed)
  */
 static bool
 output_replica_actions(pe__bundle_replica_t *replica, void *user_data)
 {
     if (replica->ip != NULL) {
         replica->ip->cmds->output_actions(replica->ip);
     }
     if (replica->container != NULL) {
         replica->container->cmds->output_actions(replica->container);
     }
     if (replica->remote != NULL) {
         replica->remote->cmds->output_actions(replica->remote);
     }
     if (replica->child != NULL) {
         replica->child->cmds->output_actions(replica->child);
     }
     return true;
 }
 
 void
 pcmk__output_bundle_actions(pe_resource_t *rsc)
 {
     CRM_CHECK(rsc != NULL, return);
 
     pe__foreach_bundle_replica(rsc, output_replica_actions, NULL);
 }
 
 // Bundle implementation of resource_alloc_functions_t:add_utilization()
 void
 pcmk__bundle_add_utilization(const pe_resource_t *rsc,
                              const pe_resource_t *orig_rsc, GList *all_rscs,
                              GHashTable *utilization)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     pe__bundle_replica_t *replica = NULL;
 
     if (!pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         return;
     }
 
     get_bundle_variant_data(bundle_data, rsc);
     if (bundle_data->replicas == NULL) {
         return;
     }
 
     /* All bundle replicas are identical, so using the utilization of the first
      * is sufficient for any. Only the implicit container resource can have
      * utilization values.
      */
     replica = (pe__bundle_replica_t *) bundle_data->replicas->data;
     if (replica->container != NULL) {
         replica->container->cmds->add_utilization(replica->container, orig_rsc,
                                                   all_rscs, utilization);
     }
 }
 
 // Bundle implementation of resource_alloc_functions_t:shutdown_lock()
 void
 pcmk__bundle_shutdown_lock(pe_resource_t *rsc)
 {
     return; // Bundles currently don't support shutdown locks
 }
diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c
index c880196f70..8ca421744f 100644
--- a/lib/pacemaker/pcmk_sched_instances.c
+++ b/lib/pacemaker/pcmk_sched_instances.c
@@ -1,1659 +1,1659 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 /* This file is intended for code usable with both clone instances and bundle
  * replica containers.
  */
 
 #include <crm_internal.h>
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 #include "libpacemaker_private.h"
 
 /*!
  * \internal
  * \brief Check whether a clone or bundle has instances for all available nodes
  *
  * \param[in] collective  Clone or bundle to check
  *
  * \return true if \p collective has enough instances for all of its available
  *         allowed nodes, otherwise false
  */
 static bool
 can_run_everywhere(const pe_resource_t *collective)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
     int available_nodes = 0;
     int max_instances = 0;
 
     switch (collective->variant) {
         case pe_clone:
             max_instances = pe__clone_max(collective);
             break;
         case pe_container:
             max_instances = pe__bundle_max(collective);
             break;
         default:
             return false; // Not actually possible
     }
 
     g_hash_table_iter_init(&iter, collective->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
         if (pcmk__node_available(node, false, false)
             && (max_instances < ++available_nodes)) {
             return false;
         }
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Check whether a node is allowed to run an instance
  *
  * \param[in] instance      Clone instance or bundle container to check
  * \param[in] node          Node to check
  * \param[in] max_per_node  Maximum number of instances allowed to run on a node
  *
  * \return true if \p node is allowed to run \p instance, otherwise false
  */
 static bool
 can_run_instance(const pe_resource_t *instance, const pe_node_t *node,
                  int max_per_node)
 {
     pe_node_t *allowed_node = NULL;
 
     if (pcmk_is_set(instance->flags, pe_rsc_orphan)) {
         pe_rsc_trace(instance, "%s cannot run on %s: orphaned",
                      instance->id, pe__node_name(node));
         return false;
     }
 
     if (!pcmk__node_available(node, false, false)) {
         pe_rsc_trace(instance,
                      "%s cannot run on %s: node cannot run resources",
                      instance->id, pe__node_name(node));
         return false;
     }
 
     allowed_node = pcmk__top_allowed_node(instance, node);
     if (allowed_node == NULL) {
         crm_warn("%s cannot run on %s: node not allowed",
                  instance->id, pe__node_name(node));
         return false;
     }
 
     if (allowed_node->weight < 0) {
         pe_rsc_trace(instance, "%s cannot run on %s: parent score is %s there",
                      instance->id, pe__node_name(node),
                      pcmk_readable_score(allowed_node->weight));
         return false;
     }
 
     if (allowed_node->count >= max_per_node) {
         pe_rsc_trace(instance,
                      "%s cannot run on %s: node already has %d instance%s",
                      instance->id, pe__node_name(node), max_per_node,
                      pcmk__plural_s(max_per_node));
         return false;
     }
 
     pe_rsc_trace(instance, "%s can run on %s (%d already running)",
                  instance->id, pe__node_name(node), allowed_node->count);
     return true;
 }
 
 /*!
  * \internal
  * \brief Ban a clone instance or bundle replica from unavailable allowed nodes
  *
  * \param[in,out] instance      Clone instance or bundle replica to ban
  * \param[in]     max_per_node  Maximum instances allowed to run on a node
  */
 static void
 ban_unavailable_allowed_nodes(pe_resource_t *instance, int max_per_node)
 {
     if (instance->allowed_nodes != NULL) {
         GHashTableIter iter;
         pe_node_t *node = NULL;
 
         g_hash_table_iter_init(&iter, instance->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
             if (!can_run_instance(instance, node, max_per_node)) {
                 pe_rsc_trace(instance, "Banning %s from unavailable node %s",
                              instance->id, pe__node_name(node));
                 node->weight = -INFINITY;
                 for (GList *child_iter = instance->children;
                      child_iter != NULL; child_iter = child_iter->next) {
                     pe_resource_t *child = (pe_resource_t *) child_iter->data;
                     pe_node_t *child_node = NULL;
 
                     child_node = pe_hash_table_lookup(child->allowed_nodes,
                                                       node->details->id);
                     if (child_node != NULL) {
                         pe_rsc_trace(instance,
                                      "Banning %s child %s "
                                      "from unavailable node %s",
                                      instance->id, child->id,
                                      pe__node_name(node));
                         child_node->weight = -INFINITY;
                     }
                 }
             }
         }
     }
 }
 
 /*!
  * \internal
  * \brief Create a hash table with a single node in it
  *
  * \param[in] node  Node to copy into new table
  *
  * \return Newly created hash table containing a copy of \p node
  * \note The caller is responsible for freeing the result with
  *       g_hash_table_destroy().
  */
 static GHashTable *
 new_node_table(pe_node_t *node)
 {
     GHashTable *table = pcmk__strkey_table(NULL, free);
 
     node = pe__copy_node(node);
     g_hash_table_insert(table, (gpointer) node->details->id, node);
     return table;
 }
 
 /*!
  * \internal
  * \brief Apply a resource's parent's colocation scores to a node table
  *
  * \param[in]     rsc    Resource whose colocations should be applied
  * \param[in,out] nodes  Node table to apply colocations to
  */
 static void
 apply_parent_colocations(const pe_resource_t *rsc, GHashTable **nodes)
 {
     GList *iter = NULL;
     pcmk__colocation_t *colocation = NULL;
     pe_resource_t *other = NULL;
     float factor = 0.0;
 
     /* Because the this_with_colocations() and with_this_colocations() methods
      * boil down to copies of rsc_cons and rsc_cons_lhs for clones and bundles,
      * we can use those here directly for efficiency.
      */
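     // Dividing by INFINITY scales each colocation score to a factor in [-1.0, 1.0]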
     for (iter = rsc->parent->rsc_cons; iter != NULL; iter = iter->next) {
         colocation = (pcmk__colocation_t *) iter->data;
         other = colocation->primary;
         factor = colocation->score / (float) INFINITY;
         other->cmds->add_colocated_node_scores(other, rsc->id, nodes,
                                                colocation->node_attribute,
                                                factor,
                                                pcmk__coloc_select_default);
     }
     for (iter = rsc->parent->rsc_cons_lhs; iter != NULL; iter = iter->next) {
         colocation = (pcmk__colocation_t *) iter->data;
         if (!pcmk__colocation_has_influence(colocation, rsc)) {
             continue;
         }
         other = colocation->dependent;
         factor = colocation->score / (float) INFINITY;
         other->cmds->add_colocated_node_scores(other, rsc->id, nodes,
                                                colocation->node_attribute,
                                                factor,
                                                pcmk__coloc_select_nonnegative);
     }
 }
 
 /*!
  * \internal
  * \brief Compare clone or bundle instances based on colocation scores
  *
  * Determine the relative order in which two clone or bundle instances should be
  * assigned to nodes, considering the scores of colocation constraints directly
  * or indirectly involving them.
  *
  * \param[in] instance1  First instance to compare
  * \param[in] instance2  Second instance to compare
  *
  * \return A negative number if \p instance1 should be assigned first,
  *         a positive number if \p instance2 should be assigned first,
  *         or 0 if assignment order doesn't matter
  */
 static int
 cmp_instance_by_colocation(const pe_resource_t *instance1,
                            const pe_resource_t *instance2)
 {
     int rc = 0;
     pe_node_t *node1 = NULL;
     pe_node_t *node2 = NULL;
     pe_node_t *current_node1 = pe__current_node(instance1);
     pe_node_t *current_node2 = pe__current_node(instance2);
     GHashTable *colocated_scores1 = NULL;
     GHashTable *colocated_scores2 = NULL;
 
     CRM_ASSERT((instance1 != NULL) && (instance1->parent != NULL)
                && (instance2 != NULL) && (instance2->parent != NULL)
                && (current_node1 != NULL) && (current_node2 != NULL));
 
     // Create node tables initialized with each node
     colocated_scores1 = new_node_table(current_node1);
     colocated_scores2 = new_node_table(current_node2);
 
     // Apply parental colocations
     apply_parent_colocations(instance1, &colocated_scores1);
     apply_parent_colocations(instance2, &colocated_scores2);
 
     // Find original nodes again, with scores updated for colocations
     node1 = g_hash_table_lookup(colocated_scores1, current_node1->details->id);
     node2 = g_hash_table_lookup(colocated_scores2, current_node2->details->id);
 
     // Compare nodes by updated scores
     if (node1->weight < node2->weight) {
         crm_trace("Assign %s (%d on %s) after %s (%d on %s)",
                   instance1->id, node1->weight, pe__node_name(node1),
                   instance2->id, node2->weight, pe__node_name(node2));
         rc = 1;
 
     } else if (node1->weight > node2->weight) {
         crm_trace("Assign %s (%d on %s) before %s (%d on %s)",
                   instance1->id, node1->weight, pe__node_name(node1),
                   instance2->id, node2->weight, pe__node_name(node2));
         rc = -1;
     }
 
     g_hash_table_destroy(colocated_scores1);
     g_hash_table_destroy(colocated_scores2);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Check whether a resource or any of its children are failed
  *
  * \param[in] rsc  Resource to check
  *
  * \return true if \p rsc or any of its children are failed, otherwise false
  */
 static bool
 did_fail(const pe_resource_t *rsc)
 {
     if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         return true;
     }
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         if (did_fail((const pe_resource_t *) iter->data)) {
             return true;
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Check whether a node is allowed to run a resource
  *
  * \param[in]     rsc   Resource to check
  * \param[in,out] node  Node to check (will be set to NULL if not allowed)
  *
  * \return true if *node is either NULL or allowed for \p rsc, otherwise false
  */
 static bool
 node_is_allowed(const pe_resource_t *rsc, pe_node_t **node)
 {
     if (*node != NULL) {
         pe_node_t *allowed = pe_hash_table_lookup(rsc->allowed_nodes,
                                                   (*node)->details->id);
         if ((allowed == NULL) || (allowed->weight < 0)) {
             pe_rsc_trace(rsc, "%s: current location (%s) is unavailable",
                          rsc->id, pe__node_name(*node));
             *node = NULL;
             return false;
         }
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Compare two clone or bundle instances' instance numbers
  *
  * \param[in] a  First instance to compare
  * \param[in] b  Second instance to compare
  *
  * \return A negative number if \p a's instance number is lower,
  *         a positive number if \p b's instance number is lower,
  *         or 0 if their instance numbers are the same
  */
 gint
 pcmk__cmp_instance_number(gconstpointer a, gconstpointer b)
 {
     const pe_resource_t *instance1 = (const pe_resource_t *) a;
     const pe_resource_t *instance2 = (const pe_resource_t *) b;
     char *div1 = NULL;
     char *div2 = NULL;
 
     CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
 
     // Clone numbers are after a colon, bundle numbers after a dash
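     // (for example, "myclone:2" vs. "mybundle-podman-2")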
     div1 = strrchr(instance1->id, ':');
     if (div1 == NULL) {
         div1 = strrchr(instance1->id, '-');
     }
     div2 = strrchr(instance2->id, ':');
     if (div2 == NULL) {
         div2 = strrchr(instance2->id, '-');
     }
     CRM_ASSERT((div1 != NULL) && (div2 != NULL));
 
     return (gint) (strtol(div1 + 1, NULL, 10) - strtol(div2 + 1, NULL, 10));
 }
 
 /*!
  * \internal
  * \brief Compare clone or bundle instances according to assignment order
  *
  * Compare two clone or bundle instances according to the order they should be
  * assigned to nodes, preferring (in order):
  *
  *  - Active instance that is less multiply active
  *  - Instance that is not active on a disallowed node
  *  - Instance with higher configured priority
  *  - Active instance whose current node can run resources
  *  - Active instance whose parent is allowed on current node
  *  - Active instance whose current node has fewer other instances
  *  - Active instance
  *  - Instance that isn't failed
  *  - Instance whose colocations result in higher score on current node
  *  - Instance with lower ID in lexicographic order
  *
  * \param[in] a          First instance to compare
  * \param[in] b          Second instance to compare
  *
  * \return A negative number if \p a should be assigned first,
  *         a positive number if \p b should be assigned first,
  *         or 0 if assignment order doesn't matter
  */
 gint
 pcmk__cmp_instance(gconstpointer a, gconstpointer b)
 {
     int rc = 0;
     pe_node_t *node1 = NULL;
     pe_node_t *node2 = NULL;
     unsigned int nnodes1 = 0;
     unsigned int nnodes2 = 0;
 
     bool can1 = true;
     bool can2 = true;
 
     const pe_resource_t *instance1 = (const pe_resource_t *) a;
     const pe_resource_t *instance2 = (const pe_resource_t *) b;
 
     CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
 
     node1 = instance1->fns->active_node(instance1, &nnodes1, NULL);
     node2 = instance2->fns->active_node(instance2, &nnodes2, NULL);
 
     /* If both instances are running and at least one is multiply
      * active, prefer instance that's running on fewer nodes.
      */
     if ((nnodes1 > 0) && (nnodes2 > 0)) {
         if (nnodes1 < nnodes2) {
             crm_trace("Assign %s (active on %d) before %s (active on %d): "
                       "less multiply active",
                       instance1->id, nnodes1, instance2->id, nnodes2);
             return -1;
 
         } else if (nnodes1 > nnodes2) {
             crm_trace("Assign %s (active on %d) after %s (active on %d): "
                       "more multiply active",
                       instance1->id, nnodes1, instance2->id, nnodes2);
             return 1;
         }
     }
 
     /* An instance that is either inactive or active on an allowed node is
      * preferred over an instance that is active on a no-longer-allowed node.
      */
     can1 = node_is_allowed(instance1, &node1);
     can2 = node_is_allowed(instance2, &node2);
     if (can1 && !can2) {
         crm_trace("Assign %s before %s: not active on a disallowed node",
                   instance1->id, instance2->id);
         return -1;
 
     } else if (!can1 && can2) {
         crm_trace("Assign %s after %s: active on a disallowed node",
                   instance1->id, instance2->id);
         return 1;
     }
 
     // Prefer instance with higher configured priority
     if (instance1->priority > instance2->priority) {
         crm_trace("Assign %s before %s: priority (%d > %d)",
                   instance1->id, instance2->id,
                   instance1->priority, instance2->priority);
         return -1;
 
     } else if (instance1->priority < instance2->priority) {
         crm_trace("Assign %s after %s: priority (%d < %d)",
                   instance1->id, instance2->id,
                   instance1->priority, instance2->priority);
         return 1;
     }
 
     // Prefer active instance
     if ((node1 == NULL) && (node2 == NULL)) {
         crm_trace("No assignment preference for %s vs. %s: inactive",
                   instance1->id, instance2->id);
         return 0;
 
     } else if (node1 == NULL) {
         crm_trace("Assign %s after %s: active", instance1->id, instance2->id);
         return 1;
 
     } else if (node2 == NULL) {
         crm_trace("Assign %s before %s: active", instance1->id, instance2->id);
         return -1;
     }
 
     // Prefer instance whose current node can run resources
     can1 = pcmk__node_available(node1, false, false);
     can2 = pcmk__node_available(node2, false, false);
     if (can1 && !can2) {
         crm_trace("Assign %s before %s: current node can run resources",
                   instance1->id, instance2->id);
         return -1;
 
     } else if (!can1 && can2) {
         crm_trace("Assign %s after %s: current node can't run resources",
                   instance1->id, instance2->id);
         return 1;
     }
 
     // Prefer instance whose parent is allowed to run on instance's current node
     node1 = pcmk__top_allowed_node(instance1, node1);
     node2 = pcmk__top_allowed_node(instance2, node2);
     if ((node1 == NULL) && (node2 == NULL)) {
         crm_trace("No assignment preference for %s vs. %s: "
                   "parent not allowed on either instance's current node",
                   instance1->id, instance2->id);
         return 0;
 
     } else if (node1 == NULL) {
         crm_trace("Assign %s after %s: parent not allowed on current node",
                   instance1->id, instance2->id);
         return 1;
 
     } else if (node2 == NULL) {
         crm_trace("Assign %s before %s: parent allowed on current node",
                   instance1->id, instance2->id);
         return -1;
     }
 
     // Prefer instance whose current node is running fewer other instances
     if (node1->count < node2->count) {
         crm_trace("Assign %s before %s: fewer active instances on current node",
                   instance1->id, instance2->id);
         return -1;
 
     } else if (node1->count > node2->count) {
         crm_trace("Assign %s after %s: more active instances on current node",
                   instance1->id, instance2->id);
         return 1;
     }
 
     // Prefer instance that isn't failed
     can1 = did_fail(instance1);
     can2 = did_fail(instance2);
     if (!can1 && can2) {
         crm_trace("Assign %s before %s: not failed",
                   instance1->id, instance2->id);
         return -1;
     } else if (can1 && !can2) {
         crm_trace("Assign %s after %s: failed",
                   instance1->id, instance2->id);
         return 1;
     }
 
     // Prefer instance with higher cumulative colocation score on current node
     rc = cmp_instance_by_colocation(instance1, instance2);
     if (rc != 0) {
         return rc;
     }
 
     // Prefer instance with lower instance number
     rc = pcmk__cmp_instance_number(instance1, instance2);
     if (rc < 0) {
         crm_trace("Assign %s before %s: instance number",
                   instance1->id, instance2->id);
     } else if (rc > 0) {
         crm_trace("Assign %s after %s: instance number",
                   instance1->id, instance2->id);
     } else {
         crm_trace("No assignment preference for %s vs. %s",
                   instance1->id, instance2->id);
     }
     return rc;
 }
 
 /*!
  * \internal
  * \brief Choose a node for an instance
  *
  * \param[in,out] instance      Clone instance or bundle replica container
  * \param[in]     prefer        If not NULL, attempt early assignment to this
  *                              node, if still the best choice; otherwise,
  *                              perform final assignment
  * \param[in]     max_per_node  Assign at most this many instances to one node
  *
  * \return true if \p instance could be assigned to a node, otherwise false
  */
 static bool
 assign_instance(pe_resource_t *instance, const pe_node_t *prefer,
                 int max_per_node)
 {
     pe_node_t *chosen = NULL;
     pe_node_t *allowed = NULL;
 
     CRM_ASSERT(instance != NULL);
     pe_rsc_trace(instance, "Assigning %s (preferring %s)", instance->id,
                  ((prefer == NULL)? "no node" : prefer->details->uname));
 
     if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) {
         // Instance is already assigned
         return instance->fns->location(instance, NULL, FALSE) != NULL;
     }
 
     if (pcmk_is_set(instance->flags, pe_rsc_allocating)) {
         pe_rsc_debug(instance,
                      "Assignment loop detected involving %s colocations",
                      instance->id);
         return false;
     }
 
     if (prefer != NULL) { // Possible early assignment to preferred node
 
         // Get preferred node with instance's scores
         allowed = g_hash_table_lookup(instance->allowed_nodes,
                                       prefer->details->id);
 
         if ((allowed == NULL) || (allowed->weight < 0)) {
             pe_rsc_trace(instance,
                          "Not assigning %s to preferred node %s: unavailable",
                          instance->id, pe__node_name(prefer));
             return false;
         }
     }
 
     ban_unavailable_allowed_nodes(instance, max_per_node);
 
     if (prefer == NULL) { // Final assignment
         chosen = instance->cmds->assign(instance, NULL);
 
     } else { // Possible early assignment to preferred node
         GHashTable *backup = pcmk__copy_node_table(instance->allowed_nodes);
 
         chosen = instance->cmds->assign(instance, prefer);
 
         // Revert nodes if preferred node won't be assigned
         if ((chosen != NULL) && (chosen->details != prefer->details)) {
             crm_info("Not assigning %s to preferred node %s: %s is better",
                      instance->id, pe__node_name(prefer),
                      pe__node_name(chosen));
             g_hash_table_destroy(instance->allowed_nodes);
             instance->allowed_nodes = backup;
             pcmk__unassign_resource(instance);
             chosen = NULL;
         } else if (backup != NULL) {
             g_hash_table_destroy(backup);
         }
     }
 
     // The parent tracks how many instances have been assigned to each node
     if (chosen != NULL) {
         allowed = pcmk__top_allowed_node(instance, chosen);
         if (allowed == NULL) {
             /* The instance is allowed on the node, but its parent isn't. This
              * shouldn't be possible if the resource is managed, and we won't be
              * able to limit the number of instances assigned to the node.
              */
             CRM_LOG_ASSERT(!pcmk_is_set(instance->flags, pe_rsc_managed));
 
         } else {
             allowed->count++;
         }
     }
     return chosen != NULL;
 }
 
 /*!
  * \internal
  * \brief Reset the node counts of a resource's allowed nodes to zero
  *
  * \param[in,out] rsc  Resource to reset
  *
  * \return Number of nodes that are available to run resources
  */
 static unsigned int
 reset_allowed_node_counts(pe_resource_t *rsc)
 {
     unsigned int available_nodes = 0;
     pe_node_t *node = NULL;
     GHashTableIter iter;
 
     g_hash_table_iter_init(&iter, rsc->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
         node->count = 0;
         if (pcmk__node_available(node, false, false)) {
             available_nodes++;
         }
     }
     return available_nodes;
 }
 
 /*!
  * \internal
  * \brief Check whether an instance has a preferred node
  *
  * \param[in] rsc               Clone or bundle being assigned (for logs only)
  * \param[in] instance          Clone instance or bundle replica container
  * \param[in] optimal_per_node  Optimal number of instances per node
  *
  * \return Instance's current node if still available, otherwise NULL
  */
 static const pe_node_t *
 preferred_node(const pe_resource_t *rsc, const pe_resource_t *instance,
                int optimal_per_node)
 {
     const pe_node_t *node = NULL;
     const pe_node_t *parent_node = NULL;
 
     // Check whether instance is active, healthy, and not yet assigned
     if ((instance->running_on == NULL)
         || !pcmk_is_set(instance->flags, pe_rsc_provisional)
         || pcmk_is_set(instance->flags, pe_rsc_failed)) {
         return NULL;
     }
 
     // Check whether instance's current node can run resources
     node = pe__current_node(instance);
     if (!pcmk__node_available(node, true, false)) {
         pe_rsc_trace(rsc, "Not assigning %s to %s early (unavailable)",
                      instance->id, pe__node_name(node));
         return NULL;
     }
 
     // Check whether node already has optimal number of instances assigned
     parent_node = pcmk__top_allowed_node(instance, node);
     if ((parent_node != NULL) && (parent_node->count >= optimal_per_node)) {
         pe_rsc_trace(rsc,
                      "Not assigning %s to %s early "
                      "(optimal instances already assigned)",
                      instance->id, pe__node_name(node));
         return NULL;
     }
 
     return node;
 }
 
 /*!
  * \internal
  * \brief Assign collective instances to nodes
  *
  * \param[in,out] collective    Clone or bundle resource being assigned
  * \param[in,out] instances     List of clone instances or bundle containers
  * \param[in]     max_total     Maximum instances to assign in total
  * \param[in]     max_per_node  Maximum instances to assign to any one node
  */
 void
 pcmk__assign_instances(pe_resource_t *collective, GList *instances,
                        int max_total, int max_per_node)
 {
     // Reuse node count to track number of assigned instances
     unsigned int available_nodes = reset_allowed_node_counts(collective);
 
     int optimal_per_node = 0;
     int assigned = 0;
     GList *iter = NULL;
     pe_resource_t *instance = NULL;
     const pe_node_t *current = NULL;
 
     if (available_nodes > 0) {
         optimal_per_node = max_total / available_nodes;
     }
     if (optimal_per_node < 1) {
         optimal_per_node = 1;
     }
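     /* For example (hypothetical numbers): with max_total=7 and 3 available
      * nodes, optimal_per_node is 2 (integer division), so the first pass
      * below keeps at most 2 instances on any one current node.
      */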
 
     pe_rsc_debug(collective,
                  "Assigning up to %d %s instance%s to up to %u node%s "
                  "(at most %d per host, %d optimal)",
                  max_total, collective->id, pcmk__plural_s(max_total),
                  available_nodes, pcmk__plural_s(available_nodes),
                  max_per_node, optimal_per_node);
 
     // Assign as many instances as possible to their current location
     for (iter = instances; (iter != NULL) && (assigned < max_total);
          iter = iter->next) {
         instance = (pe_resource_t *) iter->data;
 
         current = preferred_node(collective, instance, optimal_per_node);
         if ((current != NULL)
             && assign_instance(instance, current, max_per_node)) {
             pe_rsc_trace(collective, "Assigned %s to current node %s",
                          instance->id, pe__node_name(current));
             assigned++;
         }
     }
 
     pe_rsc_trace(collective, "Assigned %d of %d instance%s to current node",
                  assigned, max_total, pcmk__plural_s(max_total));
 
     for (iter = instances; iter != NULL; iter = iter->next) {
         instance = (pe_resource_t *) iter->data;
 
         if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) {
             continue; // Already assigned
         }
 
         if (instance->running_on != NULL) {
             current = pe__current_node(instance);
             if (pcmk__top_allowed_node(instance, current) == NULL) {
                 const char *unmanaged = "";
 
                 if (!pcmk_is_set(instance->flags, pe_rsc_managed)) {
                     unmanaged = "Unmanaged resource ";
                 }
                 crm_notice("%s%s is running on %s which is no longer allowed",
                            unmanaged, instance->id, pe__node_name(current));
             }
         }
 
         if (assigned >= max_total) {
             pe_rsc_debug(collective,
                          "Not assigning %s because maximum %d instances "
                          "already assigned",
                          instance->id, max_total);
             resource_location(instance, NULL, -INFINITY,
                               "collective_limit_reached", collective->cluster);
 
         } else if (assign_instance(instance, NULL, max_per_node)) {
             assigned++;
         }
     }
 
     pe_rsc_debug(collective, "Assigned %d of %d possible instance%s of %s",
                  assigned, max_total, pcmk__plural_s(max_total),
                  collective->id);
 }
 
 enum instance_state {
     instance_starting   = (1 << 0),
     instance_stopping   = (1 << 1),
 
     /* This indicates that some instance is restarting. It's not the same as
      * instance_starting|instance_stopping, which would indicate that some
      * instance is starting, and some instance (not necessarily the same one) is
      * stopping.
      */
     instance_restarting = (1 << 2),
 
     instance_active     = (1 << 3),
 
     instance_all        = instance_starting|instance_stopping
                           |instance_restarting|instance_active,
 };
 
 /*!
  * \internal
  * \brief Check whether an instance is active, starting, and/or stopping
  *
  * \param[in]     instance  Clone instance or bundle replica container
  * \param[in,out] state     Whether any instance is starting, stopping, etc.
  */
 static void
 check_instance_state(const pe_resource_t *instance, uint32_t *state)
 {
     const GList *iter = NULL;
     uint32_t instance_state = 0; // State of just this instance
 
     // No need to check further if all conditions have already been detected
     if (pcmk_all_flags_set(*state, instance_all)) {
         return;
     }
 
     // If instance is a collective (a cloned group), check its children instead
     if (instance->variant > pe_native) {
         for (iter = instance->children;
              (iter != NULL) && !pcmk_all_flags_set(*state, instance_all);
              iter = iter->next) {
             check_instance_state((const pe_resource_t *) iter->data, state);
         }
         return;
     }
 
     // If we get here, instance is a primitive
 
     if (instance->running_on != NULL) {
         instance_state |= instance_active;
     }
 
     // Check each of the instance's actions for runnable start or stop
     for (iter = instance->actions;
          (iter != NULL) && !pcmk_all_flags_set(instance_state,
                                                instance_starting
                                                |instance_stopping);
          iter = iter->next) {
 
         const pe_action_t *action = (const pe_action_t *) iter->data;
         const bool optional = pcmk_is_set(action->flags, pe_action_optional);
 
         if (pcmk__str_eq(RSC_START, action->task, pcmk__str_none)) {
             if (!optional && pcmk_is_set(action->flags, pe_action_runnable)) {
                 pe_rsc_trace(instance, "Instance is starting due to %s",
                              action->uuid);
                 instance_state |= instance_starting;
             } else {
                 pe_rsc_trace(instance, "%s doesn't affect %s state (%s)",
                              action->uuid, instance->id,
                              (optional? "optional" : "unrunnable"));
             }
 
         } else if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_none)) {
             /* Only stop actions can be pseudo-actions for primitives. That
              * indicates that the node they are on is being fenced, so the stop
              * is implied rather than actually executed.
              */
             if (!optional
                 && pcmk_any_flags_set(action->flags,
                                       pe_action_pseudo|pe_action_runnable)) {
                 pe_rsc_trace(instance, "Instance is stopping due to %s",
                              action->uuid);
                 instance_state |= instance_stopping;
             } else {
                 pe_rsc_trace(instance, "%s doesn't affect %s state (%s)",
                              action->uuid, instance->id,
                              (optional? "optional" : "unrunnable"));
             }
         }
     }
 
     if (pcmk_all_flags_set(instance_state,
                            instance_starting|instance_stopping)) {
         instance_state |= instance_restarting;
     }
     *state |= instance_state;
 }
 
 /*!
  * \internal
  * \brief Create actions for collective resource instances
  *
  * \param[in,out] collective    Clone or bundle resource to create actions for
  * \param[in,out] instances     List of clone instances or bundle containers
  */
 void
 pcmk__create_instance_actions(pe_resource_t *collective, GList *instances)
 {
     uint32_t state = 0;
 
     pe_action_t *stop = NULL;
     pe_action_t *stopped = NULL;
 
     pe_action_t *start = NULL;
     pe_action_t *started = NULL;
 
     pe_rsc_trace(collective, "Creating collective instance actions for %s",
                  collective->id);
 
     // Create actions for each instance appropriate to its variant
     for (GList *iter = instances; iter != NULL; iter = iter->next) {
         pe_resource_t *instance = (pe_resource_t *) iter->data;
 
         instance->cmds->create_actions(instance);
         check_instance_state(instance, &state);
     }
 
     // Create pseudo-actions for rsc start and started
     start = pe__new_rsc_pseudo_action(collective, RSC_START,
                                       !pcmk_is_set(state, instance_starting),
                                       true);
     started = pe__new_rsc_pseudo_action(collective, RSC_STARTED,
                                         !pcmk_is_set(state, instance_starting),
                                         false);
     started->priority = INFINITY;
     if (pcmk_any_flags_set(state, instance_active|instance_starting)) {
         pe__set_action_flags(started, pe_action_runnable);
     }
 
     // Create pseudo-actions for rsc stop and stopped
     stop = pe__new_rsc_pseudo_action(collective, RSC_STOP,
                                      !pcmk_is_set(state, instance_stopping),
                                      true);
     stopped = pe__new_rsc_pseudo_action(collective, RSC_STOPPED,
                                         !pcmk_is_set(state, instance_stopping),
                                         true);
     stopped->priority = INFINITY;
     if (!pcmk_is_set(state, instance_restarting)) {
         pe__set_action_flags(stop, pe_action_migrate_runnable);
     }
 
     if (collective->variant == pe_clone) {
         pe__create_clone_notif_pseudo_ops(collective, start, started, stop,
                                           stopped);
     }
 }
 
 /*!
  * \internal
  * \brief Get a list of clone instances or bundle replica containers
  *
  * \param[in] rsc  Clone or bundle resource
  *
  * \return Clone instances if \p rsc is a clone, or a newly created list of
  *         \p rsc's replica containers if \p rsc is a bundle
  * \note The caller must call free_instance_list() on the result when the list
  *       is no longer needed.
  */
 static inline GList *
 get_instance_list(const pe_resource_t *rsc)
 {
     if (rsc->variant == pe_container) {
         return pe__bundle_containers(rsc);
     } else {
         return rsc->children;
     }
 }
 
 /*!
  * \internal
  * \brief Free any memory created by get_instance_list()
  *
  * \param[in]     rsc   Clone or bundle resource passed to get_instance_list()
  * \param[in,out] list  Return value of get_instance_list() for \p rsc
  */
 static inline void
 free_instance_list(const pe_resource_t *rsc, GList *list)
 {
     if (list != rsc->children) {
         g_list_free(list);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether an instance is compatible with a role and node
  *
  * \param[in] instance  Clone instance or bundle replica container
  * \param[in] node      Instance must match this node
  * \param[in] role      If not RSC_ROLE_UNKNOWN, instance must match this role
  * \param[in] current   If true, compare instance's original node and role,
  *                      otherwise compare assigned next node and role
  *
  * \return true if \p instance is compatible with \p node and \p role,
  *         otherwise false
  */
 bool
 pcmk__instance_matches(const pe_resource_t *instance, const pe_node_t *node,
                        enum rsc_role_e role, bool current)
 {
     pe_node_t *instance_node = NULL;
 
     CRM_CHECK((instance != NULL) && (node != NULL), return false);
 
     if ((role != RSC_ROLE_UNKNOWN)
         && (role != instance->fns->state(instance, current))) {
         pe_rsc_trace(instance,
                      "%s is not a compatible instance (role is not %s)",
                      instance->id, role2text(role));
         return false;
     }
 
     if (!is_set_recursive(instance, pe_rsc_block, true)) {
         // We only want instances that aren't blocked (for example, due to failure)
         instance_node = instance->fns->location(instance, NULL, current);
     }
 
     if (instance_node == NULL) {
         pe_rsc_trace(instance,
                      "%s is not a compatible instance (not assigned to a node)",
                      instance->id);
         return false;
     }
 
     if (instance_node->details != node->details) {
         pe_rsc_trace(instance,
                      "%s is not a compatible instance (assigned to %s not %s)",
                      instance->id, pe__node_name(instance_node),
                      pe__node_name(node));
         return false;
     }
 
     return true;
 }
 
 /*!
  * \internal
  * \brief Find an instance that matches a given resource by node and role
  *
  * \param[in] match_rsc  Resource that instance must match (for logging only)
  * \param[in] rsc        Clone or bundle resource to check for matching instance
  * \param[in] node       Instance must match this node
  * \param[in] role       If not RSC_ROLE_UNKNOWN, instance must match this role
  * \param[in] current    If true, compare instance's original node and role,
  *                       otherwise compare assigned next node and role
  *
  * \return \p rsc instance matching \p node and \p role if any, otherwise NULL
  */
 static pe_resource_t *
 find_compatible_instance_on_node(const pe_resource_t *match_rsc,
                                  const pe_resource_t *rsc,
                                  const pe_node_t *node, enum rsc_role_e role,
                                  bool current)
 {
     GList *instances = NULL;
 
     instances = get_instance_list(rsc);
     for (GList *iter = instances; iter != NULL; iter = iter->next) {
         pe_resource_t *instance = (pe_resource_t *) iter->data;
 
         if (pcmk__instance_matches(instance, node, role, current)) {
             pe_rsc_trace(match_rsc,
                          "Found %s %s instance %s compatible with %s on %s",
                          ((role == RSC_ROLE_UNKNOWN)? "matching" : role2text(role)),
                          rsc->id, instance->id, match_rsc->id,
                          pe__node_name(node));
             free_instance_list(rsc, instances); // Only frees list, not contents
             return instance;
         }
     }
     free_instance_list(rsc, instances);
 
     pe_rsc_trace(match_rsc, "No %s %s instance found compatible with %s on %s",
                  ((role == RSC_ROLE_UNKNOWN)? "matching" : role2text(role)),
                  rsc->id, match_rsc->id, pe__node_name(node));
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Find a clone instance or bundle container compatible with a resource
  *
  * \param[in] match_rsc  Resource that instance must match
  * \param[in] rsc        Clone or bundle resource to check for matching instance
  * \param[in] role       If not RSC_ROLE_UNKNOWN, instance must match this role
  * \param[in] current    If true, compare instance's original node and role,
  *                       otherwise compare assigned next node and role
  *
  * \return Compatible (by \p role and \p match_rsc location) instance of \p rsc
  *         if any, otherwise NULL
  */
 pe_resource_t *
 pcmk__find_compatible_instance(const pe_resource_t *match_rsc,
                                const pe_resource_t *rsc, enum rsc_role_e role,
                                bool current)
 {
     pe_resource_t *instance = NULL;
     GList *nodes = NULL;
     const pe_node_t *node = match_rsc->fns->location(match_rsc, NULL, current);
 
     // If match_rsc has a node, check only that node
     if (node != NULL) {
         return find_compatible_instance_on_node(match_rsc, rsc, node, role,
                                                 current);
     }
 
     // Otherwise check for an instance matching any of match_rsc's allowed nodes
     nodes = pcmk__sort_nodes(g_hash_table_get_values(match_rsc->allowed_nodes),
                              NULL);
     for (GList *iter = nodes; (iter != NULL) && (instance == NULL);
          iter = iter->next) {
         instance = find_compatible_instance_on_node(match_rsc, rsc,
                                                     (pe_node_t *) iter->data,
                                                     role, current);
     }
 
     if (instance == NULL) {
         pe_rsc_debug(rsc, "No %s instance found compatible with %s",
                      rsc->id, match_rsc->id);
     }
     g_list_free(nodes);
     return instance;
 }
 
 /*!
  * \internal
  * \brief Unassign an instance if mandatory ordering has no interleave match
  *
  * \param[in]     first          'First' action in an ordering
  * \param[in]     then           'Then' action in an ordering
  * \param[in,out] then_instance  'Then' instance that has no interleave match
  * \param[in]     type           Group of enum pe_ordering flags to apply
  * \param[in]     current        If true, "then" action is stopped or demoted
  *
  * \return true if \p then_instance was unassigned, otherwise false
  */
 static bool
 unassign_if_mandatory(const pe_action_t *first, const pe_action_t *then,
                       pe_resource_t *then_instance, uint32_t type, bool current)
 {
     // Allow "then" instance to go down even without an interleave match
     if (current) {
         pe_rsc_trace(then->rsc,
                      "%s has no instance to order before stopping "
                      "or demoting %s",
                      first->rsc->id, then_instance->id);
 
     /* If the "first" action must be runnable, but there is no "first"
      * instance, the "then" instance must not be allowed to come up.
      */
     } else if (pcmk_any_flags_set(type, pe_order_runnable_left
                                         |pe_order_implies_then)) {
         pe_rsc_info(then->rsc,
                     "Inhibiting %s from being active "
                     "because there is no %s instance to interleave",
                     then_instance->id, first->rsc->id);
         return pcmk__assign_resource(then_instance, NULL, true);
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Find first matching action for a clone instance or bundle container
  *
  * \param[in] action       Action in an interleaved ordering
  * \param[in] instance     Clone instance or bundle container being interleaved
  * \param[in] action_name  Action to look for
  * \param[in] node         If not NULL, require action to be on this node
  * \param[in] for_first    If true, \p instance is the 'first' resource in the
  *                         ordering, otherwise it is the 'then' resource
  *
  * \return First action for \p instance (or in some cases if \p instance is a
  *         bundle container, its containerized resource) that matches
  *         \p action_name and \p node if any, otherwise NULL
  */
 static pe_action_t *
 find_instance_action(const pe_action_t *action, const pe_resource_t *instance,
                      const char *action_name, const pe_node_t *node,
                      bool for_first)
 {
     const pe_resource_t *rsc = NULL;
     pe_action_t *matching_action = NULL;
 
     /* If instance is a bundle container, sometimes we should interleave the
      * action for the container itself, and sometimes for the containerized
      * resource.
      *
      * For example, given "start bundle A then bundle B", B likely requires the
      * service inside A's container to be active, rather than just the
      * container, so we should interleave the action for A's containerized
      * resource. On the other hand, it's possible B's container itself requires
      * something from A, so we should interleave the action for B's container.
      *
      * Essentially, for 'first', we should use the containerized resource for
      * everything except stop, and for 'then', we should use the container for
      * everything except promote and demote (which can only be performed on the
      * containerized resource).
      */
     if ((for_first && !pcmk__str_any_of(action->task, CRMD_ACTION_STOP,
                                         CRMD_ACTION_STOPPED, NULL))
 
         || (!for_first && pcmk__str_any_of(action->task, CRMD_ACTION_PROMOTE,
                                            CRMD_ACTION_PROMOTED,
                                            CRMD_ACTION_DEMOTE,
                                            CRMD_ACTION_DEMOTED, NULL))) {
 
-        rsc = pcmk__get_rsc_in_container(instance);
+        rsc = pe__get_rsc_in_container(instance);
     }
     if (rsc == NULL) {
         rsc = instance; // No containerized resource, use instance itself
     } else {
         node = NULL; // Containerized actions are on bundle-created guest
     }
 
     matching_action = find_first_action(rsc->actions, NULL, action_name, node);
     if (matching_action != NULL) {
         return matching_action;
     }
 
     if (pcmk_is_set(instance->flags, pe_rsc_orphan)
         || pcmk__str_any_of(action_name, RSC_STOP, RSC_DEMOTE, NULL)) {
         crm_trace("No %s action found for %s%s",
                   action_name,
                   pcmk_is_set(instance->flags, pe_rsc_orphan)? "orphan " : "",
                   instance->id);
     } else {
         crm_err("No %s action found for %s to interleave (bug?)",
                 action_name, instance->id);
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Get the original action name of a bundle or clone action
  *
  * Given an action for a bundle or clone, get the original action name,
  * mapping notify to the action being notified, and if the instances are
  * primitives, mapping completion actions to the action that was completed
  * (for example, stopped to stop).
  *
  * \param[in] action  Clone or bundle action to check
  *
  * \return Original action name for \p action
  */
 static const char *
 orig_action_name(const pe_action_t *action)
 {
     const pe_resource_t *instance = action->rsc->children->data; // Any instance
     char *action_type = NULL;
     const char *action_name = action->task;
     enum action_tasks orig_task = no_action;
 
     if (pcmk__strcase_any_of(action->task, CRMD_ACTION_NOTIFY,
                              CRMD_ACTION_NOTIFIED, NULL)) {
         // action->uuid is RSC_(confirmed-){pre,post}_notify_ACTION_INTERVAL
         CRM_CHECK(parse_op_key(action->uuid, NULL, &action_type, NULL),
                   return task2text(no_action));
         action_name = strstr(action_type, "_notify_");
         CRM_CHECK(action_name != NULL, return task2text(no_action));
         action_name += strlen("_notify_");
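         // action_name now points at the notified action, e.g. "start"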
     }
     orig_task = get_complex_task(instance, action_name);
     free(action_type);
     return task2text(orig_task);
 }
 
 /*!
  * \internal
  * \brief Update two interleaved actions according to an ordering between them
  *
  * Given information about an ordering of two interleaved actions, update the
  * actions' flags (and runnable_before members if appropriate) as appropriate
  * for the ordering. Effects may cascade to other orderings involving the
  * actions as well.
  *
  * \param[in,out] first     'First' action in an ordering
  * \param[in,out] then      'Then' action in an ordering
  * \param[in]     node      If not NULL, limit scope of ordering to this node
  * \param[in]     filter    Action flags to limit scope of certain updates (may
  *                          include pe_action_optional to affect only mandatory
  *                          actions, and pe_action_runnable to affect only
  *                          runnable actions)
  * \param[in]     type      Group of enum pe_ordering flags to apply
  *
  * \return Group of enum pcmk__updated flags indicating what was updated
  */
 static uint32_t
 update_interleaved_actions(pe_action_t *first, pe_action_t *then,
                            const pe_node_t *node, uint32_t filter,
                            uint32_t type)
 {
     GList *instances = NULL;
     uint32_t changed = pcmk__updated_none;
     const char *orig_first_task = orig_action_name(first);
 
     // Stops and demotes must be interleaved with instance on current node
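     // (such "first" uuids end in "_stopped_0" or "_demoted_0")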
     bool current = pcmk__ends_with(first->uuid, "_" CRMD_ACTION_STOPPED "_0")
                    || pcmk__ends_with(first->uuid,
                                       "_" CRMD_ACTION_DEMOTED "_0");
 
     // Update the specified actions for each "then" instance individually
     instances = get_instance_list(then->rsc);
     for (GList *iter = instances; iter != NULL; iter = iter->next) {
         pe_resource_t *first_instance = NULL;
         pe_resource_t *then_instance = iter->data;
 
         pe_action_t *first_action = NULL;
         pe_action_t *then_action = NULL;
 
         // Find a "first" instance to interleave with this "then" instance
         first_instance = pcmk__find_compatible_instance(then_instance,
                                                         first->rsc,
                                                         RSC_ROLE_UNKNOWN,
                                                         current);
 
         if (first_instance == NULL) { // No instance can be interleaved
             if (unassign_if_mandatory(first, then, then_instance, type,
                                       current)) {
                 pcmk__set_updated_flags(changed, first, pcmk__updated_then);
             }
             continue;
         }
 
         first_action = find_instance_action(first, first_instance,
                                             orig_first_task, node, true);
         if (first_action == NULL) {
             continue;
         }
 
         then_action = find_instance_action(then, then_instance, then->task,
                                            node, false);
         if (then_action == NULL) {
             continue;
         }
 
         if (order_actions(first_action, then_action, type)) {
             pcmk__set_updated_flags(changed, first,
                                     pcmk__updated_first|pcmk__updated_then);
         }
 
         changed |= then_instance->cmds->update_ordered_actions(
             first_action, then_action, node,
             first_instance->cmds->action_flags(first_action, node), filter,
             type, then->rsc->cluster);
     }
     free_instance_list(then->rsc, instances);
     return changed;
 }
 
 /*!
  * \internal
  * \brief Check whether two actions in an ordering can be interleaved
  *
  * \param[in] first  'First' action in the ordering
  * \param[in] then   'Then' action in the ordering
  *
  * \return true if \p first and \p then can be interleaved, otherwise false
  */
 static bool
 can_interleave_actions(const pe_action_t *first, const pe_action_t *then)
 {
     bool interleave = false;
     pe_resource_t *rsc = NULL;
 
     if ((first->rsc == NULL) || (then->rsc == NULL)) {
         crm_trace("Not interleaving %s with %s: not resource actions",
                   first->uuid, then->uuid);
         return false;
     }
 
     if (first->rsc == then->rsc) {
         crm_trace("Not interleaving %s with %s: same resource",
                   first->uuid, then->uuid);
         return false;
     }
 
     if ((first->rsc->variant < pe_clone) || (then->rsc->variant < pe_clone)) {
         crm_trace("Not interleaving %s with %s: not clones or bundles",
                   first->uuid, then->uuid);
         return false;
     }
 
     if (pcmk__ends_with(then->uuid, "_stop_0")
         || pcmk__ends_with(then->uuid, "_demote_0")) {
         rsc = first->rsc;
     } else {
         rsc = then->rsc;
     }
 
     interleave = crm_is_true(g_hash_table_lookup(rsc->meta,
                                                  XML_RSC_ATTR_INTERLEAVE));
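     // (XML_RSC_ATTR_INTERLEAVE is the "interleave" resource meta-attribute)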
     pe_rsc_trace(rsc, "'%s then %s' will %sbe interleaved (based on %s)",
                  first->uuid, then->uuid, (interleave? "" : "not "), rsc->id);
     return interleave;
 }
 
 /*!
  * \internal
  * \brief Update non-interleaved instance actions according to an ordering
  *
  * Given information about an ordering of two non-interleaved actions, update
  * the actions' flags (and runnable_before members if appropriate) as
  * appropriate for the ordering. Effects may cascade to other orderings
  * involving the actions as well.
  *
  * \param[in,out] instance  Clone instance or bundle container
  * \param[in,out] first     "First" action in ordering
  * \param[in]     then      "Then" action in ordering (for \p instance's parent)
  * \param[in]     node      If not NULL, limit scope of ordering to this node
  * \param[in]     flags     Action flags for \p first for ordering purposes
  * \param[in]     filter    Action flags to limit scope of certain updates (may
  *                          include pe_action_optional to affect only mandatory
  *                          actions, and pe_action_runnable to affect only
  *                          runnable actions)
  * \param[in]     type      Group of enum pe_ordering flags to apply
  *
  * \return Group of enum pcmk__updated flags indicating what was updated
  */
 static uint32_t
 update_noninterleaved_actions(pe_resource_t *instance, pe_action_t *first,
                               const pe_action_t *then, const pe_node_t *node,
                               uint32_t flags, uint32_t filter, uint32_t type)
 {
     pe_action_t *instance_action = NULL;
     uint32_t instance_flags = 0;
     uint32_t changed = pcmk__updated_none;
 
     // Check whether instance has an equivalent of "then" action
     instance_action = find_first_action(instance->actions, NULL, then->task,
                                         node);
     if (instance_action == NULL) {
         return changed;
     }
 
     // Check whether action is runnable
     instance_flags = instance->cmds->action_flags(instance_action, node);
     if (!pcmk_is_set(instance_flags, pe_action_runnable)) {
         return changed;
     }
 
     // If so, update actions for the instance
     changed = instance->cmds->update_ordered_actions(first, instance_action,
                                                      node, flags, filter, type,
                                                      instance->cluster);
 
     // Propagate any changes to later actions
     if (pcmk_is_set(changed, pcmk__updated_then)) {
         for (GList *after_iter = instance_action->actions_after;
              after_iter != NULL; after_iter = after_iter->next) {
             pe_action_wrapper_t *after = after_iter->data;
 
             pcmk__update_action_for_orderings(after->action, instance->cluster);
         }
     }
 
     return changed;
 }
 
 /*!
  * \internal
  * \brief Update two actions according to an ordering between them
  *
  * Given information about an ordering of two clone or bundle actions, update
  * the actions' flags (and runnable_before members if appropriate) as
  * appropriate for the ordering. Effects may cascade to other orderings
  * involving the actions as well.
  *
  * \param[in,out] first     'First' action in an ordering
  * \param[in,out] then      'Then' action in an ordering
  * \param[in]     node      If not NULL, limit scope of ordering to this node
  *                          (only used when interleaving instances)
  * \param[in]     flags     Action flags for \p first for ordering purposes
  * \param[in]     filter    Action flags to limit scope of certain updates (may
  *                          include pe_action_optional to affect only mandatory
  *                          actions, and pe_action_runnable to affect only
  *                          runnable actions)
  * \param[in]     type      Group of enum pe_ordering flags to apply
  * \param[in,out] data_set  Cluster working set
  *
  * \return Group of enum pcmk__updated flags indicating what was updated
  */
 uint32_t
 pcmk__instance_update_ordered_actions(pe_action_t *first, pe_action_t *then,
                                       const pe_node_t *node, uint32_t flags,
                                       uint32_t filter, uint32_t type,
                                       pe_working_set_t *data_set)
 {
     if (then->rsc == NULL) {
         return pcmk__updated_none;
 
     } else if (can_interleave_actions(first, then)) {
         return update_interleaved_actions(first, then, node, filter, type);
 
     } else {
         uint32_t changed = pcmk__updated_none;
         GList *instances = get_instance_list(then->rsc);
 
         // Update actions for the clone or bundle resource itself
         changed |= pcmk__update_ordered_actions(first, then, node, flags,
                                                 filter, type, data_set);
 
         // Update the 'then' clone instances or bundle containers individually
         for (GList *iter = instances; iter != NULL; iter = iter->next) {
             pe_resource_t *instance = iter->data;
 
             changed |= update_noninterleaved_actions(instance, first, then,
                                                      node, flags, filter, type);
         }
         free_instance_list(then->rsc, instances);
         return changed;
     }
 }
 
 #define pe__clear_action_summary_flags(flags, action, flag) do {        \
         flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                      "Action summary", action->rsc->id, \
                                      flags, flag, #flag);               \
     } while (0)
 
 /*!
  * \internal
  * \brief Return action flags for a given clone or bundle action
  *
  * \param[in,out] action     Action for a clone or bundle
  * \param[in]     instances  Clone instances or bundle containers
  * \param[in]     node       If not NULL, limit effects to this node
  *
  * \return Flags appropriate to \p action on \p node
  */
 enum pe_action_flags
 pcmk__collective_action_flags(pe_action_t *action, const GList *instances,
                               const pe_node_t *node)
 {
     bool any_runnable = false;
     enum pe_action_flags flags;
     const char *action_name = orig_action_name(action);
 
     // Set original assumptions (optional and runnable may be cleared below)
     flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;
 
     for (const GList *iter = instances; iter != NULL; iter = iter->next) {
         const pe_resource_t *instance = iter->data;
         const pe_node_t *instance_node = NULL;
         pe_action_t *instance_action = NULL;
         enum pe_action_flags instance_flags;
 
         // Node is relevant only to primitive instances
         if (instance->variant == pe_native) {
             instance_node = node;
         }
 
         instance_action = find_first_action(instance->actions, NULL,
                                             action_name, instance_node);
         if (instance_action == NULL) {
             pe_rsc_trace(action->rsc, "%s has no %s action on %s",
                          instance->id, action_name, pe__node_name(node));
             continue;
         }
 
         pe_rsc_trace(action->rsc, "%s has %s for %s on %s",
                      instance->id, instance_action->uuid, action_name,
                      pe__node_name(node));
 
         instance_flags = instance->cmds->action_flags(instance_action, node);
 
         // If any instance action is mandatory, so is the collective action
         if (pcmk_is_set(flags, pe_action_optional)
             && !pcmk_is_set(instance_flags, pe_action_optional)) {
             pe_rsc_trace(instance, "%s is mandatory because %s is",
                          action->uuid, instance_action->uuid);
             pe__clear_action_summary_flags(flags, action, pe_action_optional);
             pe__clear_action_flags(action, pe_action_optional);
         }
 
         // If any instance action is runnable, so is the collective action
         if (pcmk_is_set(instance_flags, pe_action_runnable)) {
             any_runnable = true;
         }
     }
 
     if (!any_runnable) {
         pe_rsc_trace(action->rsc,
                      "%s is not runnable because no instance can run %s",
                      action->uuid, action_name);
         pe__clear_action_summary_flags(flags, action, pe_action_runnable);
         if (node == NULL) {
             pe__clear_action_flags(action, pe_action_runnable);
         }
     }
 
     return flags;
 }
 
 /*!
  * \internal
  * \brief Add a collective resource's colocations to a list for an instance
  *
  * \param[in,out] list        Colocation list to add to
  * \param[in]     instance    Clone or bundle instance or instance group member
  * \param[in]     collective  Clone or bundle resource with colocations to add
  * \param[in]     with_this   If true, add collective's "with this" colocations,
  *                            otherwise add its "this with" colocations
  */
 void
 pcmk__add_collective_constraints(GList **list, const pe_resource_t *instance,
                                  const pe_resource_t *collective,
                                  bool with_this)
 {
     const GList *colocations = NULL;
     bool everywhere = false;
 
     CRM_CHECK((list != NULL) && (instance != NULL), return);
 
     if (collective == NULL) {
         return;
     }
     switch (collective->variant) {
         case pe_clone:
         case pe_container:
             break;
         default:
             return;
     }
 
     everywhere = can_run_everywhere(collective);
 
     if (with_this) {
         colocations = collective->rsc_cons_lhs;
     } else {
         colocations = collective->rsc_cons;
     }
 
     for (const GList *iter = colocations; iter != NULL; iter = iter->next) {
         const pcmk__colocation_t *colocation = iter->data;
 
         if (with_this
             && !pcmk__colocation_has_influence(colocation, instance)) {
             continue;
         }
         if (!everywhere || (colocation->score < 0)
             || (!with_this && (colocation->score == INFINITY))) {
 
             if (with_this) {
                 pcmk__add_with_this(list, colocation);
             } else {
                 pcmk__add_this_with(list, colocation);
             }
         }
     }
 }
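 
 /* Usage sketch (hypothetical caller, not part of this patch): an instance's
  * node weights can be made to reflect its parent's colocations by folding
  * them into the instance's own list:
  *
  *     GList *colocations = NULL;
  *
  *     // add the clone's or bundle's "with this" colocations
  *     pcmk__add_collective_constraints(&colocations, instance,
  *                                      instance->parent, true);
  *     ...
  *     g_list_free(colocations);
  */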
diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c
index 38a8817340..a8d1354d7f 100644
--- a/lib/pengine/bundle.c
+++ b/lib/pengine/bundle.c
@@ -1,2070 +1,2100 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <ctype.h>
 #include <stdint.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <crm/common/output.h>
 #include <crm/common/xml_internal.h>
 #include <pe_status_private.h>
 
 #define PE__VARIANT_BUNDLE 1
 #include "./variant.h"
 
 /*!
  * \internal
  * \brief Get maximum number of bundle replicas allowed to run
  *
  * \param[in] rsc  Bundle or bundled resource to check
  *
  * \return Maximum replicas for bundle corresponding to \p rsc
  */
 int
 pe__bundle_max(const pe_resource_t *rsc)
 {
     const pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true));
     return bundle_data->nreplicas;
 }
 
 /*!
  * \internal
  * \brief Get the resource inside a bundle
  *
  * \param[in] rsc  Bundle (or resource inside one) to check
  *
  * \return Resource inside the bundle corresponding to \p rsc if any,
  *         otherwise NULL
  */
 pe_resource_t *
 pe__bundled_resource(const pe_resource_t *rsc)
 {
     const pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true));
     return bundle_data->child;
 }
 
+/*!
+ * \internal
+ * \brief Get containerized resource corresponding to a given bundle container
+ *
+ * \param[in] instance  Collective instance that might be a bundle container
+ *
+ * \return Bundled resource instance inside \p instance if it is a bundle
+ *         container instance, otherwise NULL
+ */
+const pe_resource_t *
+pe__get_rsc_in_container(const pe_resource_t *instance)
+{
+    const pe__bundle_variant_data_t *data = NULL;
+    const pe_resource_t *top = pe__const_top_resource(instance, true);
+
+    if ((top == NULL) || (top->variant != pe_container)) {
+        return NULL;
+    }
+    get_bundle_variant_data(data, top);
+
+    for (const GList *iter = data->replicas; iter != NULL; iter = iter->next) {
+        const pe__bundle_replica_t *replica = iter->data;
+
+        if (instance == replica->container) {
+            return replica->child;
+        }
+    }
+    return NULL;
+}
+
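+/* Usage sketch (hypothetical caller, not part of this patch): callers that
+ * walk collective instances can use pe__get_rsc_in_container() to treat a
+ * bundle container and the resource it wraps as a pair:
+ *
+ *     const pe_resource_t *bundled = pe__get_rsc_in_container(instance);
+ *
+ *     if (bundled != NULL) {
+ *         // instance is a bundle container; bundled is its replica's child
+ *     }
+ */
+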
 /*!
  * \internal
  * \brief Check whether a given node is created by a bundle
  *
  * \param[in] bundle  Bundle resource to check
  * \param[in] node    Node to check
  *
  * \return true if \p node is a bundle node created by \p bundle,
  *         otherwise false
  */
 bool
 pe__node_is_bundle_instance(const pe_resource_t *bundle, const pe_node_t *node)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, bundle);
     for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
         pe__bundle_replica_t *replica = iter->data;
 
         if (pe__same_node(node, replica->node)) {
             return true;
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Iterate over bundle replicas
  *
  * \param[in]     bundle     Bundle to iterate over
  * \param[in]     fn         Function to call for each replica (iteration
  *                           stops if this returns false)
  * \param[in,out] user_data  Pointer to pass to \p fn
  */
 void
 pe__foreach_bundle_replica(const pe_resource_t *bundle,
                            bool (*fn)(pe__bundle_replica_t *, void *),
                            void *user_data)
 {
     const pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, bundle);
     for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
         if (!fn((pe__bundle_replica_t *) iter->data, user_data)) {
             break;
         }
     }
 }
 
 static char *
 next_ip(const char *last_ip)
 {
     unsigned int oct1 = 0;
     unsigned int oct2 = 0;
     unsigned int oct3 = 0;
     unsigned int oct4 = 0;
     int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);
 
     if (rc != 4) {
         /* @TODO check for IPv6 */
         return NULL;
 
     } else if (oct3 > 253) {
         return NULL;
 
     } else if (oct4 > 253) {
         ++oct3;
         oct4 = 1;
 
     } else {
         ++oct4;
     }
 
     return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
 }
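 
 /* Worked examples of next_ip() above (illustrative):
  *   next_ip("192.168.122.5")   -> "192.168.122.6"   (bump the last octet)
  *   next_ip("192.168.122.254") -> "192.168.123.1"   (last octet exhausted)
  *   next_ip("192.168.254.10")  -> NULL              (third octet exhausted)
  *   next_ip("fe80::1")         -> NULL              (IPv6 not yet handled)
  */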
 
 static void
 allocate_ip(pe__bundle_variant_data_t *data, pe__bundle_replica_t *replica,
             GString *buffer)
 {
     if(data->ip_range_start == NULL) {
         return;
 
     } else if(data->ip_last) {
         replica->ipaddr = next_ip(data->ip_last);
 
     } else {
         replica->ipaddr = strdup(data->ip_range_start);
     }
 
     data->ip_last = replica->ipaddr;
     switch (data->agent_type) {
         case PE__CONTAINER_AGENT_DOCKER:
         case PE__CONTAINER_AGENT_PODMAN:
             if (data->add_host) {
                 g_string_append_printf(buffer, " --add-host=%s-%d:%s",
                                        data->prefix, replica->offset,
                                        replica->ipaddr);
             } else {
                 g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d",
                                        replica->ipaddr, data->prefix,
                                        replica->offset);
             }
             break;
 
         case PE__CONTAINER_AGENT_RKT:
             g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d",
                                    replica->ipaddr, data->prefix,
                                    replica->offset);
             break;
 
         default: // PE__CONTAINER_AGENT_UNKNOWN
             break;
     }
 }
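 
 /* Example of allocate_ip() output (hypothetical bundle prefix "httpd-bundle"
  * with ip-range-start 192.168.122.131): replica 0 gets
  * " --add-host=httpd-bundle-0:192.168.122.131" appended to its container
  * options when add-host is enabled, or the equivalent
  * " --hosts-entry=192.168.122.131=httpd-bundle-0" otherwise (and always for
  * rkt).
  */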
 
 static xmlNode *
 create_resource(const char *name, const char *provider, const char *kind)
 {
     xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
 
     crm_xml_add(rsc, XML_ATTR_ID, name);
     crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
     crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
     crm_xml_add(rsc, XML_ATTR_TYPE, kind);
 
     return rsc;
 }
 
 /*!
  * \internal
  * \brief Check whether cluster can manage resource inside container
  *
  * \param[in,out] data  Container variant data
  *
  * \return TRUE if networking configuration is acceptable, FALSE otherwise
  *
  * \note The resource is manageable if an IP range or control port has been
  *       specified. If a control port is used without an IP range, replicas per
  *       host must be 1.
  */
 static bool
 valid_network(pe__bundle_variant_data_t *data)
 {
     if(data->ip_range_start) {
         return TRUE;
     }
     if(data->control_port) {
         if(data->nreplicas_per_host > 1) {
             pe_err("Specifying the 'control-port' for %s requires 'replicas-per-host=1'", data->prefix);
             data->nreplicas_per_host = 1;
             // @TODO to be sure: pe__clear_resource_flags(rsc, pe_rsc_unique);
         }
         return TRUE;
     }
     return FALSE;
 }
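 
 /* The two network configurations that valid_network() accepts look roughly
  * like the following (values are illustrative):
  *
  *   <network ip-range-start="192.168.122.131" host-netmask="24"/>
  *
  * or, when no IP range can be assigned (which forces replicas-per-host=1):
  *
  *   <network control-port="3121"/>
  */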
 
 static int
 create_ip_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                    pe__bundle_replica_t *replica)
 {
     if(data->ip_range_start) {
         char *id = NULL;
         xmlNode *xml_ip = NULL;
         xmlNode *xml_obj = NULL;
 
         id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr);
         crm_xml_sanitize_id(id);
         xml_ip = create_resource(id, "heartbeat", "IPaddr2");
         free(id);
 
         xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d",
                        data->prefix, replica->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr);
         if(data->host_network) {
             crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
         }
 
         if(data->host_netmask) {
             crm_create_nvpair_xml(xml_obj, NULL,
                                   "cidr_netmask", data->host_netmask);
 
         } else {
             crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
         }
 
         xml_obj = create_xml_node(xml_ip, "operations");
         crm_create_op_xml(xml_obj, ID(xml_ip), "monitor", "60s", NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
 
         if (pe__unpack_resource(xml_ip, &replica->ip, parent,
                                 parent->cluster) != pcmk_rc_ok) {
             return pcmk_rc_unpack_error;
         }
 
         parent->children = g_list_append(parent->children, replica->ip);
     }
     return pcmk_rc_ok;
 }
 
 static const char*
 container_agent_str(enum pe__container_agent t)
 {
     switch (t) {
         case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S;
         case PE__CONTAINER_AGENT_RKT:    return PE__CONTAINER_AGENT_RKT_S;
         case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S;
         default: // PE__CONTAINER_AGENT_UNKNOWN
             break;
     }
     return PE__CONTAINER_AGENT_UNKNOWN_S;
 }
 
 static int
 create_container_resource(pe_resource_t *parent,
                           const pe__bundle_variant_data_t *data,
                           pe__bundle_replica_t *replica)
 {
     char *id = NULL;
     xmlNode *xml_container = NULL;
     xmlNode *xml_obj = NULL;
 
     // Agent-specific
     const char *hostname_opt = NULL;
     const char *env_opt = NULL;
     const char *agent_str = NULL;
     int volid = 0;  // rkt-only
 
     GString *buffer = NULL;
     GString *dbuffer = NULL;
 
     // Where syntax differences are drop-in replacements, set them now
     switch (data->agent_type) {
         case PE__CONTAINER_AGENT_DOCKER:
         case PE__CONTAINER_AGENT_PODMAN:
             hostname_opt = "-h ";
             env_opt = "-e ";
             break;
         case PE__CONTAINER_AGENT_RKT:
             hostname_opt = "--hostname=";
             env_opt = "--environment=";
             break;
         default:    // PE__CONTAINER_AGENT_UNKNOWN
             return pcmk_rc_unpack_error;
     }
     agent_str = container_agent_str(data->agent_type);
 
     buffer = g_string_sized_new(4096);
 
     id = crm_strdup_printf("%s-%s-%d", data->prefix, agent_str,
                            replica->offset);
     crm_xml_sanitize_id(id);
     xml_container = create_resource(id, "heartbeat", agent_str);
     free(id);
 
     xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
     crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset);
 
     crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
     crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
     crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
     crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
 
     if (data->agent_type == PE__CONTAINER_AGENT_DOCKER) {
         g_string_append(buffer, " --restart=no");
     }
 
     /* Set a container hostname only if we have an IP to map it to. The user
      * can set -h or --uts=host themselves if they want a nicer name for logs,
      * but this makes applications happy that need their hostname to match the
      * IP they bind to.
      */
     if (data->ip_range_start != NULL) {
         g_string_append_printf(buffer, " %s%s-%d", hostname_opt, data->prefix,
                                replica->offset);
     }
     pcmk__g_strcat(buffer, " ", env_opt, "PCMK_stderr=1", NULL);
 
     if (data->container_network != NULL) {
         pcmk__g_strcat(buffer, " --net=", data->container_network, NULL);
     }
 
     if (data->control_port != NULL) {
         pcmk__g_strcat(buffer, " ", env_opt, "PCMK_remote_port=",
                       data->control_port, NULL);
     } else {
         g_string_append_printf(buffer, " %sPCMK_remote_port=%d", env_opt,
                                DEFAULT_REMOTE_PORT);
     }
 
     for (GList *iter = data->mounts; iter != NULL; iter = iter->next) {
         pe__bundle_mount_t *mount = (pe__bundle_mount_t *) iter->data;
         char *source = NULL;
 
         if (pcmk_is_set(mount->flags, pe__bundle_mount_subdir)) {
             source = crm_strdup_printf("%s/%s-%d", mount->source, data->prefix,
                                        replica->offset);
             pcmk__add_separated_word(&dbuffer, 1024, source, ",");
         }
 
         switch (data->agent_type) {
             case PE__CONTAINER_AGENT_DOCKER:
             case PE__CONTAINER_AGENT_PODMAN:
                 pcmk__g_strcat(buffer,
                                " -v ", pcmk__s(source, mount->source),
                                ":", mount->target, NULL);
 
                 if (mount->options != NULL) {
                     pcmk__g_strcat(buffer, ":", mount->options, NULL);
                 }
                 break;
             case PE__CONTAINER_AGENT_RKT:
                 g_string_append_printf(buffer,
                                        " --volume vol%d,kind=host,"
                                        "source=%s%s%s "
                                        "--mount volume=vol%d,target=%s",
                                        volid, pcmk__s(source, mount->source),
                                        (mount->options != NULL)? "," : "",
                                        pcmk__s(mount->options, ""),
                                        volid, mount->target);
                 volid++;
                 break;
             default:
                 break;
         }
         free(source);
     }
 
     for (GList *iter = data->ports; iter != NULL; iter = iter->next) {
         pe__bundle_port_t *port = (pe__bundle_port_t *) iter->data;
 
         switch (data->agent_type) {
             case PE__CONTAINER_AGENT_DOCKER:
             case PE__CONTAINER_AGENT_PODMAN:
                 if (replica->ipaddr != NULL) {
                     pcmk__g_strcat(buffer,
                                    " -p ", replica->ipaddr, ":", port->source,
                                    ":", port->target, NULL);
 
                 } else if (!pcmk__str_eq(data->container_network, "host",
                                          pcmk__str_none)) {
                     // No need to do port mapping if net == host
                     pcmk__g_strcat(buffer,
                                    " -p ", port->source, ":", port->target,
                                    NULL);
                 }
                 break;
             case PE__CONTAINER_AGENT_RKT:
                 if (replica->ipaddr != NULL) {
                     pcmk__g_strcat(buffer,
                                    " --port=", port->target,
                                    ":", replica->ipaddr, ":", port->source,
                                    NULL);
                 } else {
                     pcmk__g_strcat(buffer,
                                    " --port=", port->target, ":", port->source,
                                    NULL);
                 }
                 break;
             default:
                 break;
         }
     }
 
     /* @COMPAT: We should use pcmk__add_word() here, but we can't yet, because
      * it would cause restarts during rolling upgrades.
      *
      * Previous versions of this code appended (" %s", data->launcher_options)
      * whenever data->launcher_options was non-NULL, even if it was an empty
      * string, and likewise for data->container_host_options. Using
      *
      *     pcmk__add_word(buffer, 0, data->launcher_options)
      *
      * would drop that extra space, causing a resource definition change.
      */
     if (data->launcher_options != NULL) {
         pcmk__g_strcat(buffer, " ", data->launcher_options, NULL);
     }
 
     if (data->container_host_options != NULL) {
         pcmk__g_strcat(buffer, " ", data->container_host_options, NULL);
     }
 
     crm_create_nvpair_xml(xml_obj, NULL, "run_opts",
                           (const char *) buffer->str);
     g_string_free(buffer, TRUE);
 
     crm_create_nvpair_xml(xml_obj, NULL, "mount_points",
                           (dbuffer != NULL)? (const char *) dbuffer->str : "");
     if (dbuffer != NULL) {
         g_string_free(dbuffer, TRUE);
     }
 
     if (replica->child != NULL) {
         if (data->container_command != NULL) {
             crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                   data->container_command);
         } else {
             crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                   SBIN_DIR "/pacemaker-remoted");
         }
 
         /* TODO: Allow users to specify their own?
          *
          * We just want to know if the container is alive; we'll monitor the
          * child independently.
          */
         crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
 #if 0
         /* @TODO Consider supporting the use case where we can start and stop
          * resources, but not proxy local commands (such as setting node
          * attributes), by running the local executor in stand-alone mode.
          * However, this would probably be better done via ACLs as with other
          * Pacemaker Remote nodes.
          */
     } else if ((replica->child != NULL) && data->untrusted) {
         crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                               CRM_DAEMON_DIR "/pacemaker-execd");
         crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
                               CRM_DAEMON_DIR "/pacemaker/cts-exec-helper -c poke");
 #endif
     } else {
         if (data->container_command != NULL) {
             crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                   data->container_command);
         }
 
         /* TODO: Allow users to specify their own?
          *
          * We don't know what's in the container, so we just want to know if it
          * is alive.
          */
         crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
     }
 
     xml_obj = create_xml_node(xml_container, "operations");
     crm_create_op_xml(xml_obj, ID(xml_container), "monitor", "60s", NULL);
 
     // TODO: Other ops? Timeouts and intervals from underlying resource?
     if (pe__unpack_resource(xml_container, &replica->container, parent,
                             parent->cluster) != pcmk_rc_ok) {
         return pcmk_rc_unpack_error;
     }
     pe__set_resource_flags(replica->container, pe_rsc_replica_container);
     parent->children = g_list_append(parent->children, replica->container);
 
     return pcmk_rc_ok;
 }
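 
 /* Illustrative (abbreviated) run_opts value built by
  * create_container_resource() for a hypothetical docker bundle "httpd-bundle"
  * with an IP range, no explicit control-port, and the default authkey mount
  * (wrapped here for readability; the actual value is a single string):
  *
  *   " --restart=no -h httpd-bundle-0 -e PCMK_stderr=1
  *     -e PCMK_remote_port=3121
  *     -v /etc/pacemaker/authkey:/etc/pacemaker/authkey
  *     -p 192.168.122.131:3121:3121 ..."
  */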
 
 /*!
  * \brief Ban a node from a resource's (and its children's) allowed nodes list
  *
  * \param[in,out] rsc    Resource to modify
  * \param[in]     uname  Name of node to ban
  */
 static void
 disallow_node(pe_resource_t *rsc, const char *uname)
 {
     gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
 
     if (match) {
         ((pe_node_t *) match)->weight = -INFINITY;
         ((pe_node_t *) match)->rsc_discover_mode = pe_discover_never;
     }
     if (rsc->children) {
         g_list_foreach(rsc->children, (GFunc) disallow_node, (gpointer) uname);
     }
 }
 
 static int
 create_remote_resource(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                        pe__bundle_replica_t *replica)
 {
     if (replica->child && valid_network(data)) {
         GHashTableIter gIter;
         pe_node_t *node = NULL;
         xmlNode *xml_remote = NULL;
         char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset);
         char *port_s = NULL;
         const char *uname = NULL;
         const char *connect_name = NULL;
 
         if (pe_find_resource(parent->cluster->resources, id) != NULL) {
             free(id);
             // The biggest hammer we have
             id = crm_strdup_printf("pcmk-internal-%s-remote-%d",
                                    replica->child->id, replica->offset);
             //@TODO return error instead of asserting?
             CRM_ASSERT(pe_find_resource(parent->cluster->resources,
                                         id) == NULL);
         }
 
         /* REMOTE_CONTAINER_HACK: "#uname" is a magic string we use as the
          * server name when the connection does not have its own IP, in order
          * to support nested remotes (i.e. a bundle running on a remote node).
          */
         connect_name = (replica->ipaddr? replica->ipaddr : "#uname");
 
         if (data->control_port == NULL) {
             port_s = pcmk__itoa(DEFAULT_REMOTE_PORT);
         }
 
         /* This sets replica->container as replica->remote's container, which is
          * similar to what happens with guest nodes. This is how the scheduler
          * knows that the bundle node is fenced by recovering the container, and
          * that remote should be ordered relative to the container.
          */
         xml_remote = pe_create_remote_xml(NULL, id, replica->container->id,
                                           NULL, NULL, NULL,
                                           connect_name, (data->control_port?
                                           data->control_port : port_s));
         free(port_s);
 
         /* Abandon our created ID, and pull the copy from the XML, because we
          * need something that will get freed during data set cleanup to use as
          * the node ID and uname.
          */
         free(id);
         id = NULL;
         uname = ID(xml_remote);
 
         /* Ensure a node has been created for the guest (it may have already
          * been, if it has a permanent node attribute), and ensure its weight is
          * -INFINITY so no other resources can run on it.
          */
         node = pe_find_node(parent->cluster->nodes, uname);
         if (node == NULL) {
             node = pe_create_node(uname, uname, "remote", "-INFINITY",
                                   parent->cluster);
         } else {
             node->weight = -INFINITY;
         }
         node->rsc_discover_mode = pe_discover_never;
 
         /* unpack_remote_nodes() ensures that each remote node and guest node
          * has a pe_node_t entry. Ideally, it would do the same for bundle nodes.
          * Unfortunately, a bundle has to be mostly unpacked before it's obvious
          * what nodes will be needed, so we do it just above.
          *
          * Worse, that means that the node may have been utilized while
          * unpacking other resources, without our weight correction. The most
          * likely place for this to happen is when pe__unpack_resource() calls
          * resource_location() to set a default score in symmetric clusters.
          * This adds a node *copy* to each resource's allowed nodes, and these
          * copies will have the wrong weight.
          *
          * As a hacky workaround, fix those copies here.
          *
          * @TODO Possible alternative: ensure bundles are unpacked before other
          * resources, so the weight is correct before any copies are made.
          */
         g_list_foreach(parent->cluster->resources, (GFunc) disallow_node,
                        (gpointer) uname);
 
         replica->node = pe__copy_node(node);
         replica->node->weight = 500;
         replica->node->rsc_discover_mode = pe_discover_exclusive;
 
         /* Ensure the node shows up as allowed and with the correct discovery set */
         if (replica->child->allowed_nodes != NULL) {
             g_hash_table_destroy(replica->child->allowed_nodes);
         }
         replica->child->allowed_nodes = pcmk__strkey_table(NULL, free);
         g_hash_table_insert(replica->child->allowed_nodes,
                             (gpointer) replica->node->details->id,
                             pe__copy_node(replica->node));
 
         {
             pe_node_t *copy = pe__copy_node(replica->node);
             copy->weight = -INFINITY;
             g_hash_table_insert(replica->child->parent->allowed_nodes,
                                 (gpointer) replica->node->details->id, copy);
         }
         if (pe__unpack_resource(xml_remote, &replica->remote, parent,
                                 parent->cluster) != pcmk_rc_ok) {
             return pcmk_rc_unpack_error;
         }
 
         g_hash_table_iter_init(&gIter, replica->remote->allowed_nodes);
         while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
             if (pe__is_guest_or_remote_node(node)) {
                 /* Remote resources can only run on 'normal' cluster nodes */
                 node->weight = -INFINITY;
             }
         }
 
         replica->node->details->remote_rsc = replica->remote;
 
         // Ensure pe__is_guest_node() functions correctly immediately
         replica->remote->container = replica->container;
 
         /* A bundle's #kind is closer to "container" (guest node) than the
          * "remote" set by pe_create_node().
          */
         g_hash_table_insert(replica->node->details->attrs,
                             strdup(CRM_ATTR_KIND), strdup("container"));
 
         /* One effect of this is that setup_container() will add
          * replica->remote to replica->container's fillers, which will make
          * pe__resource_contains_guest_node() true for replica->container.
          *
          * replica->child does NOT get added to replica->container's fillers.
          * The only noticeable effect if it did would be for its fail count to
          * be taken into account when checking replica->container's migration
          * threshold.
          */
         parent->children = g_list_append(parent->children, replica->remote);
     }
     return pcmk_rc_ok;
 }
 
 static int
 create_replica_resources(pe_resource_t *parent, pe__bundle_variant_data_t *data,
                          pe__bundle_replica_t *replica)
 {
     int rc = pcmk_rc_ok;
 
     rc = create_container_resource(parent, data, replica);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     rc = create_ip_resource(parent, data, replica);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     rc = create_remote_resource(parent, data, replica);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     if ((replica->child != NULL) && (replica->ipaddr != NULL)) {
         add_hash_param(replica->child->meta, "external-ip", replica->ipaddr);
     }
 
     if (replica->remote != NULL) {
         /*
          * Allow the remote connection resource to be allocated to a
          * different node than the one on which the container is active.
          *
          * This makes it possible to have Pacemaker Remote nodes running
          * containers with pacemaker-remoted inside in order to start
          * services inside those containers.
          */
         pe__set_resource_flags(replica->remote, pe_rsc_allow_remote_remotes);
     }
     return rc;
 }
 
 static void
 mount_add(pe__bundle_variant_data_t *bundle_data, const char *source,
           const char *target, const char *options, uint32_t flags)
 {
     pe__bundle_mount_t *mount = calloc(1, sizeof(pe__bundle_mount_t));
 
     CRM_ASSERT(mount != NULL);
     mount->source = strdup(source);
     mount->target = strdup(target);
     pcmk__str_update(&mount->options, options);
     mount->flags = flags;
     bundle_data->mounts = g_list_append(bundle_data->mounts, mount);
 }
 
 static void
 mount_free(pe__bundle_mount_t *mount)
 {
     free(mount->source);
     free(mount->target);
     free(mount->options);
     free(mount);
 }
 
 static void
 port_free(pe__bundle_port_t *port)
 {
     free(port->source);
     free(port->target);
     free(port);
 }
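 
 /* replica_for_remote() below maps a bundle connection resource back to the
  * replica that owns it, by walking up to the top-level bundle and scanning
  * its replica list; it returns NULL (and logs an assertion) if no replica
  * uses the given resource as its connection.
  */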
 
 static pe__bundle_replica_t *
 replica_for_remote(pe_resource_t *remote)
 {
     pe_resource_t *top = remote;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     if (top == NULL) {
         return NULL;
     }
 
     while (top->parent != NULL) {
         top = top->parent;
     }
 
     get_bundle_variant_data(bundle_data, top);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (replica->remote == remote) {
             return replica;
         }
     }
     CRM_LOG_ASSERT(FALSE);
     return NULL;
 }
 
 bool
 pe__bundle_needs_remote_name(pe_resource_t *rsc)
 {
     const char *value;
     GHashTable *params = NULL;
 
     if (rsc == NULL) {
         return false;
     }
 
     // Use NULL node since pcmk__bundle_expand() uses that to set value
     params = pe_rsc_params(rsc, NULL, rsc->cluster);
     value = g_hash_table_lookup(params, XML_RSC_ATTR_REMOTE_RA_ADDR);
 
     return pcmk__str_eq(value, "#uname", pcmk__str_casei)
            && xml_contains_remote_node(rsc->xml);
 }
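 
 /* pe__add_bundle_remote_name() below resolves the address for a bundle
  * connection that uses the "#uname" placeholder: it finds the replica owning
  * the connection, picks the node its container is assigned to (or currently
  * running on), and, if xml and field are given, stores that node's name as
  * the named attribute. It returns the node name, or NULL if none can be
  * determined.
  */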
 
 const char *
 pe__add_bundle_remote_name(pe_resource_t *rsc, pe_working_set_t *data_set,
                            xmlNode *xml, const char *field)
 {
     /* REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with
      * Pacemaker Remote inside
      */
 
     pe_node_t *node = NULL;
     pe__bundle_replica_t *replica = NULL;
 
     if (!pe__bundle_needs_remote_name(rsc)) {
         return NULL;
     }
 
     replica = replica_for_remote(rsc);
     if (replica == NULL) {
         return NULL;
     }
 
     node = replica->container->allocated_to;
     if (node == NULL) {
         /* If it won't be running anywhere after the
          * transition, go with where it's running now.
          */
         node = pe__current_node(replica->container);
     }
 
     if(node == NULL) {
         crm_trace("Cannot determine address for bundle connection %s", rsc->id);
         return NULL;
     }
 
     crm_trace("Setting address for bundle connection %s to bundle host %s",
               rsc->id, pe__node_name(node));
     if(xml != NULL && field != NULL) {
         crm_xml_add(xml, field, node->details->uname);
     }
 
     return node->details->uname;
 }
 
 #define pe__set_bundle_mount_flags(mount_xml, flags, flags_to_set) do {     \
         flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,           \
                                    "Bundle mount", ID(mount_xml), flags,    \
                                    (flags_to_set), #flags_to_set);          \
     } while (0)
 
 gboolean
 pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set)
 {
     const char *value = NULL;
     xmlNode *xml_obj = NULL;
     xmlNode *xml_resource = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
     bool need_log_mount = TRUE;
 
     CRM_ASSERT(rsc != NULL);
     pe_rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     bundle_data = calloc(1, sizeof(pe__bundle_variant_data_t));
     rsc->variant_opaque = bundle_data;
     bundle_data->prefix = strdup(rsc->id);
 
     xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_DOCKER_S);
     if (xml_obj != NULL) {
         bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER;
     } else {
         xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_RKT_S);
         if (xml_obj != NULL) {
             bundle_data->agent_type = PE__CONTAINER_AGENT_RKT;
         } else {
             xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_PODMAN_S);
             if (xml_obj != NULL) {
                 bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN;
             } else {
                 return FALSE;
             }
         }
     }
 
     // Use 0 for default, minimum, and invalid promoted-max
     value = crm_element_value(xml_obj, XML_RSC_ATTR_PROMOTED_MAX);
     if (value == NULL) {
         // @COMPAT deprecated since 2.0.0
         value = crm_element_value(xml_obj, "masters");
     }
     pcmk__scan_min_int(value, &bundle_data->promoted_max, 0);
 
     // Default replicas to promoted-max if it was specified and 1 otherwise
     value = crm_element_value(xml_obj, "replicas");
     if ((value == NULL) && (bundle_data->promoted_max > 0)) {
         bundle_data->nreplicas = bundle_data->promoted_max;
     } else {
         pcmk__scan_min_int(value, &bundle_data->nreplicas, 1);
     }
 
     /*
      * Communication between containers on the same host via the
      * floating IPs only works if the container is started with:
      *   --userland-proxy=false --ip-masq=false
      */
     value = crm_element_value(xml_obj, "replicas-per-host");
     pcmk__scan_min_int(value, &bundle_data->nreplicas_per_host, 1);
     if (bundle_data->nreplicas_per_host == 1) {
         pe__clear_resource_flags(rsc, pe_rsc_unique);
     }
 
     bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command");
     bundle_data->launcher_options = crm_element_value_copy(xml_obj, "options");
     bundle_data->image = crm_element_value_copy(xml_obj, "image");
     bundle_data->container_network = crm_element_value_copy(xml_obj, "network");
 
     xml_obj = first_named_child(rsc->xml, "network");
     if(xml_obj) {
 
         bundle_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
         bundle_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
         bundle_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
         bundle_data->control_port = crm_element_value_copy(xml_obj, "control-port");
         value = crm_element_value(xml_obj, "add-host");
         if (crm_str_to_boolean(value, &bundle_data->add_host) != 1) {
             bundle_data->add_host = TRUE;
         }
 
         for (xmlNode *xml_child = pcmk__xe_first_child(xml_obj); xml_child != NULL;
              xml_child = pcmk__xe_next(xml_child)) {
 
             pe__bundle_port_t *port = calloc(1, sizeof(pe__bundle_port_t));
             port->source = crm_element_value_copy(xml_child, "port");
 
             if(port->source == NULL) {
                 port->source = crm_element_value_copy(xml_child, "range");
             } else {
                 port->target = crm_element_value_copy(xml_child, "internal-port");
             }
 
             if(port->source != NULL && strlen(port->source) > 0) {
                 if(port->target == NULL) {
                     port->target = strdup(port->source);
                 }
                 bundle_data->ports = g_list_append(bundle_data->ports, port);
 
             } else {
                 pe_err("Invalid port directive %s", ID(xml_child));
                 port_free(port);
             }
         }
     }
 
     xml_obj = first_named_child(rsc->xml, "storage");
     for (xmlNode *xml_child = pcmk__xe_first_child(xml_obj); xml_child != NULL;
          xml_child = pcmk__xe_next(xml_child)) {
 
         const char *source = crm_element_value(xml_child, "source-dir");
         const char *target = crm_element_value(xml_child, "target-dir");
         const char *options = crm_element_value(xml_child, "options");
         int flags = pe__bundle_mount_none;
 
         if (source == NULL) {
             source = crm_element_value(xml_child, "source-dir-root");
             pe__set_bundle_mount_flags(xml_child, flags,
                                        pe__bundle_mount_subdir);
         }
 
         if (source && target) {
             mount_add(bundle_data, source, target, options, flags);
             if (strcmp(target, "/var/log") == 0) {
                 need_log_mount = FALSE;
             }
         } else {
             pe_err("Invalid mount directive %s", ID(xml_child));
         }
     }
 
     xml_obj = first_named_child(rsc->xml, "primitive");
     if (xml_obj && valid_network(bundle_data)) {
         char *value = NULL;
         xmlNode *xml_set = NULL;
 
         xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);
 
         /* @COMPAT We no longer use the <master> tag, but we need to keep it as
          * part of the resource name, so that bundles don't restart in a rolling
          * upgrade. (It also avoids needing to change regression tests.)
          */
         crm_xml_set_id(xml_resource, "%s-%s", bundle_data->prefix,
                       (bundle_data->promoted_max? "master"
                       : (const char *)xml_resource->name));
 
         xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
         crm_xml_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name);
 
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);
 
         value = pcmk__itoa(bundle_data->nreplicas);
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_INCARNATION_MAX, value);
         free(value);
 
         value = pcmk__itoa(bundle_data->nreplicas_per_host);
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_INCARNATION_NODEMAX, value);
         free(value);
 
         crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
                               pcmk__btoa(bundle_data->nreplicas_per_host > 1));
 
         if (bundle_data->promoted_max) {
             crm_create_nvpair_xml(xml_set, NULL,
                                   XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE);
 
             value = pcmk__itoa(bundle_data->promoted_max);
             crm_create_nvpair_xml(xml_set, NULL,
                                   XML_RSC_ATTR_PROMOTED_MAX, value);
             free(value);
         }
 
         //crm_xml_add(xml_obj, XML_ATTR_ID, bundle_data->prefix);
         add_node_copy(xml_resource, xml_obj);
 
     } else if(xml_obj) {
         pe_err("Cannot control %s inside %s without either ip-range-start or control-port",
                rsc->id, ID(xml_obj));
         return FALSE;
     }
 
     if(xml_resource) {
         int lpc = 0;
         GList *childIter = NULL;
         pe__bundle_port_t *port = NULL;
         GString *buffer = NULL;
 
         if (pe__unpack_resource(xml_resource, &(bundle_data->child), rsc,
                                 data_set) != pcmk_rc_ok) {
             return FALSE;
         }
 
         /* Currently, we always map the default authentication key location
          * into the same location inside the container.
          *
          * Ideally, we would respect the host's PCMK_authkey_location, but:
          * - it may be different on different nodes;
          * - the actual connection does extra checking to make sure the key
          *   file exists and is readable, which we can't do here on the DC;
          * - tools such as crm_resource and crm_simulate may not have the same
          *   environment variables as the cluster, causing operation digests to
          *   differ.
          *
          * Always using the default location inside the container is fine,
          * because we control the pacemaker_remote environment, and it avoids
          * having to pass another environment variable to the container.
          *
          * @TODO A better solution may be to have only pacemaker_remote use the
          * environment variable, and have the cluster nodes use a new
          * cluster option for key location. This would introduce the limitation
          * of the location being the same on all cluster nodes, but that's
          * reasonable.
          */
         mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION,
                   DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none);
 
         if (need_log_mount) {
             mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL,
                       pe__bundle_mount_subdir);
         }
 
         port = calloc(1, sizeof(pe__bundle_port_t));
         if(bundle_data->control_port) {
             port->source = strdup(bundle_data->control_port);
         } else {
             /* If we wanted to respect PCMK_remote_port, we could use
              * crm_default_remote_port() here and elsewhere in this file instead
              * of DEFAULT_REMOTE_PORT.
              *
              * However, it gains nothing, since we control both the container
              * environment and the connection resource parameters, and the user
              * can use a different port if desired by setting control-port.
              */
             port->source = pcmk__itoa(DEFAULT_REMOTE_PORT);
         }
         port->target = strdup(port->source);
         bundle_data->ports = g_list_append(bundle_data->ports, port);
 
         buffer = g_string_sized_new(1024);
         for (childIter = bundle_data->child->children; childIter != NULL;
              childIter = childIter->next) {
 
             pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
 
             replica->child = childIter->data;
             replica->child->exclusive_discover = TRUE;
             replica->offset = lpc++;
 
             // Ensure the child's notify gets set based on the underlying primitive's value
             if (pcmk_is_set(replica->child->flags, pe_rsc_notify)) {
                 pe__set_resource_flags(bundle_data->child, pe_rsc_notify);
             }
 
             allocate_ip(bundle_data, replica, buffer);
             bundle_data->replicas = g_list_append(bundle_data->replicas,
                                                   replica);
             bundle_data->attribute_target = g_hash_table_lookup(replica->child->meta,
                                                                 XML_RSC_ATTR_TARGET);
         }
         bundle_data->container_host_options = g_string_free(buffer, FALSE);
 
         if (bundle_data->attribute_target) {
             g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET),
                                  strdup(bundle_data->attribute_target));
             g_hash_table_replace(bundle_data->child->meta,
                                  strdup(XML_RSC_ATTR_TARGET),
                                  strdup(bundle_data->attribute_target));
         }
 
     } else {
         // Just a naked container, no pacemaker-remote
         GString *buffer = g_string_sized_new(1024);
 
         for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) {
             pe__bundle_replica_t *replica = calloc(1, sizeof(pe__bundle_replica_t));
 
             replica->offset = lpc;
             allocate_ip(bundle_data, replica, buffer);
             bundle_data->replicas = g_list_append(bundle_data->replicas,
                                                   replica);
         }
         bundle_data->container_host_options = g_string_free(buffer, FALSE);
     }
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         if (create_replica_resources(rsc, bundle_data, replica) != pcmk_rc_ok) {
             pe_err("Failed unpacking resource %s", rsc->id);
             rsc->fns->free(rsc);
             return FALSE;
         }
 
         /* Utilization needs special handling for bundles. It makes no sense for
          * the inner primitive to have utilization, because it is tied
          * one-to-one to the guest node created by the container resource -- and
          * there's no way to set capacities for that guest node anyway.
          *
          * What the user really wants is to configure utilization for the
          * container. However, the schema only allows utilization for
          * primitives, and the container resource is implicit anyway, so the
          * user can *only* configure utilization for the inner primitive. If
          * they do, move the primitive's utilization values to the container.
          *
          * @TODO This means that bundles without an inner primitive can't have
          * utilization. An alternative might be to allow utilization values in
          * the top-level bundle XML in the schema, and copy those to each
          * container.
          */
         if (replica->child != NULL) {
             GHashTable *empty = replica->container->utilization;
 
             replica->container->utilization = replica->child->utilization;
             replica->child->utilization = empty;
         }
     }
 
     if (bundle_data->child) {
         rsc->children = g_list_append(rsc->children, bundle_data->child);
     }
     return TRUE;
 }
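 
 /* replica_resource_active() below is a helper for pe__bundle_active(): it
  * returns TRUE if the given member is active and any active member suffices
  * (all is FALSE), FALSE if the member is inactive and all members must be
  * active (all is TRUE), and -1 when the member is NULL or does not by itself
  * decide the overall result.
  */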
 
 static int
 replica_resource_active(pe_resource_t *rsc, gboolean all)
 {
     if (rsc) {
         gboolean child_active = rsc->fns->active(rsc, all);
 
         if (child_active && !all) {
             return TRUE;
         } else if (!child_active && all) {
             return FALSE;
         }
     }
     return -1;
 }
 
 gboolean
 pe__bundle_active(pe_resource_t *rsc, gboolean all)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     GList *iter = NULL;
 
     get_bundle_variant_data(bundle_data, rsc);
     for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
         pe__bundle_replica_t *replica = iter->data;
         int rsc_active;
 
         rsc_active = replica_resource_active(replica->ip, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->child, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->container, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->remote, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
     }
 
     /* If "all" is TRUE, we've already checked that no resources were inactive,
      * so return TRUE; if "all" is FALSE, we didn't find any active resources,
      * so return FALSE.
      */
     return all;
 }
 
 /*!
  * \internal
  * \brief Find the bundle replica corresponding to a given node
  *
  * \param[in] bundle  Top-level bundle resource
  * \param[in] node    Node to search for
  *
  * \return The replica's bundled resource instance on \p node if any,
  *         otherwise NULL
  */
 pe_resource_t *
 pe__find_bundle_replica(const pe_resource_t *bundle, const pe_node_t *node)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     CRM_ASSERT(bundle && node);
 
     get_bundle_variant_data(bundle_data, bundle);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica && replica->node);
         if (replica->node->details == node->details) {
             return replica->child;
         }
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 static void
 print_rsc_in_list(pe_resource_t *rsc, const char *pre_text, long options,
                   void *print_data)
 {
     if (rsc != NULL) {
         if (options & pe_print_html) {
             status_print("<li>");
         }
         rsc->fns->print(rsc, pre_text, options, print_data);
         if (options & pe_print_html) {
             status_print("</li>\n");
         }
     }
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 static void
 bundle_print_xml(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     char *child_text = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     if (pre_text == NULL) {
         pre_text = "";
     }
     child_text = crm_strdup_printf("%s        ", pre_text);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     status_print("%s<bundle ", pre_text);
     status_print(XML_ATTR_ID "=\"%s\" ", rsc->id);
     status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type));
     status_print("image=\"%s\" ", bundle_data->image);
     status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_unique));
     status_print("managed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_managed));
     status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pe_rsc_failed));
     status_print(">\n");
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         status_print("%s    <replica " XML_ATTR_ID "=\"%d\">\n",
                      pre_text, replica->offset);
         print_rsc_in_list(replica->ip, child_text, options, print_data);
         print_rsc_in_list(replica->child, child_text, options, print_data);
         print_rsc_in_list(replica->container, child_text, options, print_data);
         print_rsc_in_list(replica->remote, child_text, options, print_data);
         status_print("%s    </replica>\n", pre_text);
     }
     status_print("%s</bundle>\n", pre_text);
     free(child_text);
 }
 
 PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__bundle_xml(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     pe__bundle_variant_data_t *bundle_data = NULL;
     int rc = pcmk_rc_no_output;
     gboolean printed_header = FALSE;
     gboolean print_everything = TRUE;
 
     const char *desc = NULL;
 
     CRM_ASSERT(rsc != NULL);
     
     get_bundle_variant_data(bundle_data, rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         char *id = NULL;
         gboolean print_ip, print_child, print_ctnr, print_remote;
 
         CRM_ASSERT(replica);
 
         if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
             continue;
         }
 
         print_ip = replica->ip != NULL &&
                    !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
         print_child = replica->child != NULL &&
                       !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
         print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
         print_remote = replica->remote != NULL &&
                        !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
 
         if (!print_everything && !print_ip && !print_child && !print_ctnr && !print_remote) {
             continue;
         }
 
         if (!printed_header) {
             printed_header = TRUE;
 
             desc = pe__resource_description(rsc, show_opts);
 
             rc = pe__name_and_nvpairs_xml(out, true, "bundle", 8,
                      "id", rsc->id,
                      "type", container_agent_str(bundle_data->agent_type),
                      "image", bundle_data->image,
                      "unique", pe__rsc_bool_str(rsc, pe_rsc_unique),
                      "maintenance", pe__rsc_bool_str(rsc, pe_rsc_maintenance),
                      "managed", pe__rsc_bool_str(rsc, pe_rsc_managed),
                      "failed", pe__rsc_bool_str(rsc, pe_rsc_failed),
                      "description", desc);
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
 
         id = pcmk__itoa(replica->offset);
         rc = pe__name_and_nvpairs_xml(out, true, "replica", 1, "id", id);
         free(id);
         CRM_ASSERT(rc == pcmk_rc_ok);
 
         if (print_ip) {
             out->message(out, crm_map_element_name(replica->ip->xml), show_opts,
                          replica->ip, only_node, only_rsc);
         }
 
         if (print_child) {
             out->message(out, crm_map_element_name(replica->child->xml), show_opts,
                          replica->child, only_node, only_rsc);
         }
 
         if (print_ctnr) {
             out->message(out, crm_map_element_name(replica->container->xml), show_opts,
                          replica->container, only_node, only_rsc);
         }
 
         if (print_remote) {
             out->message(out, crm_map_element_name(replica->remote->xml), show_opts,
                          replica->remote, only_node, only_rsc);
         }
 
         pcmk__output_xml_pop_parent(out); // replica
     }
 
     if (printed_header) {
         pcmk__output_xml_pop_parent(out); // bundle
     }
 
     return rc;
 }
 
 static void
 pe__bundle_replica_output_html(pcmk__output_t *out, pe__bundle_replica_t *replica,
                                pe_node_t *node, uint32_t show_opts)
 {
     pe_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if(rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     pe__common_output_html(out, rsc, buffer, node, show_opts);
 }
 
 /*!
  * \internal
  * \brief Get a string describing a resource's unmanaged state or lack thereof
  *
  * \param[in] rsc  Resource to describe
  *
  * \return A string indicating that a resource is in maintenance mode or
  *         otherwise unmanaged, or an empty string otherwise
  */
 static const char *
 get_unmanaged_str(const pe_resource_t *rsc)
 {
     if (pcmk_is_set(rsc->flags, pe_rsc_maintenance)) {
         return " (maintenance)";
     }
     if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         return " (unmanaged)";
     }
     return "";
 }
 
 PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__bundle_html(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     const char *desc = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
     CRM_ASSERT(rsc != NULL);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     desc = pe__resource_description(rsc, show_opts);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         gboolean print_ip, print_child, print_ctnr, print_remote;
 
         CRM_ASSERT(replica);
 
         if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
             continue;
         }
 
         print_ip = replica->ip != NULL &&
                    !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
         print_child = replica->child != NULL &&
                       !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
         print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
         print_remote = replica->remote != NULL &&
                        !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
 
         if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) ||
             (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) {
             /* The output messages used below require pcmk_show_implicit_rscs
              * to be set to do anything.
              */
             uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs;
 
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                                      desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
                                      get_unmanaged_str(rsc));
 
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 out->begin_list(out, NULL, NULL, "Replica[%d]", replica->offset);
             }
 
             if (print_ip) {
                 out->message(out, crm_map_element_name(replica->ip->xml),
                              new_show_opts, replica->ip, only_node, only_rsc);
             }
 
             if (print_child) {
                 out->message(out, crm_map_element_name(replica->child->xml),
                              new_show_opts, replica->child, only_node, only_rsc);
             }
 
             if (print_ctnr) {
                 out->message(out, crm_map_element_name(replica->container->xml),
                              new_show_opts, replica->container, only_node, only_rsc);
             }
 
             if (print_remote) {
                 out->message(out, crm_map_element_name(replica->remote->xml),
                              new_show_opts, replica->remote, only_node, only_rsc);
             }
 
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 out->end_list(out);
             }
         } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) {
             continue;
         } else {
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                                      desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
                                      get_unmanaged_str(rsc));
 
             pe__bundle_replica_output_html(out, replica, pe__current_node(replica->container),
                                            show_opts);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 static void
 pe__bundle_replica_output_text(pcmk__output_t *out, pe__bundle_replica_t *replica,
                                pe_node_t *node, uint32_t show_opts)
 {
     const pe_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
    if (rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     pe__common_output_text(out, rsc, buffer, node, show_opts);
 }
 
 PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pe_resource_t *", "GList *", "GList *")
 int
 pe__bundle_text(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pe_resource_t *rsc = va_arg(args, pe_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     const char *desc = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
    CRM_ASSERT(rsc != NULL);

    get_bundle_variant_data(bundle_data, rsc);

    desc = pe__resource_description(rsc, show_opts);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
         gboolean print_ip, print_child, print_ctnr, print_remote;
 
         CRM_ASSERT(replica);
 
         if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
             continue;
         }
 
         print_ip = replica->ip != NULL &&
                    !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
         print_child = replica->child != NULL &&
                       !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
         print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
         print_remote = replica->remote != NULL &&
                        !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
 
         if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) ||
             (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) {
            /* The output messages used below require pcmk_show_implicit_rscs
             * to be set to do anything.
             */
             uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs;
 
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                                      desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
                                      get_unmanaged_str(rsc));
 
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 out->list_item(out, NULL, "Replica[%d]", replica->offset);
             }
 
             out->begin_list(out, NULL, NULL, NULL);
 
             if (print_ip) {
                 out->message(out, crm_map_element_name(replica->ip->xml),
                              new_show_opts, replica->ip, only_node, only_rsc);
             }
 
             if (print_child) {
                 out->message(out, crm_map_element_name(replica->child->xml),
                              new_show_opts, replica->child, only_node, only_rsc);
             }
 
             if (print_ctnr) {
                 out->message(out, crm_map_element_name(replica->container->xml),
                              new_show_opts, replica->container, only_node, only_rsc);
             }
 
             if (print_remote) {
                 out->message(out, crm_map_element_name(replica->remote->xml),
                              new_show_opts, replica->remote, only_node, only_rsc);
             }
 
             out->end_list(out);
         } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) {
             continue;
         } else {
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                                      desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
                                      get_unmanaged_str(rsc));
 
             pe__bundle_replica_output_text(out, replica, pe__current_node(replica->container),
                                            show_opts);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 static void
 print_bundle_replica(pe__bundle_replica_t *replica, const char *pre_text,
                      long options, void *print_data)
 {
     pe_node_t *node = NULL;
     pe_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
    if (rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     node = pe__current_node(replica->container);
     common_print(rsc, pre_text, buffer, node, options, print_data);
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 void
 pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     char *child_text = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     if (options & pe_print_xml) {
         bundle_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     status_print("%sContainer bundle%s: %s [%s]%s%s\n",
                  pre_text, ((bundle_data->nreplicas > 1)? " set" : ""),
                  rsc->id, bundle_data->image,
                  pcmk_is_set(rsc->flags, pe_rsc_unique) ? " (unique)" : "",
                  pcmk_is_set(rsc->flags, pe_rsc_managed) ? "" : " (unmanaged)");
     if (options & pe_print_html) {
         status_print("<br />\n<ul>\n");
    }

     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pe__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (options & pe_print_html) {
             status_print("<li>");
         }
 
         if (pcmk_is_set(options, pe_print_implicit)) {
             child_text = crm_strdup_printf("     %s", pre_text);
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 status_print("  %sReplica[%d]\n", pre_text, replica->offset);
             }
             if (options & pe_print_html) {
                 status_print("<br />\n<ul>\n");
             }
             print_rsc_in_list(replica->ip, child_text, options, print_data);
             print_rsc_in_list(replica->container, child_text, options, print_data);
             print_rsc_in_list(replica->remote, child_text, options, print_data);
             print_rsc_in_list(replica->child, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("</ul>\n");
             }
         } else {
             child_text = crm_strdup_printf("%s  ", pre_text);
             print_bundle_replica(replica, child_text, options, print_data);
         }
         free(child_text);
 
         if (options & pe_print_html) {
             status_print("</li>\n");
         }
     }
     if (options & pe_print_html) {
         status_print("</ul>\n");
     }
 }
 
 static void
 free_bundle_replica(pe__bundle_replica_t *replica)
 {
     if (replica == NULL) {
         return;
     }
 
     if (replica->node) {
         free(replica->node);
         replica->node = NULL;
     }
 
     if (replica->ip) {
         free_xml(replica->ip->xml);
         replica->ip->xml = NULL;
         replica->ip->fns->free(replica->ip);
         replica->ip = NULL;
     }
     if (replica->container) {
         free_xml(replica->container->xml);
         replica->container->xml = NULL;
         replica->container->fns->free(replica->container);
         replica->container = NULL;
     }
     if (replica->remote) {
         free_xml(replica->remote->xml);
         replica->remote->xml = NULL;
         replica->remote->fns->free(replica->remote);
         replica->remote = NULL;
     }
     free(replica->ipaddr);
     free(replica);
 }
 
 void
 pe__free_bundle(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
     pe_rsc_trace(rsc, "Freeing %s", rsc->id);
 
     free(bundle_data->prefix);
     free(bundle_data->image);
     free(bundle_data->control_port);
     free(bundle_data->host_network);
     free(bundle_data->host_netmask);
     free(bundle_data->ip_range_start);
     free(bundle_data->container_network);
     free(bundle_data->launcher_options);
     free(bundle_data->container_command);
     g_free(bundle_data->container_host_options);
 
     g_list_free_full(bundle_data->replicas,
                      (GDestroyNotify) free_bundle_replica);
     g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free);
     g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free);
     g_list_free(rsc->children);
 
    if (bundle_data->child) {
         free_xml(bundle_data->child->xml);
         bundle_data->child->xml = NULL;
         bundle_data->child->fns->free(bundle_data->child);
     }
     common_free(rsc);
 }
 
 enum rsc_role_e
 pe__bundle_resource_state(const pe_resource_t *rsc, gboolean current)
 {
     enum rsc_role_e container_role = RSC_ROLE_UNKNOWN;
     return container_role;
 }
 
 /*!
  * \brief Get the number of configured replicas in a bundle
  *
  * \param[in] rsc  Bundle resource
  *
  * \return Number of configured replicas, or 0 on error
  */
 int
 pe_bundle_replicas(const pe_resource_t *rsc)
 {
     if ((rsc == NULL) || (rsc->variant != pe_container)) {
         return 0;
     } else {
         pe__bundle_variant_data_t *bundle_data = NULL;
 
         get_bundle_variant_data(bundle_data, rsc);
         return bundle_data->nreplicas;
     }
 }
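
/* Usage sketch (illustrative only; "rsc" is assumed to be any resource from
 * the working set -- NULL and non-bundle resources simply yield 0):
 *
 *     int nreplicas = pe_bundle_replicas(rsc);
 *
 *     if (nreplicas > 0) {
 *         crm_info("%s is configured with %d replica(s)", rsc->id, nreplicas);
 *     }
 */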
 
 void
 pe__count_bundle(pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *item = bundle_data->replicas; item != NULL; item = item->next) {
         pe__bundle_replica_t *replica = item->data;
 
         if (replica->ip) {
             replica->ip->fns->count(replica->ip);
         }
         if (replica->child) {
             replica->child->fns->count(replica->child);
         }
         if (replica->container) {
             replica->container->fns->count(replica->container);
         }
         if (replica->remote) {
             replica->remote->fns->count(replica->remote);
         }
     }
 }
 
 gboolean
 pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                        gboolean check_parent)
 {
     gboolean passes = FALSE;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) {
         passes = TRUE;
     } else {
         get_bundle_variant_data(bundle_data, rsc);
 
         for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) {
             pe__bundle_replica_t *replica = gIter->data;
 
             if (replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             } else if (replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             } else if (!replica->container->fns->is_filtered(replica->container, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             } else if (replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             }
         }
     }
 
     return !passes;
 }
 
 /*!
  * \internal
  * \brief Get a list of a bundle's containers
  *
  * \param[in] bundle  Bundle resource
  *
  * \return Newly created list of \p bundle's containers
  * \note It is the caller's responsibility to free the result with
  *       g_list_free().
  */
 GList *
 pe__bundle_containers(const pe_resource_t *bundle)
 {
     GList *containers = NULL;
     const pe__bundle_variant_data_t *data = NULL;
 
     get_bundle_variant_data(data, bundle);
     for (GList *iter = data->replicas; iter != NULL; iter = iter->next) {
         pe__bundle_replica_t *replica = iter->data;
 
         containers = g_list_append(containers, replica->container);
     }
     return containers;
 }
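
/* Usage sketch (illustrative only; "bundle" is assumed to already be known to
 * be a bundle resource). The returned list does not own the containers, so
 * only the list itself is freed, per the \note above:
 *
 *     GList *containers = pe__bundle_containers(bundle);
 *
 *     for (GList *iter = containers; iter != NULL; iter = iter->next) {
 *         pe_resource_t *container = iter->data;
 *
 *         crm_trace("%s uses container %s", bundle->id, container->id);
 *     }
 *     g_list_free(containers);
 */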
 
 // Bundle implementation of resource_object_functions_t:active_node()
 pe_node_t *
 pe__bundle_active_node(const pe_resource_t *rsc, unsigned int *count_all,
                        unsigned int *count_clean)
 {
     pe_node_t *active = NULL;
     pe_node_t *node = NULL;
     pe_resource_t *container = NULL;
     GList *containers = NULL;
     GList *iter = NULL;
     GHashTable *nodes = NULL;
     const pe__bundle_variant_data_t *data = NULL;
 
     if (count_all != NULL) {
         *count_all = 0;
     }
     if (count_clean != NULL) {
         *count_clean = 0;
     }
     if (rsc == NULL) {
         return NULL;
     }
 
     /* For the purposes of this method, we only care about where the bundle's
      * containers are active, so build a list of active containers.
      */
     get_bundle_variant_data(data, rsc);
     for (iter = data->replicas; iter != NULL; iter = iter->next) {
         pe__bundle_replica_t *replica = iter->data;
 
         if (replica->container->running_on != NULL) {
             containers = g_list_append(containers, replica->container);
         }
     }
     if (containers == NULL) {
         return NULL;
     }
 
     /* If the bundle has only a single active container, just use that
      * container's method. If live migration is ever supported for bundle
      * containers, this will allow us to prefer the migration source when there
      * is only one container and it is migrating. For now, this just lets us
      * avoid creating the nodes table.
      */
     if (pcmk__list_of_1(containers)) {
         container = containers->data;
         node = container->fns->active_node(container, count_all, count_clean);
         g_list_free(containers);
         return node;
     }
 
     // Add all containers' active nodes to a hash table (for uniqueness)
     nodes = g_hash_table_new(NULL, NULL);
     for (iter = containers; iter != NULL; iter = iter->next) {
         container = iter->data;
 
         for (GList *node_iter = container->running_on; node_iter != NULL;
              node_iter = node_iter->next) {
             node = node_iter->data;
 
             // If insert returns true, we haven't counted this node yet
             if (g_hash_table_insert(nodes, (gpointer) node->details,
                                     (gpointer) node)
                 && !pe__count_active_node(rsc, node, &active, count_all,
                                           count_clean)) {
                 goto done;
             }
         }
     }
 
 done:
     g_list_free(containers);
     g_hash_table_destroy(nodes);
     return active;
 }
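
/* Usage sketch (illustrative only): callers normally reach this through the
 * resource's method table rather than by name, as with the other
 * resource_object_functions_t members:
 *
 *     unsigned int count_all = 0;
 *     unsigned int count_clean = 0;
 *     pe_node_t *node = rsc->fns->active_node(rsc, &count_all, &count_clean);
 *
 *     if (node != NULL) {
 *         crm_debug("%s is active on %u node(s) (%u clean)",
 *                   rsc->id, count_all, count_clean);
 *     }
 */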
 
 /*!
  * \internal
  * \brief Get maximum bundle resource instances per node
  *
  * \param[in] rsc  Bundle resource to check
  *
  * \return Maximum number of \p rsc instances that can be active on one node
  */
 unsigned int
 pe__bundle_max_per_node(const pe_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, rsc);
     CRM_ASSERT(bundle_data->nreplicas_per_host >= 0);
     return (unsigned int) bundle_data->nreplicas_per_host;
 }