diff --git a/include/crm/pengine/internal.h b/include/crm/pengine/internal.h
index d1157ae3ce..6afddf1724 100644
--- a/include/crm/pengine/internal.h
+++ b/include/crm/pengine/internal.h
@@ -1,762 +1,763 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PE_INTERNAL__H
 #  define PE_INTERNAL__H
 
 #  include <stdbool.h>
 #  include <stdint.h>
 #  include <string.h>
 #  include <crm/msg_xml.h>
 #  include <crm/pengine/status.h>
 #  include <crm/pengine/remote_internal.h>
 #  include <crm/common/internal.h>
 #  include <crm/common/options_internal.h>
 #  include <crm/common/output_internal.h>
 
 const char *pe__resource_description(const pe_resource_t *rsc, uint32_t show_opts);
 
 enum pe__clone_flags {
     // Whether instances should be started sequentially
     pe__clone_ordered               = (1 << 0),
 
     // Whether promotion scores have been added
     pe__clone_promotion_added       = (1 << 1),
 
     // Whether promotion constraints have been added
     pe__clone_promotion_constrained = (1 << 2),
 };
 
 bool pe__clone_is_ordered(const pe_resource_t *clone);
 int pe__set_clone_flag(pe_resource_t *clone, enum pe__clone_flags flag);
 
 
 enum pe__group_flags {
     pe__group_ordered       = (1 << 0), // Members start sequentially
     pe__group_colocated     = (1 << 1), // Members must be on same node
 };
 
 bool pe__group_flag_is_set(const pe_resource_t *group, uint32_t flags);
 pe_resource_t *pe__last_group_member(const pe_resource_t *group);
 
 
 #  define pe_rsc_info(rsc, fmt, args...)  crm_log_tag(LOG_INFO,  rsc ? rsc->id : "<NULL>", fmt, ##args)
 #  define pe_rsc_debug(rsc, fmt, args...) crm_log_tag(LOG_DEBUG, rsc ? rsc->id : "<NULL>", fmt, ##args)
 #  define pe_rsc_trace(rsc, fmt, args...) crm_log_tag(LOG_TRACE, rsc ? rsc->id : "<NULL>", fmt, ##args)
 
 #  define pe_err(fmt...) do {           \
         was_processing_error = TRUE;    \
         pcmk__config_err(fmt);          \
     } while (0)
 
 #  define pe_warn(fmt...) do {          \
         was_processing_warning = TRUE;  \
         pcmk__config_warn(fmt);         \
     } while (0)
 
 #  define pe_proc_err(fmt...) { was_processing_error = TRUE; crm_err(fmt); }
 #  define pe_proc_warn(fmt...) { was_processing_warning = TRUE; crm_warn(fmt); }
 
 #define pe__set_working_set_flags(working_set, flags_to_set) do {           \
         (working_set)->flags = pcmk__set_flags_as(__func__, __LINE__,       \
             LOG_TRACE, "Working set", crm_system_name,                      \
             (working_set)->flags, (flags_to_set), #flags_to_set);           \
     } while (0)
 
 #define pe__clear_working_set_flags(working_set, flags_to_clear) do {       \
         (working_set)->flags = pcmk__clear_flags_as(__func__, __LINE__,     \
             LOG_TRACE, "Working set", crm_system_name,                      \
             (working_set)->flags, (flags_to_clear), #flags_to_clear);       \
     } while (0)
 
 #define pe__set_resource_flags(resource, flags_to_set) do {                 \
         (resource)->flags = pcmk__set_flags_as(__func__, __LINE__,          \
             LOG_TRACE, "Resource", (resource)->id, (resource)->flags,       \
             (flags_to_set), #flags_to_set);                                 \
     } while (0)
 
 #define pe__clear_resource_flags(resource, flags_to_clear) do {             \
         (resource)->flags = pcmk__clear_flags_as(__func__, __LINE__,        \
             LOG_TRACE, "Resource", (resource)->id, (resource)->flags,       \
             (flags_to_clear), #flags_to_clear);                             \
     } while (0)
 
 #define pe__set_action_flags(action, flags_to_set) do {                     \
         (action)->flags = pcmk__set_flags_as(__func__, __LINE__,            \
                                              LOG_TRACE,                     \
                                              "Action", (action)->uuid,      \
                                              (action)->flags,               \
                                              (flags_to_set),                \
                                              #flags_to_set);                \
     } while (0)
 
 #define pe__clear_action_flags(action, flags_to_clear) do {                 \
         (action)->flags = pcmk__clear_flags_as(__func__, __LINE__,          \
                                                LOG_TRACE,                   \
                                                "Action", (action)->uuid,    \
                                                (action)->flags,             \
                                                (flags_to_clear),            \
                                                #flags_to_clear);            \
     } while (0)
 
 #define pe__set_raw_action_flags(action_flags, action_name, flags_to_set) do { \
         action_flags = pcmk__set_flags_as(__func__, __LINE__,               \
                                           LOG_TRACE, "Action", action_name, \
                                           (action_flags),                   \
                                           (flags_to_set), #flags_to_set);   \
     } while (0)
 
 #define pe__clear_raw_action_flags(action_flags, action_name, flags_to_clear) do { \
         action_flags = pcmk__clear_flags_as(__func__, __LINE__,             \
                                             LOG_TRACE,                      \
                                             "Action", action_name,          \
                                             (action_flags),                 \
                                             (flags_to_clear),               \
                                             #flags_to_clear);               \
     } while (0)
 
 #define pe__set_action_flags_as(function, line, action, flags_to_set) do {  \
         (action)->flags = pcmk__set_flags_as((function), (line),            \
                                              LOG_TRACE,                     \
                                              "Action", (action)->uuid,      \
                                              (action)->flags,               \
                                              (flags_to_set),                \
                                              #flags_to_set);                \
     } while (0)
 
 #define pe__clear_action_flags_as(function, line, action, flags_to_clear) do { \
         (action)->flags = pcmk__clear_flags_as((function), (line),          \
                                                LOG_TRACE,                   \
                                                "Action", (action)->uuid,    \
                                                (action)->flags,             \
                                                (flags_to_clear),            \
                                                #flags_to_clear);            \
     } while (0)
 
 #define pe__set_order_flags(order_flags, flags_to_set) do {                 \
         order_flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                          "Ordering", "constraint",          \
                                          order_flags, (flags_to_set),       \
                                          #flags_to_set);                    \
     } while (0)
 
 #define pe__clear_order_flags(order_flags, flags_to_clear) do {               \
         order_flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                            "Ordering", "constraint",          \
                                            order_flags, (flags_to_clear),     \
                                            #flags_to_clear);                  \
     } while (0)
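These wrappers route every flag change through pcmk__set_flags_as()/pcmk__clear_flags_as() so the change is trace-logged with the caller's function and line. A minimal usage sketch (illustrative only; pe_rsc_failed and pe_rsc_managed are assumed from pe_types.h):

static void
example_flag_usage(pe_resource_t *rsc)
{
    // Illustrative only: record a failure and stop managing the resource
    pe__set_resource_flags(rsc, pe_rsc_failed);
    pe__clear_resource_flags(rsc, pe_rsc_managed);
}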
 
 // Some warnings we don't want to print every transition
 
 enum pe_warn_once_e {
     pe_wo_blind         = (1 << 0),
     pe_wo_restart_type  = (1 << 1),
     pe_wo_role_after    = (1 << 2),
     pe_wo_poweroff      = (1 << 3),
     pe_wo_require_all   = (1 << 4),
     pe_wo_order_score   = (1 << 5),
     pe_wo_neg_threshold = (1 << 6),
     pe_wo_remove_after  = (1 << 7),
     pe_wo_ping_node     = (1 << 8),
     pe_wo_order_inst    = (1 << 9),
     pe_wo_coloc_inst    = (1 << 10),
     pe_wo_group_order   = (1 << 11),
     pe_wo_group_coloc   = (1 << 12),
     pe_wo_upstart       = (1 << 13),
     pe_wo_nagios        = (1 << 14),
     pe_wo_set_ordering  = (1 << 15),
 };
 
 extern uint32_t pe_wo;
 
 #define pe_warn_once(pe_wo_bit, fmt...) do {    \
         if (!pcmk_is_set(pe_wo, pe_wo_bit)) {  \
             if (pe_wo_bit == pe_wo_blind) {     \
                 crm_warn(fmt);                  \
             } else {                            \
                 pe_warn(fmt);                   \
             }                                   \
             pe_wo = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,       \
                                       "Warn-once", "logging", pe_wo,        \
                                       (pe_wo_bit), #pe_wo_bit);             \
         }                                       \
     } while (0)
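A hedged usage sketch; only the pe_wo_restart_type bit comes from the enum above, and the message text is illustrative:

static void
example_deprecation_warning(void)
{
    // Illustrative only: emit this warning at most once rather than on
    // every transition
    pe_warn_once(pe_wo_restart_type,
                 "Support for restart-type is deprecated and will be removed"
                 " in a future release");
}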
 
 
 typedef struct pe__location_constraint_s {
     char *id;                           // Constraint XML ID
     pe_resource_t *rsc_lh;              // Resource being located
     enum rsc_role_e role_filter;        // Role to locate
     enum pe_discover_e discover_mode;   // Resource discovery
     GList *node_list_rh;              // List of pe_node_t*
 } pe__location_t;
 
 typedef struct pe__order_constraint_s {
     int id;
     uint32_t flags; // Group of enum pe_ordering flags
 
     void *lh_opaque;
     pe_resource_t *lh_rsc;
     pe_action_t *lh_action;
     char *lh_action_task;
 
     void *rh_opaque;
     pe_resource_t *rh_rsc;
     pe_action_t *rh_action;
     char *rh_action_task;
 } pe__ordering_t;
 
 const pe_resource_t *pe__const_top_resource(const pe_resource_t *rsc,
                                             bool include_bundle);
 
 int pe__clone_max(const pe_resource_t *clone);
 int pe__clone_node_max(const pe_resource_t *clone);
 int pe__clone_promoted_max(const pe_resource_t *clone);
 int pe__clone_promoted_node_max(const pe_resource_t *clone);
 void pe__create_clone_notifications(pe_resource_t *clone);
 void pe__free_clone_notification_data(pe_resource_t *clone);
 void pe__create_clone_notif_pseudo_ops(pe_resource_t *clone,
                                        pe_action_t *start, pe_action_t *started,
                                        pe_action_t *stop, pe_action_t *stopped);
 
 
 pe_action_t *pe__new_rsc_pseudo_action(pe_resource_t *rsc, const char *task,
                                        bool optional, bool runnable);
 
 void pe__create_promotable_pseudo_ops(pe_resource_t *clone, bool any_promoting,
                                       bool any_demoting);
 
 bool pe_can_fence(const pe_working_set_t *data_set, const pe_node_t *node);
 
 void add_hash_param(GHashTable * hash, const char *name, const char *value);
 
 /*!
  * \internal
  * \enum pe__rsc_node
  * \brief Type of resource location lookup to perform
  */
 enum pe__rsc_node {
     pe__rsc_node_assigned = 0,  //!< Where resource is assigned
     pe__rsc_node_current  = 1,  //!< Where resource is running
 
     // @COMPAT: Use in native_location() at a compatibility break
     pe__rsc_node_pending  = 2,  //!< Where resource is pending
 };
 
 char *native_parameter(pe_resource_t * rsc, pe_node_t * node, gboolean create, const char *name,
                        pe_working_set_t * data_set);
 pe_node_t *native_location(const pe_resource_t *rsc, GList **list, int current);
 
 void pe_metadata(pcmk__output_t *out);
 void verify_pe_options(GHashTable * options);
 
 void native_add_running(pe_resource_t * rsc, pe_node_t * node, pe_working_set_t * data_set, gboolean failed);
 
 gboolean native_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean group_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean clone_unpack(pe_resource_t * rsc, pe_working_set_t * data_set);
 gboolean pe__unpack_bundle(pe_resource_t *rsc, pe_working_set_t *data_set);
 
 pe_resource_t *native_find_rsc(pe_resource_t *rsc, const char *id, const pe_node_t *node,
                                int flags);
 
 gboolean native_active(pe_resource_t * rsc, gboolean all);
 gboolean group_active(pe_resource_t * rsc, gboolean all);
 gboolean clone_active(pe_resource_t * rsc, gboolean all);
 gboolean pe__bundle_active(pe_resource_t *rsc, gboolean all);
 
 //! \deprecated This function will be removed in a future release
 void native_print(pe_resource_t *rsc, const char *pre_text, long options,
                   void *print_data);
 
 //! \deprecated This function will be removed in a future release
 void group_print(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data);
 
 //! \deprecated This function will be removed in a future release
 void clone_print(pe_resource_t *rsc, const char *pre_text, long options,
                  void *print_data);
 
 //! \deprecated This function will be removed in a future release
 void pe__print_bundle(pe_resource_t *rsc, const char *pre_text, long options,
                       void *print_data);
 
 gchar *pcmk__native_output_string(const pe_resource_t *rsc, const char *name,
                                   const pe_node_t *node, uint32_t show_opts,
                                   const char *target_role, bool show_nodes);
 
 int pe__name_and_nvpairs_xml(pcmk__output_t *out, bool is_list,
                              const char *tag_name, size_t pairs_count, ...);
 char *pe__node_display_name(pe_node_t *node, bool print_detail);
 
 
 // Clone notifications (pe_notif.c)
 void pe__order_notifs_after_fencing(const pe_action_t *action,
                                     pe_resource_t *rsc,
                                     pe_action_t *stonith_op);
 
 
 static inline const char *
 pe__rsc_bool_str(const pe_resource_t *rsc, uint64_t rsc_flag)
 {
     return pcmk__btoa(pcmk_is_set(rsc->flags, rsc_flag));
 }
 
 int pe__clone_xml(pcmk__output_t *out, va_list args);
 int pe__clone_default(pcmk__output_t *out, va_list args);
 int pe__group_xml(pcmk__output_t *out, va_list args);
 int pe__group_default(pcmk__output_t *out, va_list args);
 int pe__bundle_xml(pcmk__output_t *out, va_list args);
 int pe__bundle_html(pcmk__output_t *out, va_list args);
 int pe__bundle_text(pcmk__output_t *out, va_list args);
 int pe__node_html(pcmk__output_t *out, va_list args);
 int pe__node_text(pcmk__output_t *out, va_list args);
 int pe__node_xml(pcmk__output_t *out, va_list args);
 int pe__resource_xml(pcmk__output_t *out, va_list args);
 int pe__resource_html(pcmk__output_t *out, va_list args);
 int pe__resource_text(pcmk__output_t *out, va_list args);
 
 void native_free(pe_resource_t * rsc);
 void group_free(pe_resource_t * rsc);
 void clone_free(pe_resource_t * rsc);
 void pe__free_bundle(pe_resource_t *rsc);
 
 enum rsc_role_e native_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e group_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e clone_resource_state(const pe_resource_t * rsc, gboolean current);
 enum rsc_role_e pe__bundle_resource_state(const pe_resource_t *rsc,
                                           gboolean current);
 
 void pe__count_common(pe_resource_t *rsc);
 void pe__count_bundle(pe_resource_t *rsc);
 
 void common_free(pe_resource_t * rsc);
 
 pe_node_t *pe__copy_node(const pe_node_t *this_node);
 extern time_t get_effective_time(pe_working_set_t * data_set);
 
 /* Failure handling utilities (from failcounts.c) */
 
 // bit flags for fail count handling options
 enum pe_fc_flags_e {
     pe_fc_default   = (1 << 0),
     pe_fc_effective = (1 << 1), // don't count expired failures
     pe_fc_fillers   = (1 << 2), // if container, include filler failures in count
 };
 
 int pe_get_failcount(const pe_node_t *node, pe_resource_t *rsc,
                      time_t *last_failure, uint32_t flags,
                      const xmlNode *xml_op);
 
 pe_action_t *pe__clear_failcount(pe_resource_t *rsc, const pe_node_t *node,
                                  const char *reason,
                                  pe_working_set_t *data_set);
 
 /* Functions for finding/counting a resource's active nodes */
 
 bool pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
                            pe_node_t **active, unsigned int *count_all,
                            unsigned int *count_clean);
 
 pe_node_t *pe__find_active_requires(const pe_resource_t *rsc,
                                     unsigned int *count);
 
 static inline pe_node_t *
 pe__current_node(const pe_resource_t *rsc)
 {
     return (rsc == NULL)? NULL : rsc->fns->active_node(rsc, NULL, NULL);
 }
 
 
 /* Binary-like operators for lists of nodes */
 extern void node_list_exclude(GHashTable * list, GList *list2, gboolean merge_scores);
 
 GHashTable *pe__node_list2table(const GList *list);
 
 static inline gpointer
 pe_hash_table_lookup(GHashTable * hash, gconstpointer key)
 {
     if (hash) {
         return g_hash_table_lookup(hash, key);
     }
     return NULL;
 }
 
 extern pe_action_t *get_pseudo_op(const char *name, pe_working_set_t * data_set);
 extern gboolean order_actions(pe_action_t * lh_action, pe_action_t * rh_action, enum pe_ordering order);
 
 void pe__show_node_scores_as(const char *file, const char *function,
                              int line, bool to_log, const pe_resource_t *rsc,
                              const char *comment, GHashTable *nodes,
                              pe_working_set_t *data_set);
 
 #define pe__show_node_scores(level, rsc, text, nodes, data_set)    \
         pe__show_node_scores_as(__FILE__, __func__, __LINE__,      \
                                 (level), (rsc), (text), (nodes), (data_set))
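A hedged sketch of the intended call pattern; the first argument maps to pe__show_node_scores_as()'s to_log parameter, and the comment string is illustrative:

static void
example_show_scores(pe_resource_t *rsc)
{
    // Illustrative only: trace-log the resource's allowed-node scores
    pe__show_node_scores(true, rsc, "Allowed nodes", rsc->allowed_nodes,
                         rsc->cluster);
}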
 
 xmlNode *find_rsc_op_entry(const pe_resource_t *rsc, const char *key);
 
 pe_action_t *custom_action(pe_resource_t *rsc, char *key, const char *task,
                            const pe_node_t *on_node, gboolean optional,
                           gboolean save_action, pe_working_set_t *data_set);
 
 #  define delete_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DELETE, 0)
 #  define delete_action(rsc, node, optional) custom_action(		\
 		rsc, delete_key(rsc), CRMD_ACTION_DELETE, node,		\
 		optional, TRUE, rsc->cluster);
 
 #  define stopped_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOPPED, 0)
 #  define stopped_action(rsc, node, optional) custom_action(		\
 		rsc, stopped_key(rsc), CRMD_ACTION_STOPPED, node,	\
 		optional, TRUE, rsc->cluster);
 
 #  define stop_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STOP, 0)
 #  define stop_action(rsc, node, optional) custom_action(			\
 		rsc, stop_key(rsc), CRMD_ACTION_STOP, node,		\
 		optional, TRUE, rsc->cluster);
 
 #  define reload_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_RELOAD_AGENT, 0)
 #  define start_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_START, 0)
 #  define start_action(rsc, node, optional) custom_action(		\
 		rsc, start_key(rsc), CRMD_ACTION_START, node,		\
 		optional, TRUE, rsc->cluster)
 
 #  define started_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_STARTED, 0)
 #  define started_action(rsc, node, optional) custom_action(		\
 		rsc, started_key(rsc), CRMD_ACTION_STARTED, node,	\
 		optional, TRUE, rsc->cluster)
 
 #  define promote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTE, 0)
 #  define promote_action(rsc, node, optional) custom_action(		\
 		rsc, promote_key(rsc), CRMD_ACTION_PROMOTE, node,	\
 		optional, TRUE, rsc->cluster)
 
 #  define promoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_PROMOTED, 0)
 #  define promoted_action(rsc, node, optional) custom_action(		\
 		rsc, promoted_key(rsc), CRMD_ACTION_PROMOTED, node,	\
 		optional, TRUE, rsc->cluster)
 
 #  define demote_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTE, 0)
 #  define demote_action(rsc, node, optional) custom_action(		\
 		rsc, demote_key(rsc), CRMD_ACTION_DEMOTE, node,		\
 		optional, TRUE, rsc->cluster)
 
 #  define demoted_key(rsc) pcmk__op_key(rsc->id, CRMD_ACTION_DEMOTED, 0)
 #  define demoted_action(rsc, node, optional) custom_action(		\
 		rsc, demoted_key(rsc), CRMD_ACTION_DEMOTED, node,	\
 		optional, TRUE, rsc->cluster)
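The convenience macros above wrap custom_action() with the matching key and task. A minimal sketch of combining them with order_actions() (context and ordering flag are illustrative):

static void
example_restart(pe_resource_t *rsc, pe_node_t *node)
{
    // Illustrative only: schedule a non-optional stop followed by a start
    pe_action_t *stop = stop_action(rsc, node, FALSE);
    pe_action_t *start = start_action(rsc, node, FALSE);

    order_actions(stop, start, pe_order_optional);
}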
 
 extern int pe_get_configured_timeout(pe_resource_t *rsc, const char *action,
                                      pe_working_set_t *data_set);
 
 pe_action_t *find_first_action(const GList *input, const char *uuid,
                                const char *task, const pe_node_t *on_node);
 
 enum action_tasks get_complex_task(const pe_resource_t *rsc, const char *name);
 
 extern GList *find_actions(GList *input, const char *key, const pe_node_t *on_node);
 GList *find_actions_exact(GList *input, const char *key,
                           const pe_node_t *on_node);
 GList *pe__resource_actions(const pe_resource_t *rsc, const pe_node_t *node,
                             const char *task, bool require_node);
 
 extern void pe_free_action(pe_action_t * action);
 
 void resource_location(pe_resource_t *rsc, const pe_node_t *node, int score,
                        const char *tag, pe_working_set_t *data_set);
 
 extern int pe__is_newer_op(const xmlNode *xml_a, const xmlNode *xml_b,
                            bool same_node_default);
 extern gint sort_op_by_callid(gconstpointer a, gconstpointer b);
 gboolean get_target_role(const pe_resource_t *rsc, enum rsc_role_e *role);
 void pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role,
                        const char *why);
 
 pe_resource_t *find_clone_instance(const pe_resource_t *rsc,
                                    const char *sub_id);
 
 extern void destroy_ticket(gpointer data);
 extern pe_ticket_t *ticket_new(const char *ticket_id, pe_working_set_t * data_set);
 
 // Functions for manipulating resource names
 const char *pe_base_name_end(const char *id);
 char *clone_strip(const char *last_rsc_id);
 char *clone_zero(const char *last_rsc_id);
 
 static inline bool
 pe_base_name_eq(const pe_resource_t *rsc, const char *id)
 {
     if (id && rsc && rsc->id) {
         // Number of characters in rsc->id before any clone suffix
         size_t base_len = pe_base_name_end(rsc->id) - rsc->id + 1;
 
         return (strlen(id) == base_len) && !strncmp(id, rsc->id, base_len);
     }
     return false;
 }
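A brief sketch of what the clone-name helpers above produce, assuming the usual "name:instance" form of clone instance IDs (returned strings are newly allocated; free() is from <stdlib.h>):

static void
example_clone_names(void)
{
    char *base = clone_strip("myclone:2");  // expected "myclone"
    char *zero = clone_zero("myclone:2");   // expected "myclone:0"

    free(base);
    free(zero);
}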
 
 int pe__target_rc_from_xml(const xmlNode *xml_op);
 
 gint pe__cmp_node_name(gconstpointer a, gconstpointer b);
 bool is_set_recursive(const pe_resource_t *rsc, long long flag, bool any);
 
 enum rsc_digest_cmp_val {
     /*! Digests are the same */
     RSC_DIGEST_MATCH = 0,
     /*! Params that require a restart changed */
     RSC_DIGEST_RESTART,
     /*! Some parameter changed.  */
     RSC_DIGEST_ALL,
     /*! rsc op didn't have a digest associated with it, so
      *  it is unknown if parameters changed or not. */
     RSC_DIGEST_UNKNOWN,
 };
 
 typedef struct op_digest_cache_s {
     enum rsc_digest_cmp_val rc;
     xmlNode *params_all;
     xmlNode *params_secure;
     xmlNode *params_restart;
     char *digest_all_calc;
     char *digest_secure_calc;
     char *digest_restart_calc;
 } op_digest_cache_t;
 
 op_digest_cache_t *pe__calculate_digests(pe_resource_t *rsc, const char *task,
                                          guint *interval_ms,
                                          const pe_node_t *node,
                                          const xmlNode *xml_op,
                                          GHashTable *overrides,
                                          bool calc_secure,
                                          pe_working_set_t *data_set);
 
 void pe__free_digests(gpointer ptr);
 
 op_digest_cache_t *rsc_action_digest_cmp(pe_resource_t *rsc,
                                          const xmlNode *xml_op,
                                          pe_node_t *node,
                                          pe_working_set_t *data_set);
 
 pe_action_t *pe_fence_op(pe_node_t *node, const char *op, bool optional,
                          const char *reason, bool priority_delay,
                          pe_working_set_t *data_set);
 void trigger_unfencing(pe_resource_t *rsc, pe_node_t *node,
                        const char *reason, pe_action_t *dependency,
                        pe_working_set_t *data_set);
 
 char *pe__action2reason(const pe_action_t *action, enum pe_action_flags flag);
 void pe_action_set_reason(pe_action_t *action, const char *reason, bool overwrite);
 void pe__add_action_expected_result(pe_action_t *action, int expected_result);
 
 void pe__set_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
 void pe__clear_resource_flags_recursive(pe_resource_t *rsc, uint64_t flags);
 void pe__clear_resource_flags_on_all(pe_working_set_t *data_set, uint64_t flag);
 
 gboolean add_tag_ref(GHashTable * tags, const char * tag_name,  const char * obj_ref);
 
 //! \deprecated This function will be removed in a future release
 void print_rscs_brief(GList *rsc_list, const char * pre_text, long options,
                       void * print_data, gboolean print_all);
 int pe__rscs_brief_output(pcmk__output_t *out, GList *rsc_list, unsigned int options);
 void pe_fence_node(pe_working_set_t * data_set, pe_node_t * node, const char *reason, bool priority_delay);
 
 pe_node_t *pe_create_node(const char *id, const char *uname, const char *type,
                           const char *score, pe_working_set_t * data_set);
 
 //! \deprecated This function will be removed in a future release
 void common_print(pe_resource_t *rsc, const char *pre_text, const char *name,
                   const pe_node_t *node, long options, void *print_data);
 int pe__common_output_text(pcmk__output_t *out, const pe_resource_t *rsc,
                            const char *name, const pe_node_t *node,
                            unsigned int options);
 int pe__common_output_html(pcmk__output_t *out, const pe_resource_t *rsc,
                            const char *name, const pe_node_t *node,
                            unsigned int options);
 
 //! A single instance of a bundle
 typedef struct {
     int offset;                 //!< 0-origin index of this instance in bundle
     char *ipaddr;               //!< IP address associated with this instance
     pe_node_t *node;            //!< Node created for this instance
     pe_resource_t *ip;          //!< IP address resource for ipaddr
     pe_resource_t *child;       //!< Instance of bundled resource
     pe_resource_t *container;   //!< Container associated with this instance
     pe_resource_t *remote;      //!< Pacemaker Remote connection into container
 } pe__bundle_replica_t;
 
 GList *pe__bundle_containers(const pe_resource_t *bundle);
 
 int pe__bundle_max(const pe_resource_t *rsc);
 bool pe__node_is_bundle_instance(const pe_resource_t *bundle,
                                  const pe_node_t *node);
 pe_resource_t *pe__bundled_resource(const pe_resource_t *rsc);
 const pe_resource_t *pe__get_rsc_in_container(const pe_resource_t *instance);
 pe_resource_t *pe__first_container(const pe_resource_t *bundle);
 void pe__foreach_bundle_replica(pe_resource_t *bundle,
                                 bool (*fn)(pe__bundle_replica_t *, void *),
                                 void *user_data);
 void pe__foreach_const_bundle_replica(const pe_resource_t *bundle,
                                       bool (*fn)(const pe__bundle_replica_t *,
                                                  void *),
                                       void *user_data);
 pe_resource_t *pe__find_bundle_replica(const pe_resource_t *bundle,
                                        const pe_node_t *node);
 bool pe__bundle_needs_remote_name(pe_resource_t *rsc);
 const char *pe__add_bundle_remote_name(pe_resource_t *rsc,
                                        pe_working_set_t *data_set,
                                        xmlNode *xml, const char *field);
 
-const char *pe_node_attribute_calculated(const pe_node_t *node,
-                                         const char *name,
-                                         const pe_resource_t *rsc,
-                                         enum pe__rsc_node node_type);
+const char *pe__node_attribute_calculated(const pe_node_t *node,
+                                          const char *name,
+                                          const pe_resource_t *rsc,
+                                          enum pe__rsc_node node_type,
+                                          bool force_host);
 const char *pe_node_attribute_raw(const pe_node_t *node, const char *name);
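For reference, a hedged caller sketch of the renamed lookup with its new force_host argument, mirroring the updated call in pcmk_sched_location.c below (the attribute name is illustrative):

static const char *
example_attr_lookup(const pe_node_t *node, const pe_resource_t *rsc)
{
    // Illustrative only: passes false for the new force_host argument, as the
    // updated call in get_node_score() does
    return pe__node_attribute_calculated(node, "my-attr", rsc,
                                         pe__rsc_node_current, false);
}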
 bool pe__is_universal_clone(const pe_resource_t *rsc,
                             const pe_working_set_t *data_set);
 void pe__add_param_check(const xmlNode *rsc_op, pe_resource_t *rsc,
                          pe_node_t *node, enum pe_check_parameters,
                          pe_working_set_t *data_set);
 void pe__foreach_param_check(pe_working_set_t *data_set,
                              void (*cb)(pe_resource_t*, pe_node_t*,
                                         const xmlNode*,
                                         enum pe_check_parameters));
 void pe__free_param_checks(pe_working_set_t *data_set);
 
 bool pe__shutdown_requested(const pe_node_t *node);
 void pe__update_recheck_time(time_t recheck, pe_working_set_t *data_set);
 
 /*!
  * \internal
  * \brief Register xml formatting message functions.
  *
  * \param[in,out] out  Output object to register messages with
  */
 void pe__register_messages(pcmk__output_t *out);
 
 void pe__unpack_dataset_nvpairs(const xmlNode *xml_obj, const char *set_name,
                                 const pe_rule_eval_data_t *rule_data,
                                 GHashTable *hash, const char *always_first,
                                 gboolean overwrite, pe_working_set_t *data_set);
 
 bool pe__resource_is_disabled(const pe_resource_t *rsc);
 pe_action_t *pe__clear_resource_history(pe_resource_t *rsc,
                                         const pe_node_t *node,
                                         pe_working_set_t *data_set);
 
 GList *pe__rscs_with_tag(pe_working_set_t *data_set, const char *tag_name);
 GList *pe__unames_with_tag(pe_working_set_t *data_set, const char *tag_name);
 bool pe__rsc_has_tag(pe_working_set_t *data_set, const char *rsc, const char *tag);
 bool pe__uname_has_tag(pe_working_set_t *data_set, const char *node, const char *tag);
 
 bool pe__rsc_running_on_only(const pe_resource_t *rsc, const pe_node_t *node);
 bool pe__rsc_running_on_any(pe_resource_t *rsc, GList *node_list);
 GList *pe__filter_rsc_list(GList *rscs, GList *filter);
 GList * pe__build_node_name_list(pe_working_set_t *data_set, const char *s);
 GList * pe__build_rsc_list(pe_working_set_t *data_set, const char *s);
 
 bool pcmk__rsc_filtered_by_node(pe_resource_t *rsc, GList *only_node);
 
 gboolean pe__bundle_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                                 gboolean check_parent);
 gboolean pe__clone_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                                gboolean check_parent);
 gboolean pe__group_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                                gboolean check_parent);
 gboolean pe__native_is_filtered(const pe_resource_t *rsc, GList *only_rsc,
                                 gboolean check_parent);
 
 xmlNode *pe__failed_probe_for_rsc(const pe_resource_t *rsc, const char *name);
 
 const char *pe__clone_child_id(const pe_resource_t *rsc);
 
 int pe__sum_node_health_scores(const pe_node_t *node, int base_health);
 int pe__node_health(pe_node_t *node);
 
 static inline enum pcmk__health_strategy
 pe__health_strategy(pe_working_set_t *data_set)
 {
     return pcmk__parse_health_strategy(pe_pref(data_set->config_hash,
                                                PCMK__OPT_NODE_HEALTH_STRATEGY));
 }
 
 static inline int
 pe__health_score(const char *option, pe_working_set_t *data_set)
 {
     return char2score(pe_pref(data_set->config_hash, option));
 }
 
 /*!
  * \internal
  * \brief Return a string suitable for logging as a node name
  *
  * \param[in] node  Node to return a node name string for
  *
  * \return Node name if available, otherwise node ID if available,
  *         otherwise "unspecified node" if node is NULL or "unidentified node"
  *         if node has neither a name nor ID.
  */
 static inline const char *
 pe__node_name(const pe_node_t *node)
 {
     if (node == NULL) {
         return "unspecified node";
 
     } else if (node->details->uname != NULL) {
         return node->details->uname;
 
     } else if (node->details->id != NULL) {
         return node->details->id;
 
     } else {
         return "unidentified node";
     }
 }
 
 /*!
  * \internal
  * \brief Check whether two node objects refer to the same node
  *
  * \param[in] node1  First node object to compare
  * \param[in] node2  Second node object to compare
  *
  * \return true if \p node1 and \p node2 refer to the same node
  */
 static inline bool
 pe__same_node(const pe_node_t *node1, const pe_node_t *node2)
 {
     return (node1 != NULL) && (node2 != NULL)
            && (node1->details == node2->details);
 }
 
 /*!
  * \internal
  * \brief Get the operation key from an action history entry
  *
  * \param[in] xml  Action history entry
  *
  * \return Entry's operation key
  */
 static inline const char *
 pe__xe_history_key(const xmlNode *xml)
 {
     if (xml == NULL) {
         return NULL;
     } else {
         /* @COMPAT Pacemaker <= 1.1.5 did not add the key, and used the ID
          * instead. Checking for that allows us to process old saved CIBs,
          * including some regression tests.
          */
         const char *key = crm_element_value(xml, XML_LRM_ATTR_TASK_KEY);
 
         return pcmk__str_empty(key)? ID(xml) : key;
     }
 }
 
 #endif
diff --git a/lib/pacemaker/pcmk_sched_location.c b/lib/pacemaker/pcmk_sched_location.c
index 1b3ab3bd93..09ea137d71 100644
--- a/lib/pacemaker/pcmk_sched_location.c
+++ b/lib/pacemaker/pcmk_sched_location.c
@@ -1,674 +1,674 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdbool.h>
 #include <glib.h>
 
 #include <crm/crm.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/rules.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 static int
 get_node_score(const char *rule, const char *score, bool raw,
                pe_node_t *node, pe_resource_t *rsc)
 {
     int score_f = 0;
 
     if (score == NULL) {
         pe_err("Rule %s: no score specified.  Assuming 0.", rule);
 
     } else if (raw) {
         score_f = char2score(score);
 
     } else {
         const char *attr_score = NULL;
 
-        attr_score = pe_node_attribute_calculated(node, score, rsc,
-                                                  pe__rsc_node_current);
+        attr_score = pe__node_attribute_calculated(node, score, rsc,
+                                                   pe__rsc_node_current, false);
 
         if (attr_score == NULL) {
             crm_debug("Rule %s: %s did not have a value for %s",
                       rule, pe__node_name(node), score);
             score_f = -INFINITY;
 
         } else {
             crm_debug("Rule %s: %s had value %s for %s",
                       rule, pe__node_name(node), attr_score, score);
             score_f = char2score(attr_score);
         }
     }
     return score_f;
 }
 
 static pe__location_t *
 generate_location_rule(pe_resource_t *rsc, xmlNode *rule_xml,
                        const char *discovery, crm_time_t *next_change,
                        pe_re_match_data_t *re_match_data)
 {
     const char *rule_id = NULL;
     const char *score = NULL;
     const char *boolean = NULL;
     const char *role = NULL;
 
     GList *iter = NULL;
     GList *nodes = NULL;
 
     bool do_and = true;
     bool accept = true;
     bool raw_score = true;
     bool score_allocated = false;
 
     pe__location_t *location_rule = NULL;
 
     rule_xml = expand_idref(rule_xml, rsc->cluster->input);
     if (rule_xml == NULL) {
         return NULL;
     }
 
     rule_id = crm_element_value(rule_xml, XML_ATTR_ID);
     boolean = crm_element_value(rule_xml, XML_RULE_ATTR_BOOLEAN_OP);
     role = crm_element_value(rule_xml, XML_RULE_ATTR_ROLE);
 
     crm_trace("Processing rule: %s", rule_id);
 
     if ((role != NULL) && (text2role(role) == RSC_ROLE_UNKNOWN)) {
         pe_err("Bad role specified for %s: %s", rule_id, role);
         return NULL;
     }
 
     score = crm_element_value(rule_xml, XML_RULE_ATTR_SCORE);
     if (score == NULL) {
         score = crm_element_value(rule_xml, XML_RULE_ATTR_SCORE_ATTRIBUTE);
         if (score != NULL) {
             raw_score = false;
         }
     }
     if (pcmk__str_eq(boolean, "or", pcmk__str_casei)) {
         do_and = false;
     }
 
     location_rule = pcmk__new_location(rule_id, rsc, 0, discovery, NULL);
 
     if (location_rule == NULL) {
         return NULL;
     }
 
     if ((re_match_data != NULL) && (re_match_data->nregs > 0)
         && (re_match_data->pmatch[0].rm_so != -1) && !raw_score) {
 
         char *result = pe_expand_re_matches(score, re_match_data);
 
         if (result != NULL) {
             score = result;
             score_allocated = true;
         }
     }
 
     if (role != NULL) {
         crm_trace("Setting role filter: %s", role);
         location_rule->role_filter = text2role(role);
         if (location_rule->role_filter == RSC_ROLE_UNPROMOTED) {
             /* Any promotable clone cannot be promoted without being in the
              * unpromoted role first. Ergo, any constraint for the unpromoted
              * role applies to every role.
              */
             location_rule->role_filter = RSC_ROLE_UNKNOWN;
         }
     }
     if (do_and) {
         nodes = pcmk__copy_node_list(rsc->cluster->nodes, true);
         for (iter = nodes; iter != NULL; iter = iter->next) {
             pe_node_t *node = iter->data;
 
             node->weight = get_node_score(rule_id, score, raw_score, node, rsc);
         }
     }
 
     for (iter = rsc->cluster->nodes; iter != NULL; iter = iter->next) {
         int score_f = 0;
         pe_node_t *node = iter->data;
         pe_match_data_t match_data = {
             .re = re_match_data,
             .params = pe_rsc_params(rsc, node, rsc->cluster),
             .meta = rsc->meta,
         };
 
         accept = pe_test_rule(rule_xml, node->details->attrs, RSC_ROLE_UNKNOWN,
                               rsc->cluster->now, next_change, &match_data);
 
         crm_trace("Rule %s %s on %s", ID(rule_xml), accept? "passed" : "failed",
                   pe__node_name(node));
 
         score_f = get_node_score(rule_id, score, raw_score, node, rsc);
 
         if (accept) {
             pe_node_t *local = pe_find_node_id(nodes, node->details->id);
 
             if ((local == NULL) && do_and) {
                 continue;
 
             } else if (local == NULL) {
                 local = pe__copy_node(node);
                 nodes = g_list_append(nodes, local);
             }
 
             if (!do_and) {
                 local->weight = pcmk__add_scores(local->weight, score_f);
             }
             crm_trace("%s has score %s after %s", pe__node_name(node),
                       pcmk_readable_score(local->weight), rule_id);
 
         } else if (do_and && !accept) {
             // Remove it
             pe_node_t *delete = pe_find_node_id(nodes, node->details->id);
 
             if (delete != NULL) {
                 nodes = g_list_remove(nodes, delete);
                 crm_trace("%s did not match", pe__node_name(node));
             }
             free(delete);
         }
     }
 
     if (score_allocated) {
         free((char *)score);
     }
 
     location_rule->node_list_rh = nodes;
     if (location_rule->node_list_rh == NULL) {
         crm_trace("No matching nodes for rule %s", rule_id);
         return NULL;
     }
 
     crm_trace("%s: %d nodes matched",
               rule_id, g_list_length(location_rule->node_list_rh));
     return location_rule;
 }
 
 static void
 unpack_rsc_location(xmlNode *xml_obj, pe_resource_t *rsc, const char *role,
                     const char *score, pe_re_match_data_t *re_match_data)
 {
     pe__location_t *location = NULL;
     const char *rsc_id = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE);
     const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
     const char *node = crm_element_value(xml_obj, XML_CIB_TAG_NODE);
     const char *discovery = crm_element_value(xml_obj,
                                               XML_LOCATION_ATTR_DISCOVERY);
 
     if (rsc == NULL) {
         pcmk__config_warn("Ignoring constraint '%s' because resource '%s' "
                           "does not exist", id, rsc_id);
         return;
     }
 
     if (score == NULL) {
         score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
     }
 
     if ((node != NULL) && (score != NULL)) {
         int score_i = char2score(score);
         pe_node_t *match = pe_find_node(rsc->cluster->nodes, node);
 
         if (!match) {
             return;
         }
         location = pcmk__new_location(id, rsc, score_i, discovery, match);
 
     } else {
         bool empty = true;
         crm_time_t *next_change = crm_time_new_undefined();
 
         /* This loop is logically parallel to pe_evaluate_rules(), except
          * instead of checking whether any rule is active, we set up location
          * constraints for each active rule.
          */
         for (xmlNode *rule_xml = first_named_child(xml_obj, XML_TAG_RULE);
              rule_xml != NULL; rule_xml = crm_next_same_xml(rule_xml)) {
             empty = false;
             crm_trace("Unpacking %s/%s", id, ID(rule_xml));
             generate_location_rule(rsc, rule_xml, discovery, next_change,
                                    re_match_data);
         }
 
         if (empty) {
             pcmk__config_err("Ignoring constraint '%s' because it contains "
                              "no rules", id);
         }
 
         /* If there is a point in the future when the evaluation of a rule will
          * change, make sure the scheduler is re-run by that time.
          */
         if (crm_time_is_defined(next_change)) {
             time_t t = (time_t) crm_time_get_seconds_since_epoch(next_change);
 
             pe__update_recheck_time(t, rsc->cluster);
         }
         crm_time_free(next_change);
         return;
     }
 
     if (role == NULL) {
         role = crm_element_value(xml_obj, XML_RULE_ATTR_ROLE);
     }
 
     if ((location != NULL) && (role != NULL)) {
         if (text2role(role) == RSC_ROLE_UNKNOWN) {
             pe_err("Invalid constraint %s: Bad role %s", id, role);
             return;
 
         } else {
             enum rsc_role_e r = text2role(role);
             switch (r) {
                 case RSC_ROLE_UNKNOWN:
                 case RSC_ROLE_STARTED:
                 case RSC_ROLE_UNPROMOTED:
                     /* Applies to all */
                     location->role_filter = RSC_ROLE_UNKNOWN;
                     break;
                 default:
                     location->role_filter = r;
                     break;
             }
         }
     }
 }
 
 static void
 unpack_simple_location(xmlNode *xml_obj, pe_working_set_t *data_set)
 {
     const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
     const char *value = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE);
 
     if (value) {
         pe_resource_t *rsc;
 
         rsc = pcmk__find_constraint_resource(data_set->resources, value);
         unpack_rsc_location(xml_obj, rsc, NULL, NULL, NULL);
     }
 
     value = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE_PATTERN);
     if (value) {
         regex_t *r_patt = calloc(1, sizeof(regex_t));
         bool invert = false;
 
         if (value[0] == '!') {
             value++;
             invert = true;
         }
 
         if (regcomp(r_patt, value, REG_EXTENDED) != 0) {
             pcmk__config_err("Ignoring constraint '%s' because "
                              XML_LOC_ATTR_SOURCE_PATTERN
                              " has invalid value '%s'", id, value);
             free(r_patt);
             return;
         }
 
         for (GList *iter = data_set->resources; iter != NULL;
              iter = iter->next) {
 
             pe_resource_t *r = iter->data;
             int nregs = 0;
             regmatch_t *pmatch = NULL;
             int status;
 
             if (r_patt->re_nsub > 0) {
                 nregs = r_patt->re_nsub + 1;
             } else {
                 nregs = 1;
             }
             pmatch = calloc(nregs, sizeof(regmatch_t));
 
             status = regexec(r_patt, r->id, nregs, pmatch, 0);
 
             if (!invert && (status == 0)) {
                 pe_re_match_data_t re_match_data = {
                                                 .string = r->id,
                                                 .nregs = nregs,
                                                 .pmatch = pmatch
                                                };
 
                 crm_debug("'%s' matched '%s' for %s", r->id, value, id);
                 unpack_rsc_location(xml_obj, r, NULL, NULL, &re_match_data);
 
             } else if (invert && (status != 0)) {
                 crm_debug("'%s' is an inverted match of '%s' for %s",
                           r->id, value, id);
                 unpack_rsc_location(xml_obj, r, NULL, NULL, NULL);
 
             } else {
                 crm_trace("'%s' does not match '%s' for %s", r->id, value, id);
             }
 
             free(pmatch);
         }
 
         regfree(r_patt);
         free(r_patt);
     }
 }
 
 // \return Standard Pacemaker return code
 static int
 unpack_location_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
                      pe_working_set_t *data_set)
 {
     const char *id = NULL;
     const char *rsc_id = NULL;
     const char *state = NULL;
     pe_resource_t *rsc = NULL;
     pe_tag_t *tag = NULL;
     xmlNode *rsc_set = NULL;
 
     *expanded_xml = NULL;
 
     CRM_CHECK(xml_obj != NULL, return EINVAL);
 
     id = ID(xml_obj);
     if (id == NULL) {
         pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
                          crm_element_name(xml_obj));
         return pcmk_rc_unpack_error;
     }
 
     // Check whether there are any resource sets with template or tag references
     *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, data_set);
     if (*expanded_xml != NULL) {
         crm_log_xml_trace(*expanded_xml, "Expanded rsc_location");
         return pcmk_rc_ok;
     }
 
     rsc_id = crm_element_value(xml_obj, XML_LOC_ATTR_SOURCE);
     if (rsc_id == NULL) {
         return pcmk_rc_ok;
     }
 
     if (!pcmk__valid_resource_or_tag(data_set, rsc_id, &rsc, &tag)) {
         pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
                          "valid resource or tag", id, rsc_id);
         return pcmk_rc_unpack_error;
 
     } else if (rsc != NULL) {
         // No template is referenced
         return pcmk_rc_ok;
     }
 
     state = crm_element_value(xml_obj, XML_RULE_ATTR_ROLE);
 
     *expanded_xml = copy_xml(xml_obj);
 
     // Convert any template or tag reference into constraint resource_set
     if (!pcmk__tag_to_set(*expanded_xml, &rsc_set, XML_LOC_ATTR_SOURCE,
                           false, data_set)) {
         free_xml(*expanded_xml);
         *expanded_xml = NULL;
         return pcmk_rc_unpack_error;
     }
 
     if (rsc_set != NULL) {
         if (state != NULL) {
             // Move "rsc-role" into converted resource_set as "role" attribute
             crm_xml_add(rsc_set, "role", state);
             xml_remove_prop(*expanded_xml, XML_RULE_ATTR_ROLE);
         }
         crm_log_xml_trace(*expanded_xml, "Expanded rsc_location");
 
     } else {
         // No sets
         free_xml(*expanded_xml);
         *expanded_xml = NULL;
     }
 
     return pcmk_rc_ok;
 }
 
 // \return Standard Pacemaker return code
 static int
 unpack_location_set(xmlNode *location, xmlNode *set, pe_working_set_t *data_set)
 {
     xmlNode *xml_rsc = NULL;
     pe_resource_t *resource = NULL;
     const char *set_id;
     const char *role;
     const char *local_score;
 
     CRM_CHECK(set != NULL, return EINVAL);
 
     set_id = ID(set);
     if (set_id == NULL) {
         pcmk__config_err("Ignoring " XML_CONS_TAG_RSC_SET " without "
                          XML_ATTR_ID " in constraint '%s'",
                          pcmk__s(ID(location), "(missing ID)"));
         return pcmk_rc_unpack_error;
     }
 
     role = crm_element_value(set, "role");
     local_score = crm_element_value(set, XML_RULE_ATTR_SCORE);
 
     for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
          xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
 
         resource = pcmk__find_constraint_resource(data_set->resources,
                                                   ID(xml_rsc));
         if (resource == NULL) {
             pcmk__config_err("%s: No resource found for %s",
                              set_id, ID(xml_rsc));
             return pcmk_rc_unpack_error;
         }
 
         unpack_rsc_location(location, resource, role, local_score, NULL);
     }
 
     return pcmk_rc_ok;
 }
 
 void
 pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set)
 {
     xmlNode *set = NULL;
     bool any_sets = false;
 
     xmlNode *orig_xml = NULL;
     xmlNode *expanded_xml = NULL;
 
     if (unpack_location_tags(xml_obj, &expanded_xml, data_set) != pcmk_rc_ok) {
         return;
     }
 
     if (expanded_xml) {
         orig_xml = xml_obj;
         xml_obj = expanded_xml;
     }
 
     for (set = first_named_child(xml_obj, XML_CONS_TAG_RSC_SET); set != NULL;
          set = crm_next_same_xml(set)) {
 
         any_sets = true;
         set = expand_idref(set, data_set->input);
         if ((set == NULL) // Configuration error, message already logged
             || (unpack_location_set(xml_obj, set, data_set) != pcmk_rc_ok)) {
 
             if (expanded_xml) {
                 free_xml(expanded_xml);
             }
             return;
         }
     }
 
     if (expanded_xml) {
         free_xml(expanded_xml);
         xml_obj = orig_xml;
     }
 
     if (!any_sets) {
         unpack_simple_location(xml_obj, data_set);
     }
 }
 
 /*!
  * \internal
  * \brief Add a new location constraint to a cluster working set
  *
  * \param[in]     id             XML ID of location constraint
  * \param[in,out] rsc            Resource in location constraint
  * \param[in]     node_score     Constraint score
  * \param[in]     discover_mode  Resource discovery option for constraint
  * \param[in]     node           Node in constraint (or NULL if rule-based)
  *
  * \return Newly allocated location constraint
  * \note The result will be added to the cluster (via \p rsc) and should not be
  *       freed separately.
  */
 pe__location_t *
 pcmk__new_location(const char *id, pe_resource_t *rsc,
                    int node_score, const char *discover_mode, pe_node_t *node)
 {
     pe__location_t *new_con = NULL;
 
     if (id == NULL) {
         pe_err("Invalid constraint: no ID specified");
         return NULL;
 
     } else if (rsc == NULL) {
         pe_err("Invalid constraint %s: no resource specified", id);
         return NULL;
 
     } else if (node == NULL) {
         CRM_CHECK(node_score == 0, return NULL);
     }
 
     new_con = calloc(1, sizeof(pe__location_t));
     if (new_con != NULL) {
         new_con->id = strdup(id);
         new_con->rsc_lh = rsc;
         new_con->node_list_rh = NULL;
         new_con->role_filter = RSC_ROLE_UNKNOWN;
 
         if (pcmk__str_eq(discover_mode, "always",
                          pcmk__str_null_matches|pcmk__str_casei)) {
             new_con->discover_mode = pe_discover_always;
 
         } else if (pcmk__str_eq(discover_mode, "never", pcmk__str_casei)) {
             new_con->discover_mode = pe_discover_never;
 
         } else if (pcmk__str_eq(discover_mode, "exclusive", pcmk__str_casei)) {
             new_con->discover_mode = pe_discover_exclusive;
             rsc->exclusive_discover = TRUE;
 
         } else {
             pe_err("Invalid " XML_LOCATION_ATTR_DISCOVERY " value %s "
                    "in location constraint", discover_mode);
         }
 
         if (node != NULL) {
             pe_node_t *copy = pe__copy_node(node);
 
             copy->weight = node_score;
             new_con->node_list_rh = g_list_prepend(NULL, copy);
         }
 
         rsc->cluster->placement_constraints = g_list_prepend(
             rsc->cluster->placement_constraints, new_con);
         rsc->rsc_location = g_list_prepend(rsc->rsc_location, new_con);
     }
 
     return new_con;
 }
 
 /*!
  * \internal
  * \brief Apply all location constraints
  *
  * \param[in,out] data_set       Cluster working set
  */
 void
 pcmk__apply_locations(pe_working_set_t *data_set)
 {
     for (GList *iter = data_set->placement_constraints;
          iter != NULL; iter = iter->next) {
         pe__location_t *location = iter->data;
 
         location->rsc_lh->cmds->apply_location(location->rsc_lh, location);
     }
 }
 
 /*!
  * \internal
  * \brief Apply a location constraint to a resource's allowed node scores
  *
  * \param[in,out] rsc         Resource to apply constraint to
  * \param[in,out] location    Location constraint to apply
  *
  * \note This does not consider the resource's children, so the resource's
  *       apply_location() method should be used instead in most cases.
  */
 void
 pcmk__apply_location(pe_resource_t *rsc, pe__location_t *location)
 {
     bool need_role = false;
 
     CRM_ASSERT((rsc != NULL) && (location != NULL));
 
     // If a role was specified, ensure constraint is applicable
     need_role = (location->role_filter > RSC_ROLE_UNKNOWN);
     if (need_role && (location->role_filter != rsc->next_role)) {
         pe_rsc_trace(rsc,
                      "Not applying %s to %s because role will be %s not %s",
                      location->id, rsc->id, role2text(rsc->next_role),
                      role2text(location->role_filter));
         return;
     }
 
     if (location->node_list_rh == NULL) {
         pe_rsc_trace(rsc, "Not applying %s to %s because no nodes match",
                      location->id, rsc->id);
         return;
     }
 
     pe_rsc_trace(rsc, "Applying %s%s%s to %s", location->id,
                  (need_role? " for role " : ""),
                  (need_role? role2text(location->role_filter) : ""), rsc->id);
 
     for (GList *iter = location->node_list_rh;
          iter != NULL; iter = iter->next) {
 
         pe_node_t *node = iter->data;
         pe_node_t *allowed_node = NULL;
 
         allowed_node = (pe_node_t *) pe_hash_table_lookup(rsc->allowed_nodes,
                                                           node->details->id);
         if (allowed_node == NULL) {
             pe_rsc_trace(rsc, "* = %d on %s",
                          node->weight, pe__node_name(node));
             allowed_node = pe__copy_node(node);
             g_hash_table_insert(rsc->allowed_nodes,
                                 (gpointer) allowed_node->details->id,
                                 allowed_node);
         } else {
             pe_rsc_trace(rsc, "* + %d on %s",
                          node->weight, pe__node_name(node));
             allowed_node->weight = pcmk__add_scores(allowed_node->weight,
                                                     node->weight);
         }
 
         if (allowed_node->rsc_discover_mode < location->discover_mode) {
             if (location->discover_mode == pe_discover_exclusive) {
                 rsc->exclusive_discover = TRUE;
             }
             /* exclusive > never > always... always is default */
             allowed_node->rsc_discover_mode = location->discover_mode;
         }
     }
 }
diff --git a/lib/pacemaker/pcmk_sched_promotable.c b/lib/pacemaker/pcmk_sched_promotable.c
index 1df4729af1..1201b3beca 100644
--- a/lib/pacemaker/pcmk_sched_promotable.c
+++ b/lib/pacemaker/pcmk_sched_promotable.c
@@ -1,1284 +1,1285 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 /*!
  * \internal
  * \brief Add implicit promotion ordering for a promotable instance
  *
  * \param[in,out] clone  Clone resource
  * \param[in,out] child  Instance of \p clone being ordered
  * \param[in,out] last   Previous instance ordered (NULL if \p child is first)
  */
 static void
 order_instance_promotion(pe_resource_t *clone, pe_resource_t *child,
                          pe_resource_t *last)
 {
     // "Promote clone" -> promote instance -> "clone promoted"
     pcmk__order_resource_actions(clone, RSC_PROMOTE, child, RSC_PROMOTE,
                                  pe_order_optional);
     pcmk__order_resource_actions(child, RSC_PROMOTE, clone, RSC_PROMOTED,
                                  pe_order_optional);
 
     // If clone is ordered, order this instance relative to last
     if ((last != NULL) && pe__clone_is_ordered(clone)) {
         pcmk__order_resource_actions(last, RSC_PROMOTE, child, RSC_PROMOTE,
                                      pe_order_optional);
     }
 }
 
 /*!
  * \internal
  * \brief Add implicit demotion ordering for a promotable instance
  *
  * \param[in,out] clone  Clone resource
  * \param[in,out] child  Instance of \p clone being ordered
  * \param[in,out] last   Previous instance ordered (NULL if \p child is first)
  */
 static void
 order_instance_demotion(pe_resource_t *clone, pe_resource_t *child,
                         pe_resource_t *last)
 {
     // "Demote clone" -> demote instance -> "clone demoted"
     pcmk__order_resource_actions(clone, RSC_DEMOTE, child, RSC_DEMOTE,
                                  pe_order_implies_first_printed);
     pcmk__order_resource_actions(child, RSC_DEMOTE, clone, RSC_DEMOTED,
                                  pe_order_implies_then_printed);
 
     // If clone is ordered, order this instance relative to last
     if ((last != NULL) && pe__clone_is_ordered(clone)) {
         pcmk__order_resource_actions(child, RSC_DEMOTE, last, RSC_DEMOTE,
                                      pe_order_optional);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether an instance will be promoted or demoted
  *
  * \param[in]  rsc        Instance to check
  * \param[out] demoting   If \p rsc will be demoted, this will be set to true
  * \param[out] promoting  If \p rsc will be promoted, this will be set to true
  */
 static void
 check_for_role_change(const pe_resource_t *rsc, bool *demoting, bool *promoting)
 {
     const GList *iter = NULL;
 
     // If this is a cloned group, check group members recursively
     if (rsc->children != NULL) {
         for (iter = rsc->children; iter != NULL; iter = iter->next) {
             check_for_role_change((const pe_resource_t *) iter->data,
                                   demoting, promoting);
         }
         return;
     }
 
     for (iter = rsc->actions; iter != NULL; iter = iter->next) {
         const pe_action_t *action = (const pe_action_t *) iter->data;
 
         if (*promoting && *demoting) {
             return;
 
         } else if (pcmk_is_set(action->flags, pe_action_optional)) {
             continue;
 
         } else if (pcmk__str_eq(RSC_DEMOTE, action->task, pcmk__str_none)) {
             *demoting = true;
 
         } else if (pcmk__str_eq(RSC_PROMOTE, action->task, pcmk__str_none)) {
             *promoting = true;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Add promoted-role location constraint scores to an instance's priority
  *
  * Adjust a promotable clone instance's promotion priority by the scores of any
  * location constraints in a list that are limited to the promoted role and that
  * include the node where the instance will be placed.
  *
  * \param[in,out] child                 Promotable clone instance
  * \param[in]     location_constraints  List of location constraints to apply
  * \param[in]     chosen                Node where \p child will be placed
  */
 static void
 apply_promoted_locations(pe_resource_t *child,
                          const GList *location_constraints,
                          const pe_node_t *chosen)
 {
     for (const GList *iter = location_constraints; iter; iter = iter->next) {
         const pe__location_t *location = iter->data;
         const pe_node_t *constraint_node = NULL;
 
         if (location->role_filter == RSC_ROLE_PROMOTED) {
             constraint_node = pe_find_node_id(location->node_list_rh,
                                               chosen->details->id);
         }
         if (constraint_node != NULL) {
             int new_priority = pcmk__add_scores(child->priority,
                                                 constraint_node->weight);
 
             pe_rsc_trace(child,
                          "Applying location %s to %s promotion priority on %s: "
                          "%s + %s = %s",
                          location->id, child->id,
                          pe__node_name(constraint_node),
                          pcmk_readable_score(child->priority),
                          pcmk_readable_score(constraint_node->weight),
                          pcmk_readable_score(new_priority));
             child->priority = new_priority;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Get the node that an instance will be promoted on
  *
  * \param[in] rsc  Promotable clone instance to check
  *
  * \return Node that \p rsc will be promoted on, or NULL if none
  */
 static pe_node_t *
 node_to_be_promoted_on(const pe_resource_t *rsc)
 {
     pe_node_t *node = NULL;
     pe_node_t *local_node = NULL;
     const pe_resource_t *parent = NULL;
 
     // If this is a cloned group, bail if any group member can't be promoted
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         pe_resource_t *child = (pe_resource_t *) iter->data;
 
         if (node_to_be_promoted_on(child) == NULL) {
             pe_rsc_trace(rsc,
                          "%s can't be promoted because member %s can't",
                          rsc->id, child->id);
             return NULL;
         }
     }
 
     node = rsc->fns->location(rsc, NULL, FALSE);
     if (node == NULL) {
         pe_rsc_trace(rsc, "%s can't be promoted because it won't be active",
                      rsc->id);
         return NULL;
 
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         if (rsc->fns->state(rsc, TRUE) == RSC_ROLE_PROMOTED) {
             crm_notice("Unmanaged instance %s will be left promoted on %s",
                        rsc->id, pe__node_name(node));
         } else {
             pe_rsc_trace(rsc, "%s can't be promoted because it is unmanaged",
                          rsc->id);
             return NULL;
         }
 
     } else if (rsc->priority < 0) {
         pe_rsc_trace(rsc,
                      "%s can't be promoted because its promotion priority %d "
                      "is negative",
                      rsc->id, rsc->priority);
         return NULL;
 
     } else if (!pcmk__node_available(node, false, true)) {
         pe_rsc_trace(rsc, "%s can't be promoted because %s can't run resources",
                      rsc->id, pe__node_name(node));
         return NULL;
     }
 
     parent = pe__const_top_resource(rsc, false);
     local_node = pe_hash_table_lookup(parent->allowed_nodes, node->details->id);
 
     if (local_node == NULL) {
         /* It should not be possible for the scheduler to have assigned the
          * instance to a node where its parent is not allowed, but it's good to
          * have a fail-safe.
          */
         if (pcmk_is_set(rsc->flags, pe_rsc_managed)) {
             crm_warn("%s can't be promoted because %s is not allowed on %s "
                      "(scheduler bug?)",
                      rsc->id, parent->id, pe__node_name(node));
         } // else the instance is unmanaged and already promoted
         return NULL;
 
     } else if ((local_node->count >= pe__clone_promoted_node_max(parent))
                && pcmk_is_set(rsc->flags, pe_rsc_managed)) {
         pe_rsc_trace(rsc,
                      "%s can't be promoted because %s has "
                      "maximum promoted instances already",
                      rsc->id, pe__node_name(node));
         return NULL;
     }
 
     return local_node;
 }
 
 /*!
  * \internal
  * \brief Compare two promotable clone instances by promotion priority
  *
  * \param[in] a  First instance to compare
  * \param[in] b  Second instance to compare
  *
  * \return A negative number if \p a has higher promotion priority,
  *         a positive number if \p b has higher promotion priority,
  *         or 0 if promotion priorities are equal
  */
 static gint
 cmp_promotable_instance(gconstpointer a, gconstpointer b)
 {
     const pe_resource_t *rsc1 = (const pe_resource_t *) a;
     const pe_resource_t *rsc2 = (const pe_resource_t *) b;
 
     enum rsc_role_e role1 = RSC_ROLE_UNKNOWN;
     enum rsc_role_e role2 = RSC_ROLE_UNKNOWN;
 
     CRM_ASSERT((rsc1 != NULL) && (rsc2 != NULL));
 
     // Check sort index set by pcmk__set_instance_roles()
     if (rsc1->sort_index > rsc2->sort_index) {
         pe_rsc_trace(rsc1,
                      "%s has higher promotion priority than %s "
                      "(sort index %d > %d)",
                      rsc1->id, rsc2->id, rsc1->sort_index, rsc2->sort_index);
         return -1;
     } else if (rsc1->sort_index < rsc2->sort_index) {
         pe_rsc_trace(rsc1,
                      "%s has lower promotion priority than %s "
                      "(sort index %d < %d)",
                      rsc1->id, rsc2->id, rsc1->sort_index, rsc2->sort_index);
         return 1;
     }
 
     // If those are the same, prefer instance whose current role is higher
     role1 = rsc1->fns->state(rsc1, TRUE);
     role2 = rsc2->fns->state(rsc2, TRUE);
     if (role1 > role2) {
         pe_rsc_trace(rsc1,
                      "%s has higher promotion priority than %s "
                      "(higher current role)",
                      rsc1->id, rsc2->id);
         return -1;
     } else if (role1 < role2) {
         pe_rsc_trace(rsc1,
                      "%s has lower promotion priority than %s "
                      "(lower current role)",
                      rsc1->id, rsc2->id);
         return 1;
     }
 
     // Finally, do normal clone instance sorting
     return pcmk__cmp_instance(a, b);
 }
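
 /* Illustrative sketch, not part of the actual change: the comparator above
  * layers tie-breakers, returning as soon as one tier differs. A minimal
  * stand-alone version of the same pattern, using a hypothetical instance
  * summary type, might look like this:
  */

 // Hypothetical instance summary used only for this illustration
 struct example_instance {
     int sort_index;     // higher sorts first
     int current_role;   // on ties, higher current role sorts first
 };

 // Three-way comparison: negative if a sorts before b, positive if after
 static int
 example_cmp_instance(const struct example_instance *a,
                      const struct example_instance *b)
 {
     if (a->sort_index != b->sort_index) {
         return (a->sort_index > b->sort_index)? -1 : 1;
     }
     if (a->current_role != b->current_role) {
         return (a->current_role > b->current_role)? -1 : 1;
     }
     return 0;   // any further tie-breaking would go here
 }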
 
 /*!
  * \internal
  * \brief Add a promotable clone instance's sort index to its node's score
  *
  * Add a promotable clone instance's sort index (which sums its promotion
  * preferences and scores of relevant location constraints for the promoted
  * role) to the node score of the instance's assigned node.
  *
  * \param[in]     data       Promotable clone instance
  * \param[in,out] user_data  Clone parent of \p data
  */
 static void
 add_sort_index_to_node_score(gpointer data, gpointer user_data)
 {
     const pe_resource_t *child = (const pe_resource_t *) data;
     pe_resource_t *clone = (pe_resource_t *) user_data;
 
     pe_node_t *node = NULL;
     const pe_node_t *chosen = NULL;
 
     if (child->sort_index < 0) {
         pe_rsc_trace(clone, "Not adding sort index of %s: negative", child->id);
         return;
     }
 
     chosen = child->fns->location(child, NULL, FALSE);
     if (chosen == NULL) {
         pe_rsc_trace(clone, "Not adding sort index of %s: inactive", child->id);
         return;
     }
 
     node = (pe_node_t *) pe_hash_table_lookup(clone->allowed_nodes,
                                               chosen->details->id);
     CRM_ASSERT(node != NULL);
 
     node->weight = pcmk__add_scores(child->sort_index, node->weight);
     pe_rsc_trace(clone,
                  "Added cumulative priority of %s (%s) to score on %s (now %s)",
                  child->id, pcmk_readable_score(child->sort_index),
                  pe__node_name(node), pcmk_readable_score(node->weight));
 }
 
 /*!
  * \internal
  * \brief Apply colocation to dependent's node scores if for promoted role
  *
  * \param[in,out] data       Colocation constraint to apply
  * \param[in,out] user_data  Promotable clone that is constraint's dependent
  */
 static void
 apply_coloc_to_dependent(gpointer data, gpointer user_data)
 {
     pcmk__colocation_t *constraint = (pcmk__colocation_t *) data;
     pe_resource_t *clone = (pe_resource_t *) user_data;
     pe_resource_t *primary = constraint->primary;
     uint32_t flags = pcmk__coloc_select_default;
     float factor = constraint->score / (float) INFINITY;
 
     if (constraint->dependent_role != RSC_ROLE_PROMOTED) {
         return;
     }
     if (constraint->score < INFINITY) {
         flags = pcmk__coloc_select_active;
     }
     pe_rsc_trace(clone, "Applying colocation %s (promoted %s with %s) @%s",
                  constraint->id, constraint->dependent->id,
                  constraint->primary->id,
                  pcmk_readable_score(constraint->score));
     primary->cmds->add_colocated_node_scores(primary, clone->id,
                                              &clone->allowed_nodes,
                                              constraint, factor, flags);
 }
 
 /*!
  * \internal
  * \brief Apply colocation to primary's node scores if for promoted role
  *
  * \param[in,out] data       Colocation constraint to apply
  * \param[in,out] user_data  Promotable clone that is constraint's primary
  */
 static void
 apply_coloc_to_primary(gpointer data, gpointer user_data)
 {
     pcmk__colocation_t *constraint = (pcmk__colocation_t *) data;
     pe_resource_t *clone = (pe_resource_t *) user_data;
     pe_resource_t *dependent = constraint->dependent;
     const float factor = constraint->score / (float) INFINITY;
     const uint32_t flags = pcmk__coloc_select_active
                            |pcmk__coloc_select_nonnegative;
 
     if ((constraint->primary_role != RSC_ROLE_PROMOTED)
          || !pcmk__colocation_has_influence(constraint, NULL)) {
         return;
     }
 
     pe_rsc_trace(clone, "Applying colocation %s (%s with promoted %s) @%s",
                  constraint->id, constraint->dependent->id,
                  constraint->primary->id,
                  pcmk_readable_score(constraint->score));
     dependent->cmds->add_colocated_node_scores(dependent, clone->id,
                                                &clone->allowed_nodes,
                                                constraint, factor, flags);
 }
 
 /*!
  * \internal
  * \brief Set clone instance's sort index to its node's score
  *
  * \param[in,out] data       Promotable clone instance
  * \param[in]     user_data  Parent clone of \p data
  */
 static void
 set_sort_index_to_node_score(gpointer data, gpointer user_data)
 {
     pe_resource_t *child = (pe_resource_t *) data;
     const pe_resource_t *clone = (const pe_resource_t *) user_data;
 
     pe_node_t *chosen = child->fns->location(child, NULL, FALSE);
 
     if (!pcmk_is_set(child->flags, pe_rsc_managed)
         && (child->next_role == RSC_ROLE_PROMOTED)) {
         child->sort_index = INFINITY;
         pe_rsc_trace(clone,
                      "Final sort index for %s is INFINITY (unmanaged promoted)",
                      child->id);
 
     } else if ((chosen == NULL) || (child->sort_index < 0)) {
         pe_rsc_trace(clone,
                      "Final sort index for %s is %d (ignoring node score)",
                      child->id, child->sort_index);
 
     } else {
         const pe_node_t *node = NULL;
 
         node = pe_hash_table_lookup(clone->allowed_nodes, chosen->details->id);
         CRM_ASSERT(node != NULL);
 
         child->sort_index = node->weight;
         pe_rsc_trace(clone,
                      "Adding scores for %s: final sort index for %s is %d",
                      clone->id, child->id, child->sort_index);
     }
 }
 
 /*!
  * \internal
  * \brief Sort a promotable clone's instances by descending promotion priority
  *
  * \param[in,out] clone  Promotable clone to sort
  */
 static void
 sort_promotable_instances(pe_resource_t *clone)
 {
     if (pe__set_clone_flag(clone, pe__clone_promotion_constrained)
             == pcmk_rc_already) {
         return;
     }
     pe__set_resource_flags(clone, pe_rsc_merging);
 
     for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
         pe_resource_t *child = (pe_resource_t *) iter->data;
 
         pe_rsc_trace(clone,
                      "Adding scores for %s: initial sort index for %s is %d",
                      clone->id, child->id, child->sort_index);
     }
     pe__show_node_scores(true, clone, "Before", clone->allowed_nodes,
                          clone->cluster);
 
     /* Because the this_with_colocations() and with_this_colocations() methods
      * boil down to copies of rsc_cons and rsc_cons_lhs for clones, we can use
      * those here directly for efficiency.
      */
     g_list_foreach(clone->children, add_sort_index_to_node_score, clone);
     g_list_foreach(clone->rsc_cons, apply_coloc_to_dependent, clone);
     g_list_foreach(clone->rsc_cons_lhs, apply_coloc_to_primary, clone);
 
     // Ban resource from all nodes if it needs a ticket but doesn't have it
     pcmk__require_promotion_tickets(clone);
 
     pe__show_node_scores(true, clone, "After", clone->allowed_nodes,
                          clone->cluster);
 
     // Reset sort indexes to final node scores
     g_list_foreach(clone->children, set_sort_index_to_node_score, clone);
 
     // Finally, sort instances in descending order of promotion priority
     clone->children = g_list_sort(clone->children, cmp_promotable_instance);
     pe__clear_resource_flags(clone, pe_rsc_merging);
 }
 
 /*!
  * \internal
  * \brief Find the active instance (if any) of an anonymous clone on a node
  *
  * \param[in] clone  Anonymous clone to check
  * \param[in] id     Instance ID (without instance number) to check
  * \param[in] node   Node to check
  *
  * \return
  */
 static pe_resource_t *
 find_active_anon_instance(const pe_resource_t *clone, const char *id,
                           const pe_node_t *node)
 {
     for (GList *iter = clone->children; iter; iter = iter->next) {
         pe_resource_t *child = iter->data;
         pe_resource_t *active = NULL;
 
         // Use ->find_rsc() in case this is a cloned group
         active = clone->fns->find_rsc(child, id, node,
                                       pe_find_clone|pe_find_current);
         if (active != NULL) {
             return active;
         }
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Check whether an anonymous clone instance is known on a node
  *
  * \param[in] clone  Anonymous clone to check
  * \param[in] id     Instance ID (without instance number) to check
  * \param[in] node   Node to check
  *
  * \return true if \p id instance of \p clone is known on \p node,
  *         otherwise false
  */
 static bool
 anonymous_known_on(const pe_resource_t *clone, const char *id,
                    const pe_node_t *node)
 {
     for (GList *iter = clone->children; iter; iter = iter->next) {
         pe_resource_t *child = iter->data;
 
         /* Use ->find_rsc() because this might be a cloned group, and knowing
          * that other members of the group are known here implies nothing.
          */
         child = clone->fns->find_rsc(child, id, NULL, pe_find_clone);
         CRM_LOG_ASSERT(child != NULL);
         if (child != NULL) {
             if (g_hash_table_lookup(child->known_on, node->details->id)) {
                 return true;
             }
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Check whether a node is allowed to run a resource
  *
  * \param[in] rsc   Resource to check
  * \param[in] node  Node to check
  *
  * \return true if \p node is allowed to run \p rsc, otherwise false
  */
 static bool
 is_allowed(const pe_resource_t *rsc, const pe_node_t *node)
 {
     pe_node_t *allowed = pe_hash_table_lookup(rsc->allowed_nodes,
                                               node->details->id);
 
     return (allowed != NULL) && (allowed->weight >= 0);
 }
 
 /*!
  * \internal
  * \brief Check whether a clone instance's promotion score should be considered
  *
  * \param[in] rsc   Promotable clone instance to check
  * \param[in] node  Node where score would be applied
  *
  * \return true if \p rsc's promotion score should be considered on \p node,
  *         otherwise false
  */
 static bool
 promotion_score_applies(const pe_resource_t *rsc, const pe_node_t *node)
 {
     char *id = clone_strip(rsc->id);
     const pe_resource_t *parent = pe__const_top_resource(rsc, false);
     pe_resource_t *active = NULL;
     const char *reason = "allowed";
 
     // Some checks apply only to anonymous clone instances
     if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
 
         // If instance is active on the node, its score definitely applies
         active = find_active_anon_instance(parent, id, node);
         if (active == rsc) {
             reason = "active";
             goto check_allowed;
         }
 
         /* If *no* instance is active on this node, this instance's score will
          * count if it has been probed on this node.
          */
         if ((active == NULL) && anonymous_known_on(parent, id, node)) {
             reason = "probed";
             goto check_allowed;
         }
     }
 
     /* If this clone's status is unknown on *all* nodes (e.g. cluster startup),
      * take all instances' scores into account, to make sure we use any
      * permanent promotion scores.
      */
     if ((rsc->running_on == NULL) && (g_hash_table_size(rsc->known_on) == 0)) {
         reason = "none probed";
         goto check_allowed;
     }
 
     /* Otherwise, we've probed and/or started the resource *somewhere*, so
      * consider promotion scores on nodes where we know the status.
      */
     if ((pe_hash_table_lookup(rsc->known_on, node->details->id) != NULL)
         || (pe_find_node_id(rsc->running_on, node->details->id) != NULL)) {
         reason = "known";
     } else {
         pe_rsc_trace(rsc,
                      "Ignoring %s promotion score (for %s) on %s: not probed",
                      rsc->id, id, pe__node_name(node));
         free(id);
         return false;
     }
 
 check_allowed:
     if (is_allowed(rsc, node)) {
         pe_rsc_trace(rsc, "Counting %s promotion score (for %s) on %s: %s",
                      rsc->id, id, pe__node_name(node), reason);
         free(id);
         return true;
     }
 
     pe_rsc_trace(rsc, "Ignoring %s promotion score (for %s) on %s: not allowed",
                  rsc->id, id, pe__node_name(node));
     free(id);
     return false;
 }
 
 /*!
  * \internal
  * \brief Get the value of a promotion score node attribute
  *
  * \param[in] rsc   Promotable clone instance to get promotion score for
  * \param[in] node  Node to get promotion score for
  * \param[in] name  Resource name to use in promotion score attribute name
  *
  * \return Value of promotion score node attribute for \p rsc on \p node
  */
 static const char *
 promotion_attr_value(const pe_resource_t *rsc, const pe_node_t *node,
                      const char *name)
 {
     char *attr_name = NULL;
     const char *attr_value = NULL;
     enum pe__rsc_node node_type = pe__rsc_node_assigned;
 
     if (pcmk_is_set(rsc->flags, pe_rsc_provisional)) {
         // Not assigned yet
         node_type = pe__rsc_node_current;
     }
     attr_name = pcmk_promotion_score_name(name);
-    attr_value = pe_node_attribute_calculated(node, attr_name, rsc, node_type);
+    attr_value = pe__node_attribute_calculated(node, attr_name, rsc, node_type,
+                                               false);
     free(attr_name);
     return attr_value;
 }
 
 /*!
  * \internal
  * \brief Get the promotion score for a clone instance on a node
  *
  * \param[in]  rsc         Promotable clone instance to get score for
  * \param[in]  node        Node to get score for
  * \param[out] is_default  If non-NULL, will be set true if no score available
  *
  * \return Promotion score for \p rsc on \p node (or 0 if none)
  */
 static int
 promotion_score(const pe_resource_t *rsc, const pe_node_t *node,
                 bool *is_default)
 {
     char *name = NULL;
     const char *attr_value = NULL;
 
     if (is_default != NULL) {
         *is_default = true;
     }
 
     CRM_CHECK((rsc != NULL) && (node != NULL), return 0);
 
     /* If this is an instance of a cloned group, the promotion score is the sum
      * of all members' promotion scores.
      */
     if (rsc->children != NULL) {
         int score = 0;
 
         for (const GList *iter = rsc->children;
              iter != NULL; iter = iter->next) {
 
             const pe_resource_t *child = (const pe_resource_t *) iter->data;
             bool child_default = false;
             int child_score = promotion_score(child, node, &child_default);
 
             if (!child_default && (is_default != NULL)) {
                 *is_default = false;
             }
             score += child_score;
         }
         return score;
     }
 
     if (!promotion_score_applies(rsc, node)) {
         return 0;
     }
 
     /* For the promotion score attribute name, use the name the resource is
      * known as in resource history, since that's what crm_attribute --promotion
      * would have used.
      */
     name = (rsc->clone_name == NULL)? rsc->id : rsc->clone_name;
 
     attr_value = promotion_attr_value(rsc, node, name);
     if (attr_value != NULL) {
         pe_rsc_trace(rsc, "Promotion score for %s on %s = %s",
                      name, pe__node_name(node), pcmk__s(attr_value, "(unset)"));
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
         /* If we don't have any resource history yet, we won't have clone_name.
          * In that case, for anonymous clones, try the resource name without
          * any instance number.
          */
         name = clone_strip(rsc->id);
         if (strcmp(rsc->id, name) != 0) {
             attr_value = promotion_attr_value(rsc, node, name);
             pe_rsc_trace(rsc, "Promotion score for %s on %s (for %s) = %s",
                          name, pe__node_name(node), rsc->id,
                          pcmk__s(attr_value, "(unset)"));
         }
         free(name);
     }
 
     if (attr_value == NULL) {
         return 0;
     }
 
     if (is_default != NULL) {
         *is_default = false;
     }
     return char2score(attr_value);
 }
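
 /* Illustrative sketch, not part of the actual change: promotion_score()
  * ultimately maps a node attribute string to a bounded integer score. Assuming
  * the attribute name is the resource name prefixed with "master-" (the
  * historical naming) and that textual INFINITY values map to the score cap,
  * a simplified stand-alone version of that conversion might be:
  */

 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>

 #define EXAMPLE_SCORE_CAP 1000000   // assumed value of the score cap

 // Build the promotion score attribute name for a resource (assumed format)
 static char *
 example_promotion_attr_name(const char *rsc_name)
 {
     size_t len = strlen("master-") + strlen(rsc_name) + 1;
     char *attr_name = malloc(len);

     if (attr_name != NULL) {
         snprintf(attr_name, len, "master-%s", rsc_name);
     }
     return attr_name;
 }

 // Convert an attribute value to a bounded score, handling INFINITY spellings
 static int
 example_char2score(const char *value)
 {
     long score = 0;

     if (value == NULL) {
         return 0;
     }
     if ((strcmp(value, "INFINITY") == 0) || (strcmp(value, "+INFINITY") == 0)) {
         return EXAMPLE_SCORE_CAP;
     }
     if (strcmp(value, "-INFINITY") == 0) {
         return -EXAMPLE_SCORE_CAP;
     }
     score = strtol(value, NULL, 10);
     if (score > EXAMPLE_SCORE_CAP) {
         return EXAMPLE_SCORE_CAP;
     }
     if (score < -EXAMPLE_SCORE_CAP) {
         return -EXAMPLE_SCORE_CAP;
     }
     return (int) score;
 }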
 
 /*!
  * \internal
  * \brief Include promotion scores in instances' node scores and priorities
  *
  * \param[in,out] rsc  Promotable clone resource to update
  */
 void
 pcmk__add_promotion_scores(pe_resource_t *rsc)
 {
     if (pe__set_clone_flag(rsc, pe__clone_promotion_added) == pcmk_rc_already) {
         return;
     }
 
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         pe_resource_t *child_rsc = (pe_resource_t *) iter->data;
 
         GHashTableIter iter;
         pe_node_t *node = NULL;
         int score, new_score;
 
         g_hash_table_iter_init(&iter, child_rsc->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
             if (!pcmk__node_available(node, false, false)) {
                 /* This node will never be promoted, so don't apply the
                  * promotion score, as that may lead to clone shuffling.
                  */
                 continue;
             }
 
             score = promotion_score(child_rsc, node, NULL);
             if (score > 0) {
                 new_score = pcmk__add_scores(node->weight, score);
                 if (new_score != node->weight) { // Could remain INFINITY
                     node->weight = new_score;
                     pe_rsc_trace(rsc,
                                  "Added %s promotion priority (%s) to score "
                                  "on %s (now %s)",
                                  child_rsc->id, pcmk_readable_score(score),
                                  pe__node_name(node),
                                  pcmk_readable_score(new_score));
                 }
             }
 
             if (score > child_rsc->priority) {
                 pe_rsc_trace(rsc,
                              "Updating %s priority to promotion score (%d->%d)",
                              child_rsc->id, child_rsc->priority, score);
                 child_rsc->priority = score;
             }
         }
     }
 }
 
 /*!
  * \internal
  * \brief If a resource's current role is started, change it to unpromoted
  *
  * \param[in,out] data       Resource to update
  * \param[in]     user_data  Ignored
  */
 static void
 set_current_role_unpromoted(void *data, void *user_data)
 {
     pe_resource_t *rsc = (pe_resource_t *) data;
 
     if (rsc->role == RSC_ROLE_STARTED) {
         // Promotable clones should use unpromoted role instead of started
         rsc->role = RSC_ROLE_UNPROMOTED;
     }
     g_list_foreach(rsc->children, set_current_role_unpromoted, NULL);
 }
 
 /*!
  * \internal
  * \brief Set a resource's next role to unpromoted (or stopped if unassigned)
  *
  * \param[in,out] data       Resource to update
  * \param[in]     user_data  Ignored
  */
 static void
 set_next_role_unpromoted(void *data, void *user_data)
 {
     pe_resource_t *rsc = (pe_resource_t *) data;
     GList *assigned = NULL;
 
     rsc->fns->location(rsc, &assigned, FALSE);
     if (assigned == NULL) {
         pe__set_next_role(rsc, RSC_ROLE_STOPPED, "stopped instance");
     } else {
         pe__set_next_role(rsc, RSC_ROLE_UNPROMOTED, "unpromoted instance");
         g_list_free(assigned);
     }
     g_list_foreach(rsc->children, set_next_role_unpromoted, NULL);
 }
 
 /*!
  * \internal
  * \brief Set a resource's next role to promoted if not already set
  *
  * \param[in,out] data       Resource to update
  * \param[in]     user_data  Ignored
  */
 static void
 set_next_role_promoted(void *data, gpointer user_data)
 {
     pe_resource_t *rsc = (pe_resource_t *) data;
 
     if (rsc->next_role == RSC_ROLE_UNKNOWN) {
         pe__set_next_role(rsc, RSC_ROLE_PROMOTED, "promoted instance");
     }
     g_list_foreach(rsc->children, set_next_role_promoted, NULL);
 }
 
 /*!
  * \internal
  * \brief Show instance's promotion score on node where it will be active
  *
  * \param[in,out] instance  Promotable clone instance to show
  */
 static void
 show_promotion_score(pe_resource_t *instance)
 {
     pe_node_t *chosen = instance->fns->location(instance, NULL, FALSE);
 
     if (pcmk_is_set(instance->cluster->flags, pe_flag_show_scores)
         && !pcmk__is_daemon && (instance->cluster->priv != NULL)) {
 
         pcmk__output_t *out = instance->cluster->priv;
 
         out->message(out, "promotion-score", instance, chosen,
                      pcmk_readable_score(instance->sort_index));
     } else {
         pe_rsc_debug(pe__const_top_resource(instance, false),
                      "%s promotion score on %s: sort=%s priority=%s",
                      instance->id,
                      ((chosen == NULL)? "none" : pe__node_name(chosen)),
                      pcmk_readable_score(instance->sort_index),
                      pcmk_readable_score(instance->priority));
     }
 }
 
 /*!
  * \internal
  * \brief Set a clone instance's promotion priority
  *
  * \param[in,out] data       Promotable clone instance to update
  * \param[in]     user_data  Instance's parent clone
  */
 static void
 set_instance_priority(gpointer data, gpointer user_data)
 {
     pe_resource_t *instance = (pe_resource_t *) data;
     const pe_resource_t *clone = (const pe_resource_t *) user_data;
     const pe_node_t *chosen = NULL;
     enum rsc_role_e next_role = RSC_ROLE_UNKNOWN;
     GList *list = NULL;
 
     pe_rsc_trace(clone, "Assigning priority for %s: %s", instance->id,
                  role2text(instance->next_role));
 
     if (instance->fns->state(instance, TRUE) == RSC_ROLE_STARTED) {
         set_current_role_unpromoted(instance, NULL);
     }
 
     // Only an instance that will be active can be promoted
     chosen = instance->fns->location(instance, &list, FALSE);
     if (pcmk__list_of_multiple(list)) {
         pcmk__config_err("Cannot promote non-colocated child %s",
                          instance->id);
     }
     g_list_free(list);
     if (chosen == NULL) {
         return;
     }
 
     next_role = instance->fns->state(instance, FALSE);
     switch (next_role) {
         case RSC_ROLE_STARTED:
         case RSC_ROLE_UNKNOWN:
             // Set instance priority to its promotion score (or -1 if none)
             {
                 bool is_default = false;
 
                 instance->priority = promotion_score(instance, chosen,
                                                       &is_default);
                 if (is_default) {
                     /*
                      * Default to -1 if no value is set. This allows
                      * instances eligible for promotion to be specified
                      * based solely on rsc_location constraints, but
                      * prevents any instance from being promoted if neither
                      * a constraint nor a promotion score is present
                      */
                     instance->priority = -1;
                 }
             }
             break;
 
         case RSC_ROLE_UNPROMOTED:
         case RSC_ROLE_STOPPED:
             // Instance can't be promoted
             instance->priority = -INFINITY;
             break;
 
         case RSC_ROLE_PROMOTED:
             // Nothing needed (re-creating actions after scheduling fencing)
             break;
 
         default:
             CRM_CHECK(FALSE, crm_err("Unknown resource role %d for %s",
                                      next_role, instance->id));
     }
 
     // Add relevant location constraint scores for promoted role
     apply_promoted_locations(instance, instance->rsc_location, chosen);
     apply_promoted_locations(instance, clone->rsc_location, chosen);
 
     // Consider instance's role-based colocations with other resources
     list = pcmk__this_with_colocations(instance);
     for (GList *iter = list; iter != NULL; iter = iter->next) {
         pcmk__colocation_t *cons = (pcmk__colocation_t *) iter->data;
 
         instance->cmds->apply_coloc_score(instance, cons->primary, cons, true);
     }
     g_list_free(list);
 
     instance->sort_index = instance->priority;
     if (next_role == RSC_ROLE_PROMOTED) {
         instance->sort_index = INFINITY;
     }
     pe_rsc_trace(clone, "Assigning %s priority = %d",
                  instance->id, instance->priority);
 }
 
 /*!
  * \internal
  * \brief Set a promotable clone instance's role
  *
  * \param[in,out] data       Promotable clone instance to update
  * \param[in,out] user_data  Pointer to count of instances chosen for promotion
  */
 static void
 set_instance_role(gpointer data, gpointer user_data)
 {
     pe_resource_t *instance = (pe_resource_t *) data;
     int *count = (int *) user_data;
 
     const pe_resource_t *clone = pe__const_top_resource(instance, false);
     pe_node_t *chosen = NULL;
 
     show_promotion_score(instance);
 
     if (instance->sort_index < 0) {
         pe_rsc_trace(clone, "Not supposed to promote instance %s",
                      instance->id);
 
     } else if ((*count < pe__clone_promoted_max(instance))
                || !pcmk_is_set(clone->flags, pe_rsc_managed)) {
         chosen = node_to_be_promoted_on(instance);
     }
 
     if (chosen == NULL) {
         set_next_role_unpromoted(instance, NULL);
         return;
     }
 
     if ((instance->role < RSC_ROLE_PROMOTED)
         && !pcmk_is_set(instance->cluster->flags, pe_flag_have_quorum)
         && (instance->cluster->no_quorum_policy == no_quorum_freeze)) {
         crm_notice("Clone instance %s cannot be promoted without quorum",
                    instance->id);
         set_next_role_unpromoted(instance, NULL);
         return;
     }
 
     chosen->count++;
     pe_rsc_info(clone, "Choosing %s (%s) on %s for promotion",
                 instance->id, role2text(instance->role),
                 pe__node_name(chosen));
     set_next_role_promoted(instance, NULL);
     (*count)++;
 }
 
 /*!
  * \internal
  * \brief Set roles for all instances of a promotable clone
  *
  * \param[in,out] rsc  Promotable clone resource to update
  */
 void
 pcmk__set_instance_roles(pe_resource_t *rsc)
 {
     int promoted = 0;
     GHashTableIter iter;
     pe_node_t *node = NULL;
 
     // Repurpose count to track the number of promoted instances assigned
     g_hash_table_iter_init(&iter, rsc->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
         node->count = 0;
     }
 
     // Set instances' promotion priorities and sort by highest priority first
     g_list_foreach(rsc->children, set_instance_priority, rsc);
     sort_promotable_instances(rsc);
 
     // Choose the first N eligible instances to be promoted
     g_list_foreach(rsc->children, set_instance_role, &promoted);
     pe_rsc_info(rsc, "%s: Promoted %d instances of a possible %d",
                 rsc->id, promoted, pe__clone_promoted_max(rsc));
 }
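
 /* Illustrative sketch, not part of the actual change: pcmk__set_instance_roles()
  * follows a common pattern: score every candidate, sort by score, then take up
  * to a configured maximum. The core of that pattern, with hypothetical types,
  * could be written as follows.
  */

 #include <stdbool.h>
 #include <stdlib.h>

 // Hypothetical candidate used only for this illustration
 struct example_candidate {
     int priority;   // higher is better; negative means ineligible
     bool promoted;  // set to true if chosen
 };

 // Sort candidates in descending order of priority
 static int
 example_cmp_priority(const void *a, const void *b)
 {
     const struct example_candidate *c1 = a;
     const struct example_candidate *c2 = b;

     if (c1->priority != c2->priority) {
         return (c1->priority > c2->priority)? -1 : 1;
     }
     return 0;
 }

 // Promote up to max_promoted of the highest-priority eligible candidates
 static int
 example_choose_promoted(struct example_candidate *candidates, size_t n,
                         int max_promoted)
 {
     int chosen = 0;

     qsort(candidates, n, sizeof(*candidates), example_cmp_priority);
     for (size_t i = 0; (i < n) && (chosen < max_promoted); i++) {
         if (candidates[i].priority >= 0) {
             candidates[i].promoted = true;
             chosen++;
         }
     }
     return chosen;
 }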
 
 /*!
  * \internal
  * \brief Create actions for promotable clone instances
  *
  * \param[in,out] clone          Promotable clone to create actions for
  * \param[out]    any_promoting  Will be set true if any instance is promoting
  * \param[out]    any_demoting   Will be set true if any instance is demoting
  */
 static void
 create_promotable_instance_actions(pe_resource_t *clone,
                                    bool *any_promoting, bool *any_demoting)
 {
     for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
         pe_resource_t *instance = (pe_resource_t *) iter->data;
 
         instance->cmds->create_actions(instance);
         check_for_role_change(instance, any_demoting, any_promoting);
     }
 }
 
 /*!
  * \internal
  * \brief Reset each promotable instance's resource priority
  *
  * Reset the priority of each instance of a promotable clone to the clone's
  * priority. This is done after promotion actions have been scheduled, because
  * instance priorities are temporarily repurposed as promotion scores.
  *
  * \param[in,out] clone  Promotable clone to reset
  */
 static void
 reset_instance_priorities(pe_resource_t *clone)
 {
     for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
         pe_resource_t *instance = (pe_resource_t *) iter->data;
 
         instance->priority = clone->priority;
     }
 }
 
 /*!
  * \internal
  * \brief Create actions specific to promotable clones
  *
  * \param[in,out] clone  Promotable clone to create actions for
  */
 void
 pcmk__create_promotable_actions(pe_resource_t *clone)
 {
     bool any_promoting = false;
     bool any_demoting = false;
 
     // Create actions for each clone instance individually
     create_promotable_instance_actions(clone, &any_promoting, &any_demoting);
 
     // Create pseudo-actions for clone as a whole
     pe__create_promotable_pseudo_ops(clone, any_promoting, any_demoting);
 
     // Undo our temporary repurposing of resource priority for instances
     reset_instance_priorities(clone);
 }
 
 /*!
  * \internal
  * \brief Create internal orderings for a promotable clone's instances
  *
  * \param[in,out] clone  Promotable clone whose instances should be ordered
  */
 void
 pcmk__order_promotable_instances(pe_resource_t *clone)
 {
     pe_resource_t *previous = NULL; // Needed for ordered clones
 
     pcmk__promotable_restart_ordering(clone);
 
     for (GList *iter = clone->children; iter != NULL; iter = iter->next) {
         pe_resource_t *instance = (pe_resource_t *) iter->data;
 
         // Demote before promote
         pcmk__order_resource_actions(instance, RSC_DEMOTE,
                                      instance, RSC_PROMOTE,
                                      pe_order_optional);
 
         order_instance_promotion(clone, instance, previous);
         order_instance_demotion(clone, instance, previous);
         previous = instance;
     }
 }
 
 /*!
  * \internal
  * \brief Update dependent's allowed nodes for colocation with promotable
  *
  * \param[in,out] dependent     Dependent resource to update
  * \param[in]     primary_node  Node where an instance of the primary will be
  * \param[in]     colocation    Colocation constraint to apply
  */
 static void
 update_dependent_allowed_nodes(pe_resource_t *dependent,
                                const pe_node_t *primary_node,
                                const pcmk__colocation_t *colocation)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
     const char *primary_value = NULL;
     const char *attr = colocation->node_attribute;
 
     if (colocation->score >= INFINITY) {
         return; // Colocation is mandatory, so allowed node scores don't matter
     }
 
     // Get value of primary's colocation node attribute
     primary_value = pe_node_attribute_raw(primary_node, attr);
 
     pe_rsc_trace(colocation->primary,
                  "Applying %s (%s with %s on %s by %s @%d) to %s",
                  colocation->id, colocation->dependent->id,
                  colocation->primary->id, pe__node_name(primary_node), attr,
                  colocation->score, dependent->id);
 
     g_hash_table_iter_init(&iter, dependent->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
         const char *dependent_value = pe_node_attribute_raw(node, attr);
 
         if (pcmk__str_eq(primary_value, dependent_value, pcmk__str_casei)) {
             node->weight = pcmk__add_scores(node->weight, colocation->score);
             pe_rsc_trace(colocation->primary,
                          "Added %s score (%s) to %s (now %s)",
                          colocation->id, pcmk_readable_score(colocation->score),
                          pe__node_name(node),
                          pcmk_readable_score(node->weight));
         }
     }
 }
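
 /* Illustrative sketch, not part of the actual change: the loop above only
  * adjusts nodes whose colocation node attribute matches the primary's value,
  * and only for optional colocations. A stand-alone distillation of that
  * matching rule, with hypothetical types, might be:
  */

 #include <stddef.h>
 #include <strings.h>

 // Hypothetical node summary used only for this illustration
 struct example_node {
     const char *attr_value;   // value of the colocation node attribute
     int weight;               // allowed-node score
 };

 // Add the colocation score to every node whose attribute matches the primary's
 static void
 example_apply_optional_colocation(struct example_node *nodes, size_t n,
                                   const char *primary_value, int score)
 {
     for (size_t i = 0; i < n; i++) {
         if ((nodes[i].attr_value != NULL) && (primary_value != NULL)
             && (strcasecmp(nodes[i].attr_value, primary_value) == 0)) {
             // Real code uses saturating score addition here
             nodes[i].weight += score;
         }
     }
 }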
 
 /*!
  * \internal
  * \brief Update dependent for a colocation with a promotable clone
  *
  * \param[in]     primary     Primary resource in the colocation
  * \param[in,out] dependent   Dependent resource in the colocation
  * \param[in]     colocation  Colocation constraint to apply
  */
 void
 pcmk__update_dependent_with_promotable(const pe_resource_t *primary,
                                        pe_resource_t *dependent,
                                        const pcmk__colocation_t *colocation)
 {
     GList *affected_nodes = NULL;
 
     /* Build a list of all nodes where an instance of the primary will be, and
      * (for optional colocations) update the dependent's allowed node scores for
      * each one.
      */
     for (GList *iter = primary->children; iter != NULL; iter = iter->next) {
         pe_resource_t *instance = (pe_resource_t *) iter->data;
         pe_node_t *node = instance->fns->location(instance, NULL, FALSE);
 
         if (node == NULL) {
             continue;
         }
         if (instance->fns->state(instance, FALSE) == colocation->primary_role) {
             update_dependent_allowed_nodes(dependent, node, colocation);
             affected_nodes = g_list_prepend(affected_nodes, node);
         }
     }
 
     /* For mandatory colocations, add the primary's node score to the
      * dependent's node score for each affected node, and ban the dependent
      * from all other nodes.
      *
      * However, skip this for promoted-with-promoted colocations, otherwise
      * inactive dependent instances can't start (in the unpromoted role).
      */
     if ((colocation->score >= INFINITY)
         && ((colocation->dependent_role != RSC_ROLE_PROMOTED)
             || (colocation->primary_role != RSC_ROLE_PROMOTED))) {
 
         pe_rsc_trace(colocation->primary,
                      "Applying %s (mandatory %s with %s) to %s",
                      colocation->id, colocation->dependent->id,
                      colocation->primary->id, dependent->id);
         node_list_exclude(dependent->allowed_nodes, affected_nodes,
                           TRUE);
     }
     g_list_free(affected_nodes);
 }
 
 /*!
  * \internal
  * \brief Update dependent priority for colocation with promotable
  *
  * \param[in]     primary     Primary resource in the colocation
  * \param[in,out] dependent   Dependent resource in the colocation
  * \param[in]     colocation  Colocation constraint to apply
  */
 void
 pcmk__update_promotable_dependent_priority(const pe_resource_t *primary,
                                            pe_resource_t *dependent,
                                            const pcmk__colocation_t *colocation)
 {
     pe_resource_t *primary_instance = NULL;
 
     // Look for a primary instance where dependent will be
     primary_instance = pcmk__find_compatible_instance(dependent, primary,
                                                       colocation->primary_role,
                                                       false);
 
     if (primary_instance != NULL) {
         // Add primary instance's priority to dependent's
         int new_priority = pcmk__add_scores(dependent->priority,
                                             colocation->score);
 
         pe_rsc_trace(colocation->primary,
                      "Applying %s (%s with %s) to %s priority (%s + %s = %s)",
                      colocation->id, colocation->dependent->id,
                      colocation->primary->id, dependent->id,
                      pcmk_readable_score(dependent->priority),
                      pcmk_readable_score(colocation->score),
                      pcmk_readable_score(new_priority));
         dependent->priority = new_priority;
 
     } else if (colocation->score >= INFINITY) {
         // Mandatory colocation, but primary won't be here
         pe_rsc_trace(colocation->primary,
                      "Applying %s (%s with %s) to %s: can't be promoted",
                      colocation->id, colocation->dependent->id,
                      colocation->primary->id, dependent->id);
         dependent->priority = -INFINITY;
     }
 }
diff --git a/lib/pengine/common.c b/lib/pengine/common.c
index d10628e322..b820abcb23 100644
--- a/lib/pengine/common.c
+++ b/lib/pengine/common.c
@@ -1,611 +1,627 @@
 /*
- * Copyright 2004-2022 the Pacemaker project contributors
+ * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 #include <crm/common/util.h>
 
 #include <glib.h>
 
 #include <crm/pengine/internal.h>
 
 gboolean was_processing_error = FALSE;
 gboolean was_processing_warning = FALSE;
 
 static bool
 check_placement_strategy(const char *value)
 {
     return pcmk__strcase_any_of(value, "default", "utilization", "minimal",
                            "balanced", NULL);
 }
 
 static pcmk__cluster_option_t pe_opts[] = {
     /* name, old name, type, allowed values,
      * default value, validator,
      * short description,
      * long description
      */
     {
         "no-quorum-policy", NULL, "select", "stop, freeze, ignore, demote, suicide",
         "stop", pcmk__valid_quorum,
         N_("What to do when the cluster does not have quorum"),
         NULL
     },
     {
         "symmetric-cluster", NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("Whether resources can run on any node by default"),
         NULL
     },
     {
         "maintenance-mode", NULL, "boolean", NULL,
         "false", pcmk__valid_boolean,
         N_("Whether the cluster should refrain from monitoring, starting, "
             "and stopping resources"),
         NULL
     },
     {
         "start-failure-is-fatal", NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("Whether a start failure should prevent a resource from being "
             "recovered on the same node"),
         N_("When true, the cluster will immediately ban a resource from a node "
             "if it fails to start there. When false, the cluster will instead "
             "check the resource's fail count against its migration-threshold.")
     },
     {
         "enable-startup-probes", NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("Whether the cluster should check for active resources during start-up"),
         NULL
     },
     {
         XML_CONFIG_ATTR_SHUTDOWN_LOCK, NULL, "boolean", NULL,
         "false", pcmk__valid_boolean,
         N_("Whether to lock resources to a cleanly shut down node"),
         N_("When true, resources active on a node when it is cleanly shut down "
             "are kept \"locked\" to that node (not allowed to run elsewhere) "
             "until they start again on that node after it rejoins (or for at "
             "most shutdown-lock-limit, if set). Stonith resources and "
             "Pacemaker Remote connections are never locked. Clone and bundle "
             "instances and the promoted role of promotable clones are "
             "currently never locked, though support could be added in a future "
             "release.")
     },
     {
         XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT, NULL, "time", NULL,
         "0", pcmk__valid_interval_spec,
         N_("Do not lock resources to a cleanly shut down node longer than "
            "this"),
         N_("If shutdown-lock is true and this is set to a nonzero time "
             "duration, shutdown locks will expire after this much time has "
             "passed since the shutdown was initiated, even if the node has not "
             "rejoined.")
     },
 
     // Fencing-related options
     {
         "stonith-enabled", NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("*** Advanced Use Only *** "
             "Whether nodes may be fenced as part of recovery"),
         N_("If false, unresponsive nodes are immediately assumed to be harmless, "
             "and resources that were active on them may be recovered "
             "elsewhere. This can result in a \"split-brain\" situation, "
             "potentially leading to data loss and/or service unavailability.")
     },
     {
         "stonith-action", NULL, "select", "reboot, off, poweroff",
         "reboot", pcmk__is_fencing_action,
         N_("Action to send to fence device when a node needs to be fenced "
             "(\"poweroff\" is a deprecated alias for \"off\")"),
         NULL
     },
     {
         "stonith-timeout", NULL, "time", NULL,
         "60s", pcmk__valid_interval_spec,
         N_("*** Advanced Use Only *** Unused by Pacemaker"),
         N_("This value is not used by Pacemaker, but is kept for backward "
             "compatibility, and certain legacy fence agents might use it.")
     },
     {
         XML_ATTR_HAVE_WATCHDOG, NULL, "boolean", NULL,
         "false", pcmk__valid_boolean,
         N_("Whether watchdog integration is enabled"),
         N_("This is set automatically by the cluster according to whether SBD "
             "is detected to be in use. User-configured values are ignored. "
             "The value `true` is meaningful if diskless SBD is used and "
             "`stonith-watchdog-timeout` is nonzero. In that case, if fencing "
             "is required, watchdog-based self-fencing will be performed via "
             "SBD without requiring a fencing resource explicitly configured.")
     },
     {
         "concurrent-fencing", NULL, "boolean", NULL,
         PCMK__CONCURRENT_FENCING_DEFAULT, pcmk__valid_boolean,
         N_("Allow performing fencing operations in parallel"),
         NULL
     },
     {
         "startup-fencing", NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("*** Advanced Use Only *** Whether to fence unseen nodes at start-up"),
         N_("Setting this to false may lead to a \"split-brain\" situation,"
             "potentially leading to data loss and/or service unavailability.")
     },
     {
         XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY, NULL, "time", NULL,
         "0", pcmk__valid_interval_spec,
         N_("Apply fencing delay targeting the lost nodes with the highest total resource priority"),
         N_("Apply specified delay for the fencings that are targeting the lost "
             "nodes with the highest total resource priority in case we don't "
             "have the majority of the nodes in our cluster partition, so that "
             "the more significant nodes potentially win any fencing match, "
             "which is especially meaningful under split-brain of 2-node "
             "cluster. A promoted resource instance takes the base priority + 1 "
             "on calculation if the base priority is not 0. Any static/random "
             "delays that are introduced by `pcmk_delay_base/max` configured "
             "for the corresponding fencing resources will be added to this "
             "delay. This delay should be significantly greater than, safely "
             "twice, the maximum `pcmk_delay_base/max`. By default, priority "
             "fencing delay is disabled.")
     },
 
     {
         XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT, NULL, "time", NULL,
         "10min", pcmk__valid_interval_spec,
         N_("How long to wait for a node that has joined the cluster to join "
            "the process group"),
         N_("A node that has joined the cluster can be pending on joining the "
            "process group. We wait up to this much time for it. If it times "
            "out, fencing targeting the node will be issued if enabled.")
     },
     {
         "cluster-delay", NULL, "time", NULL,
         "60s", pcmk__valid_interval_spec,
         N_("Maximum time for node-to-node communication"),
         N_("The node elected Designated Controller (DC) will consider an action "
             "failed if it does not get a response from the node executing the "
             "action within this time (after considering the action's own "
             "timeout). The \"correct\" value will depend on the speed and "
             "load of your network and cluster nodes.")
     },
     {
         "batch-limit", NULL, "integer", NULL,
         "0", pcmk__valid_number,
         N_("Maximum number of jobs that the cluster may execute in parallel "
             "across all nodes"),
         N_("The \"correct\" value will depend on the speed and load of your "
             "network and cluster nodes. If set to 0, the cluster will "
             "impose a dynamically calculated limit when any node has a "
             "high load.")
     },
     {
         "migration-limit", NULL, "integer", NULL,
         "-1", pcmk__valid_number,
         N_("The number of live migration actions that the cluster is allowed "
             "to execute in parallel on a node (-1 means no limit)")
     },
 
     /* Orphans and stopping */
     {
         "stop-all-resources", NULL, "boolean", NULL,
         "false", pcmk__valid_boolean,
         N_("Whether the cluster should stop all active resources"),
         NULL
     },
     {
         "stop-orphan-resources", NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("Whether to stop resources that were removed from the configuration"),
         NULL
     },
     {
         "stop-orphan-actions", NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("Whether to cancel recurring actions removed from the configuration"),
         NULL
     },
     {
         "remove-after-stop", NULL, "boolean", NULL,
         "false", pcmk__valid_boolean,
         N_("*** Deprecated *** Whether to remove stopped resources from "
             "the executor"),
         N_("Values other than default are poorly tested and potentially dangerous."
             " This option will be removed in a future release.")
     },
 
     /* Storing inputs */
     {
         "pe-error-series-max", NULL, "integer", NULL,
         "-1", pcmk__valid_number,
         N_("The number of scheduler inputs resulting in errors to save"),
         N_("Zero to disable, -1 to store unlimited.")
     },
     {
         "pe-warn-series-max",  NULL, "integer", NULL,
         "5000", pcmk__valid_number,
         N_("The number of scheduler inputs resulting in warnings to save"),
         N_("Zero to disable, -1 to store unlimited.")
     },
     {
         "pe-input-series-max", NULL, "integer", NULL,
         "4000", pcmk__valid_number,
         N_("The number of scheduler inputs without errors or warnings to save"),
         N_("Zero to disable, -1 to store unlimited.")
     },
 
     /* Node health */
     {
         PCMK__OPT_NODE_HEALTH_STRATEGY, NULL, "select",
         PCMK__VALUE_NONE ", " PCMK__VALUE_MIGRATE_ON_RED ", "
             PCMK__VALUE_ONLY_GREEN ", " PCMK__VALUE_PROGRESSIVE ", "
             PCMK__VALUE_CUSTOM,
         PCMK__VALUE_NONE, pcmk__validate_health_strategy,
         N_("How cluster should react to node health attributes"),
         N_("Requires external entities to create node attributes (named with "
             "the prefix \"#health\") with values \"red\", "
             "\"yellow\", or \"green\".")
     },
     {
         PCMK__OPT_NODE_HEALTH_BASE, NULL, "integer", NULL,
         "0", pcmk__valid_number,
         N_("Base health score assigned to a node"),
         N_("Only used when \"node-health-strategy\" is set to \"progressive\".")
     },
     {
         PCMK__OPT_NODE_HEALTH_GREEN, NULL, "integer", NULL,
         "0", pcmk__valid_number,
         N_("The score to use for a node health attribute whose value is \"green\""),
         N_("Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive\".")
     },
     {
         PCMK__OPT_NODE_HEALTH_YELLOW, NULL, "integer", NULL,
         "0", pcmk__valid_number,
         N_("The score to use for a node health attribute whose value is \"yellow\""),
         N_("Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive\".")
     },
     {
         PCMK__OPT_NODE_HEALTH_RED, NULL, "integer", NULL,
         "-INFINITY", pcmk__valid_number,
         N_("The score to use for a node health attribute whose value is \"red\""),
         N_("Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive\".")
     },
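     /* Illustrative example (attribute names and values are hypothetical):
      * with the "progressive" strategy, node-health-base=0,
      * node-health-green=0, and node-health-yellow=-10, a node with one
      * "green" and one "yellow" "#health*" attribute gets a combined health
      * score of 0 + 0 + (-10) = -10 applied to resource placement on it.
      */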
 
     /* Placement strategy */
     {
         "placement-strategy", NULL, "select",
         "default, utilization, minimal, balanced",
         "default", check_placement_strategy,
         N_("How the cluster should allocate resources to nodes"),
         NULL
     },
 };
 
 void
 pe_metadata(pcmk__output_t *out)
 {
     const char *desc_short = "Pacemaker scheduler options";
     const char *desc_long = "Cluster options used by Pacemaker's scheduler";
 
     gchar *s = pcmk__format_option_metadata("pacemaker-schedulerd", desc_short,
                                             desc_long, pe_opts,
                                             PCMK__NELEM(pe_opts));
     out->output_xml(out, "metadata", s);
     g_free(s);
 }
 
 void
 verify_pe_options(GHashTable * options)
 {
     pcmk__validate_cluster_options(options, pe_opts, PCMK__NELEM(pe_opts));
 }
 
 const char *
 pe_pref(GHashTable * options, const char *name)
 {
     return pcmk__cluster_option(options, pe_opts, PCMK__NELEM(pe_opts), name);
 }
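 
 /* A minimal usage sketch (illustrative only; "config_hash" is a hypothetical
  * GHashTable of cluster property name/value pairs taken from the CIB):
  *
  *     verify_pe_options(config_hash);
  *     policy = pe_pref(config_hash, "no-quorum-policy");
  *
  * pe_pref() returns the option's default when no value has been configured.
  */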
 
 const char *
 fail2text(enum action_fail_response fail)
 {
     const char *result = "<unknown>";
 
     switch (fail) {
         case action_fail_ignore:
             result = "ignore";
             break;
         case action_fail_demote:
             result = "demote";
             break;
         case action_fail_block:
             result = "block";
             break;
         case action_fail_recover:
             result = "recover";
             break;
         case action_fail_migrate:
             result = "migrate";
             break;
         case action_fail_stop:
             result = "stop";
             break;
         case action_fail_fence:
             result = "fence";
             break;
         case action_fail_standby:
             result = "standby";
             break;
         case action_fail_restart_container:
             result = "restart-container";
             break;
         case action_fail_reset_remote:
             result = "reset-remote";
             break;
     }
     return result;
 }
 
 enum action_tasks
 text2task(const char *task)
 {
     if (pcmk__str_eq(task, CRMD_ACTION_STOP, pcmk__str_casei)) {
         return stop_rsc;
     } else if (pcmk__str_eq(task, CRMD_ACTION_STOPPED, pcmk__str_casei)) {
         return stopped_rsc;
     } else if (pcmk__str_eq(task, CRMD_ACTION_START, pcmk__str_casei)) {
         return start_rsc;
     } else if (pcmk__str_eq(task, CRMD_ACTION_STARTED, pcmk__str_casei)) {
         return started_rsc;
     } else if (pcmk__str_eq(task, CRM_OP_SHUTDOWN, pcmk__str_casei)) {
         return shutdown_crm;
     } else if (pcmk__str_eq(task, CRM_OP_FENCE, pcmk__str_casei)) {
         return stonith_node;
     } else if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
         return monitor_rsc;
     } else if (pcmk__str_eq(task, CRMD_ACTION_NOTIFY, pcmk__str_casei)) {
         return action_notify;
     } else if (pcmk__str_eq(task, CRMD_ACTION_NOTIFIED, pcmk__str_casei)) {
         return action_notified;
     } else if (pcmk__str_eq(task, CRMD_ACTION_PROMOTE, pcmk__str_casei)) {
         return action_promote;
     } else if (pcmk__str_eq(task, CRMD_ACTION_DEMOTE, pcmk__str_casei)) {
         return action_demote;
     } else if (pcmk__str_eq(task, CRMD_ACTION_PROMOTED, pcmk__str_casei)) {
         return action_promoted;
     } else if (pcmk__str_eq(task, CRMD_ACTION_DEMOTED, pcmk__str_casei)) {
         return action_demoted;
     }
 #if SUPPORT_TRACING
     if (pcmk__str_eq(task, CRMD_ACTION_CANCEL, pcmk__str_casei)) {
         return no_action;
     } else if (pcmk__str_eq(task, CRMD_ACTION_DELETE, pcmk__str_casei)) {
         return no_action;
     } else if (pcmk__str_eq(task, CRMD_ACTION_STATUS, pcmk__str_casei)) {
         return no_action;
     } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATE, pcmk__str_casei)) {
         return no_action;
     } else if (pcmk__str_eq(task, CRMD_ACTION_MIGRATED, pcmk__str_casei)) {
         return no_action;
     }
     crm_trace("Unsupported action: %s", task);
 #endif
 
     return no_action;
 }
 
 const char *
 task2text(enum action_tasks task)
 {
     const char *result = "<unknown>";
 
     switch (task) {
         case no_action:
             result = "no_action";
             break;
         case stop_rsc:
             result = CRMD_ACTION_STOP;
             break;
         case stopped_rsc:
             result = CRMD_ACTION_STOPPED;
             break;
         case start_rsc:
             result = CRMD_ACTION_START;
             break;
         case started_rsc:
             result = CRMD_ACTION_STARTED;
             break;
         case shutdown_crm:
             result = CRM_OP_SHUTDOWN;
             break;
         case stonith_node:
             result = CRM_OP_FENCE;
             break;
         case monitor_rsc:
             result = CRMD_ACTION_STATUS;
             break;
         case action_notify:
             result = CRMD_ACTION_NOTIFY;
             break;
         case action_notified:
             result = CRMD_ACTION_NOTIFIED;
             break;
         case action_promote:
             result = CRMD_ACTION_PROMOTE;
             break;
         case action_promoted:
             result = CRMD_ACTION_PROMOTED;
             break;
         case action_demote:
             result = CRMD_ACTION_DEMOTE;
             break;
         case action_demoted:
             result = CRMD_ACTION_DEMOTED;
             break;
     }
 
     return result;
 }
 
 const char *
 role2text(enum rsc_role_e role)
 {
     switch (role) {
         case RSC_ROLE_UNKNOWN:
             return RSC_ROLE_UNKNOWN_S;
         case RSC_ROLE_STOPPED:
             return RSC_ROLE_STOPPED_S;
         case RSC_ROLE_STARTED:
             return RSC_ROLE_STARTED_S;
         case RSC_ROLE_UNPROMOTED:
 #ifdef PCMK__COMPAT_2_0
             return RSC_ROLE_UNPROMOTED_LEGACY_S;
 #else
             return RSC_ROLE_UNPROMOTED_S;
 #endif
         case RSC_ROLE_PROMOTED:
 #ifdef PCMK__COMPAT_2_0
             return RSC_ROLE_PROMOTED_LEGACY_S;
 #else
             return RSC_ROLE_PROMOTED_S;
 #endif
     }
     CRM_CHECK(role >= RSC_ROLE_UNKNOWN, return RSC_ROLE_UNKNOWN_S);
     CRM_CHECK(role < RSC_ROLE_MAX, return RSC_ROLE_UNKNOWN_S);
     // coverity[dead_error_line]
     return RSC_ROLE_UNKNOWN_S;
 }
 
 enum rsc_role_e
 text2role(const char *role)
 {
     CRM_ASSERT(role != NULL);
     if (pcmk__str_eq(role, RSC_ROLE_STOPPED_S, pcmk__str_casei)) {
         return RSC_ROLE_STOPPED;
     } else if (pcmk__str_eq(role, RSC_ROLE_STARTED_S, pcmk__str_casei)) {
         return RSC_ROLE_STARTED;
     } else if (pcmk__strcase_any_of(role, RSC_ROLE_UNPROMOTED_S,
                                     RSC_ROLE_UNPROMOTED_LEGACY_S, NULL)) {
         return RSC_ROLE_UNPROMOTED;
     } else if (pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
                                     RSC_ROLE_PROMOTED_LEGACY_S, NULL)) {
         return RSC_ROLE_PROMOTED;
     } else if (pcmk__str_eq(role, RSC_ROLE_UNKNOWN_S, pcmk__str_casei)) {
         return RSC_ROLE_UNKNOWN;
     }
     crm_err("Unknown role: %s", role);
     return RSC_ROLE_UNKNOWN;
 }
 
 void
 add_hash_param(GHashTable * hash, const char *name, const char *value)
 {
     CRM_CHECK(hash != NULL, return);
 
     crm_trace("Adding name='%s' value='%s' to hash table",
               pcmk__s(name, "<null>"), pcmk__s(value, "<null>"));
     if (name == NULL || value == NULL) {
         return;
 
     } else if (pcmk__str_eq(value, "#default", pcmk__str_casei)) {
         return;
 
     } else if (g_hash_table_lookup(hash, name) == NULL) {
         g_hash_table_insert(hash, strdup(name), strdup(value));
     }
 }
 
+/*!
+ * \internal
+ * \brief Look up an attribute value on the appropriate node
+ *
+ * If \p node is a guest node and either the \c XML_RSC_ATTR_TARGET meta
+ * attribute is set to "host" for \p rsc or \p force_host is \c true, query the
+ * attribute on the node's host. Otherwise, query the attribute on \p node
+ * itself.
+ *
+ * \param[in] node        Node to query attribute value on by default
+ * \param[in] name        Name of attribute to query
+ * \param[in] rsc         Resource on whose behalf we're querying
+ * \param[in] node_type   Type of resource location lookup
+ * \param[in] force_host  Force a lookup on the guest node's host, regardless of
+ *                        the \c XML_RSC_ATTR_TARGET value
+ *
+ * \return Value of the attribute on \p node or on the host of \p node
+ *
+ * \note If \p force_host is \c true, \p node \e must be a guest node.
+ */
 const char *
-pe_node_attribute_calculated(const pe_node_t *node, const char *name,
-                             const pe_resource_t *rsc,
-                             enum pe__rsc_node node_type)
+pe__node_attribute_calculated(const pe_node_t *node, const char *name,
+                              const pe_resource_t *rsc,
+                              enum pe__rsc_node node_type,
+                              bool force_host)
 {
+    // @TODO: Use pe__is_guest_node() after merging libpe_{rules,status}
+    bool is_guest = (node != NULL) && (node->details->type == node_remote)
+                    && (node->details->remote_rsc != NULL)
+                    && (node->details->remote_rsc->container != NULL);
     const char *source = NULL;
     const char *node_type_s = NULL;
     const char *reason = NULL;
 
     const pe_resource_t *container = NULL;
     const pe_node_t *host = NULL;
 
-    if(node == NULL) {
-        return NULL;
-
-    } else if(rsc == NULL) {
-        return g_hash_table_lookup(node->details->attrs, name);
-    }
+    CRM_ASSERT((node != NULL) && (name != NULL) && (rsc != NULL)
+               && (!force_host || is_guest));
 
+    /* Ignore XML_RSC_ATTR_TARGET if the node is not a guest node. Setting it
+     * for a resource that is not on a guest node is a user configuration
+     * error, so query the node's own attributes in that case.
+     */
     source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET);
-    if(source == NULL || !pcmk__str_eq("host", source, pcmk__str_casei)) {
+    if (!force_host
+        && (!is_guest || !pcmk__str_eq(source, "host", pcmk__str_casei))) {
+
         return g_hash_table_lookup(node->details->attrs, name);
     }
 
-    /* Use attributes set for the containers location
-     * instead of for the container itself
-     *
-     * Useful when the container is using the host's local
-     * storage
-     */
-
-    CRM_ASSERT(node->details->remote_rsc != NULL);
-
     container = node->details->remote_rsc->container;
-    CRM_ASSERT(container != NULL);
 
     switch (node_type) {
         case pe__rsc_node_assigned:
             node_type_s = "assigned";
             host = container->allocated_to;
             if (host == NULL) {
                 reason = "not assigned";
             }
             break;
 
         case pe__rsc_node_current:
             node_type_s = "current";
 
             if (container->running_on != NULL) {
                 host = container->running_on->data;
             }
             if (host == NULL) {
                 reason = "inactive";
             }
             break;
 
         default:
             // Add support for other enum pe__rsc_node values if needed
             CRM_ASSERT(false);
             break;
     }
 
     if (host != NULL) {
         const char *value = g_hash_table_lookup(host->details->attrs, name);
 
         pe_rsc_trace(rsc,
                      "%s: Value lookup for %s on %s container host %s %s%s",
                      rsc->id, name, node_type_s, pe__node_name(host),
                      ((value != NULL)? "succeeded: " : "failed"),
                      pcmk__s(value, ""));
         return value;
     }
     pe_rsc_trace(rsc,
                  "%s: Not looking for %s on %s container host: %s is %s",
                  rsc->id, name, node_type_s, container->id, reason);
     return NULL;
 }
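 
 /* A minimal usage sketch ("rsc" and "node" are assumed to be valid pointers
  * from the working set; the attribute name is only an example): query the
  * value of "#kind" as seen by rsc, using the guest node's current container
  * host when XML_RSC_ATTR_TARGET is set to "host":
  *
  *     const char *kind = pe__node_attribute_calculated(node, "#kind", rsc,
  *                                                      pe__rsc_node_current,
  *                                                      false);
  */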
 
 const char *
 pe_node_attribute_raw(const pe_node_t *node, const char *name)
 {
     if(node == NULL) {
         return NULL;
     }
     return g_hash_table_lookup(node->details->attrs, name);
 }