diff --git a/include/crm/common/Makefile.am b/include/crm/common/Makefile.am
index 5a3935b167..79da3aab76 100644
--- a/include/crm/common/Makefile.am
+++ b/include/crm/common/Makefile.am
@@ -1,60 +1,62 @@
 #
 # Copyright 2004-2023 the Pacemaker project contributors
 #
 # The version control history for this file may have further details.
 #
 # This source code is licensed under the GNU General Public License version 2
 # or later (GPLv2+) WITHOUT ANY WARRANTY.
 #
 
 MAINTAINERCLEANFILES = Makefile.in
 
 headerdir=$(pkgincludedir)/crm/common
 
 header_HEADERS = acl.h \
 		 actions.h		\
 				 agents.h \
 				 agents_compat.h \
 				 cib.h \
 				 ipc.h \
 				 ipc_attrd_internal.h \
 				 ipc_controld.h \
 				 ipc_pacemakerd.h \
 				 ipc_schedulerd.h \
 				 iso8601.h \
 				 logging.h \
 				 logging_compat.h \
 				 mainloop.h \
 				 mainloop_compat.h \
 				 nvpair.h \
 				 output.h \
 				 resources.h		\
 				 results.h \
 				 results_compat.h \
 				 roles.h		\
 				 scheduler.h		\
 				 util.h \
 				 util_compat.h \
 				 xml.h \
 				 xml_compat.h
 
 noinst_HEADERS = acl_internal.h 	\
 				 actions_internal.h	\
 				 alerts_internal.h	\
 				 attrd_internal.h	\
 				 cmdline_internal.h	\
 				 health_internal.h	\
 				 internal.h		\
 				 io_internal.h		\
 				 ipc_internal.h		\
 				 iso8601_internal.h	\
 				 lists_internal.h	\
 				 logging_internal.h	\
 				 messages_internal.h	\
 				 options_internal.h	\
 				 output_internal.h	\
 				 remote_internal.h	\
 				 results_internal.h	\
+				 roles_internal.h	\
+				 scheduler_internal.h	\
 				 strings_internal.h	\
 				 unittest_internal.h \
 				 xml_internal.h
diff --git a/include/crm/common/roles_internal.h b/include/crm/common/roles_internal.h
new file mode 100644
index 0000000000..c544519d10
--- /dev/null
+++ b/include/crm/common/roles_internal.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_ROLES_INTERNAL__H
+#  define PCMK__CRM_COMMON_ROLES_INTERNAL__H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// String equivalents of enum rsc_role_e
+#define PCMK__ROLE_UNKNOWN      "Unknown"
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_ROLES_INTERNAL__H
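
The new internal header gives libcrmcommon callers a project-private string for the unknown role, replacing the public RSC_ROLE_UNKNOWN_S macro. A minimal sketch of the intended normalization, mirroring what pcmk__new_colocation() does later in this patch (normalize_coloc_role() is illustrative only and not part of the change):

    #include <strings.h>
    #include <crm/common/roles_internal.h>   // PCMK__ROLE_UNKNOWN

    /* Illustrative helper: treat a missing or "Started" role as unknown,
     * the same normalization pcmk__new_colocation() performs.
     */
    static const char *
    normalize_coloc_role(const char *role)
    {
        if ((role == NULL) || (strcasecmp(role, "Started") == 0)) {
            return PCMK__ROLE_UNKNOWN;
        }
        return role;
    }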
diff --git a/include/crm/common/scheduler_internal.h b/include/crm/common/scheduler_internal.h
new file mode 100644
index 0000000000..e90a9ff7e2
--- /dev/null
+++ b/include/crm/common/scheduler_internal.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright 2004-2023 the Pacemaker project contributors
+ *
+ * The version control history for this file may have further details.
+ *
+ * This source code is licensed under the GNU Lesser General Public License
+ * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
+ */
+
+#ifndef PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H
+#  define PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H
+
+#include <crm/common/roles_internal.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // PCMK__CRM_COMMON_SCHEDULER_INTERNAL__H
diff --git a/include/crm/pengine/common.h b/include/crm/pengine/common.h
index 580e13e550..85c734681a 100644
--- a/include/crm/pengine/common.h
+++ b/include/crm/pengine/common.h
@@ -1,184 +1,183 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__CRM_PENGINE_COMMON__H
 #  define PCMK__CRM_PENGINE_COMMON__H
 
 #  include <glib.h>
 #  include <regex.h>
 #  include <crm/common/iso8601.h>
 #  include <crm/common/scheduler.h>
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 extern gboolean was_processing_error;
 extern gboolean was_processing_warning;
 
 /* The order is (partially) significant here; the values from action_fail_ignore
  * through action_fail_fence are in order of increasing severity.
  *
  * @COMPAT The values should be ordered and numbered per the "TODO" comments
  *         below, so all values are in order of severity and there is room for
  *         future additions, but that would break API compatibility.
  * @TODO   For now, we just use a function to compare the values specially, but
  *         at the next compatibility break, we should arrange things properly.
  */
 enum action_fail_response {
     action_fail_ignore,     // @TODO = 10
     // @TODO action_fail_demote = 20,
     action_fail_recover,    // @TODO = 30
     // @TODO action_fail_reset_remote = 40,
     // @TODO action_fail_restart_container = 50,
     action_fail_migrate,    // @TODO = 60
     action_fail_block,      // @TODO = 70
     action_fail_stop,       // @TODO = 80
     action_fail_standby,    // @TODO = 90
     action_fail_fence,      // @TODO = 100
 
     // @COMPAT Values below here are out of order for API compatibility
 
     action_fail_restart_container,
 
     /* This is reserved for internal use for remote node connection resources.
      * Fence the remote node if stonith is enabled, otherwise attempt to recover
      * the connection resource. This allows us to specify types of connection
      * resource failures that should result in fencing the remote node
      * (for example, recurring monitor failures).
      */
     action_fail_reset_remote,
 
     action_fail_demote,
 };
 
 /* the "done" action must be the "pre" action +1 */
 enum action_tasks {
     no_action,
     monitor_rsc,
     stop_rsc,
     stopped_rsc,
     start_rsc,
     started_rsc,
     action_notify,
     action_notified,
     action_promote,
     action_promoted,
     action_demote,
     action_demoted,
     shutdown_crm,
     stonith_node
 };
 
 enum rsc_start_requirement {
     rsc_req_nothing,            /* Allowed by custom_action() */
     rsc_req_quorum,             /* Enforced by custom_action() */
     rsc_req_stonith             /* Enforced by native_start_constraints() */
 };
 
-#  define RSC_ROLE_UNKNOWN_S "Unknown"
 #  define RSC_ROLE_STOPPED_S "Stopped"
 #  define RSC_ROLE_STARTED_S "Started"
 #  define RSC_ROLE_UNPROMOTED_S         "Unpromoted"
 #  define RSC_ROLE_PROMOTED_S           "Promoted"
 #  define RSC_ROLE_UNPROMOTED_LEGACY_S  "Slave"
 #  define RSC_ROLE_PROMOTED_LEGACY_S    "Master"
 
 //! Deprecated
 enum pe_print_options {
     pe_print_log            = (1 << 0),
     pe_print_html           = (1 << 1),
     pe_print_ncurses        = (1 << 2),
     pe_print_printf         = (1 << 3),
     pe_print_dev            = (1 << 4),  //! Ignored
     pe_print_details        = (1 << 5),  //! Ignored
     pe_print_max_details    = (1 << 6),  //! Ignored
     pe_print_rsconly        = (1 << 7),
     pe_print_ops            = (1 << 8),
     pe_print_suppres_nl     = (1 << 9),
     pe_print_xml            = (1 << 10),
     pe_print_brief          = (1 << 11),
     pe_print_pending        = (1 << 12),
     pe_print_clone_details  = (1 << 13),
     pe_print_clone_active   = (1 << 14), // Print clone instances only if active
     pe_print_implicit       = (1 << 15)  // Print implicitly created resources
 };
 
 const char *task2text(enum action_tasks task);
 enum action_tasks text2task(const char *task);
 enum rsc_role_e text2role(const char *role);
 const char *role2text(enum rsc_role_e role);
 const char *fail2text(enum action_fail_response fail);
 
 const char *pe_pref(GHashTable * options, const char *name);
 
 /*!
  * \brief Get readable description of a recovery type
  *
  * \param[in] type  Recovery type
  *
  * \return Static string describing \p type
  */
 static inline const char *
 recovery2text(enum rsc_recovery_type type)
 {
     switch (type) {
         case pcmk_multiply_active_stop:
             return "shutting it down";
         case pcmk_multiply_active_restart:
             return "attempting recovery";
         case pcmk_multiply_active_block:
             return "waiting for an administrator";
         case pcmk_multiply_active_unexpected:
             return "stopping unexpected instances";
     }
     return "Unknown";
 }
 
 typedef struct pe_re_match_data {
     char *string;
     int nregs;
     regmatch_t *pmatch;
 } pe_re_match_data_t;
 
 typedef struct pe_match_data {
     pe_re_match_data_t *re;
     GHashTable *params;
     GHashTable *meta;
 } pe_match_data_t;
 
 typedef struct pe_rsc_eval_data {
     const char *standard;
     const char *provider;
     const char *agent;
 } pe_rsc_eval_data_t;
 
 typedef struct pe_op_eval_data {
     const char *op_name;
     guint interval;
 } pe_op_eval_data_t;
 
 typedef struct pe_rule_eval_data {
     GHashTable *node_hash;          // Only used with g_hash_table_lookup()
     enum rsc_role_e role;
     crm_time_t *now;                // @COMPAT could be const
     pe_match_data_t *match_data;    // @COMPAT could be const
     pe_rsc_eval_data_t *rsc_data;   // @COMPAT could be const
     pe_op_eval_data_t *op_data;     // @COMPAT could be const
 } pe_rule_eval_data_t;
 
 #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
 #include <crm/pengine/common_compat.h>
 #endif
 
 #ifdef __cplusplus
 }
 #endif
 
 #endif
diff --git a/include/crm/pengine/common_compat.h b/include/crm/pengine/common_compat.h
index 675208cfdc..0e78dd6fd4 100644
--- a/include/crm/pengine/common_compat.h
+++ b/include/crm/pengine/common_compat.h
@@ -1,42 +1,45 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__CRM_PENGINE_COMMON_COMPAT__H
 #  define PCMK__CRM_PENGINE_COMMON_COMPAT__H
 
 #include <crm/common/scheduler.h>
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 /**
  * \file
  * \brief Deprecated Pacemaker scheduler utilities
  * \ingroup pengine
  * \deprecated Do not include this header directly. The utilities in this
  *             header, and the header itself, will be removed in a future
  *             release.
  */
 
 //! \deprecated Use (pcmk_role_promoted + 1) instead
 #define RSC_ROLE_MAX    (pcmk_role_promoted + 1)
 
+//! \deprecated Use role2text(pcmk_role_unknown) instead
+#define RSC_ROLE_UNKNOWN_S      role2text(pcmk_role_unknown)
+
 //! \deprecated Use RSC_ROLE_UNPROMOTED_LEGACY_S instead
 #  define RSC_ROLE_SLAVE_S   RSC_ROLE_UNPROMOTED_LEGACY_S
 
 //! \deprecated Use RSC_ROLE_PROMOTED_LEGACY_S instead
 #  define RSC_ROLE_MASTER_S  RSC_ROLE_PROMOTED_LEGACY_S
 
 
 #ifdef __cplusplus
 }
 #endif
 
 #endif // PCMK__CRM_PENGINE_COMMON_COMPAT__H
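
External code built with deprecated APIs enabled keeps compiling: RSC_ROLE_UNKNOWN_S is now a compat macro that expands to role2text(pcmk_role_unknown), which still yields "Unknown". A rough sketch of a legacy caller that is unaffected (legacy_unknown_role_label() is illustrative only):

    #include <crm/pengine/common.h>   // pulls in common_compat.h by default

    const char *
    legacy_unknown_role_label(void)
    {
        // Expands to role2text(pcmk_role_unknown), still returning "Unknown"
        return RSC_ROLE_UNKNOWN_S;
    }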
diff --git a/lib/pacemaker/pcmk_sched_colocation.c b/lib/pacemaker/pcmk_sched_colocation.c
index f22761287f..692262d478 100644
--- a/lib/pacemaker/pcmk_sched_colocation.c
+++ b/lib/pacemaker/pcmk_sched_colocation.c
@@ -1,1905 +1,1906 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdbool.h>
 #include <glib.h>
 
 #include <crm/crm.h>
+#include <crm/common/scheduler_internal.h>
 #include <crm/pengine/status.h>
 #include <pacemaker-internal.h>
 
 #include "crm/common/util.h"
 #include "crm/common/xml_internal.h"
 #include "crm/msg_xml.h"
 #include "libpacemaker_private.h"
 
 // Used to temporarily mark a node as unusable
 #define INFINITY_HACK   (INFINITY * -100)
 
 /*!
  * \internal
  * \brief Compare two colocations according to priority
  *
  * Compare two colocations according to the order in which they should be
  * considered, based on either their dependent resources or their primary
  * resources -- preferring (in order):
  *  * Colocation that is not \c NULL
  *  * Colocation whose resource has higher priority
  *  * Colocation whose resource is of a higher-level variant
  *    (bundle > clone > group > primitive)
  *  * Colocation whose resource is promotable, if both are clones
  *  * Colocation whose resource has lower ID in lexicographic order
  *
  * \param[in] colocation1  First colocation to compare
  * \param[in] colocation2  Second colocation to compare
  * \param[in] dependent    If \c true, compare colocations by dependent
  *                         priority; otherwise compare them by primary priority
  *
  * \return A negative number if \p colocation1 should be considered first,
  *         a positive number if \p colocation2 should be considered first,
  *         or 0 if order doesn't matter
  */
 static gint
 cmp_colocation_priority(const pcmk__colocation_t *colocation1,
                         const pcmk__colocation_t *colocation2, bool dependent)
 {
     const pe_resource_t *rsc1 = NULL;
     const pe_resource_t *rsc2 = NULL;
 
     if (colocation1 == NULL) {
         return 1;
     }
     if (colocation2 == NULL) {
         return -1;
     }
 
     if (dependent) {
         rsc1 = colocation1->dependent;
         rsc2 = colocation2->dependent;
         CRM_ASSERT(colocation1->primary != NULL);
     } else {
         rsc1 = colocation1->primary;
         rsc2 = colocation2->primary;
         CRM_ASSERT(colocation1->dependent != NULL);
     }
     CRM_ASSERT((rsc1 != NULL) && (rsc2 != NULL));
 
     if (rsc1->priority > rsc2->priority) {
         return -1;
     }
     if (rsc1->priority < rsc2->priority) {
         return 1;
     }
 
     // Process clones before primitives and groups
     if (rsc1->variant > rsc2->variant) {
         return -1;
     }
     if (rsc1->variant < rsc2->variant) {
         return 1;
     }
 
     /* @COMPAT scheduler <2.0.0: Process promotable clones before nonpromotable
      * clones (probably unnecessary, but avoids having to update regression
      * tests)
      */
     if (rsc1->variant == pe_clone) {
         if (pcmk_is_set(rsc1->flags, pe_rsc_promotable)
             && !pcmk_is_set(rsc2->flags, pe_rsc_promotable)) {
             return -1;
         }
         if (!pcmk_is_set(rsc1->flags, pe_rsc_promotable)
             && pcmk_is_set(rsc2->flags, pe_rsc_promotable)) {
             return 1;
         }
     }
 
     return strcmp(rsc1->id, rsc2->id);
 }
 
 /*!
  * \internal
  * \brief Compare two colocations according to priority based on dependents
  *
  * Compare two colocations according to the order in which they should be
  * considered, based on their dependent resources -- preferring (in order):
  *  * Colocation that is not \c NULL
  *  * Colocation whose resource has higher priority
  *  * Colocation whose resource is of a higher-level variant
  *    (bundle > clone > group > primitive)
  *  * Colocation whose resource is promotable, if both are clones
  *  * Colocation whose resource has lower ID in lexicographic order
  *
  * \param[in] a  First colocation to compare
  * \param[in] b  Second colocation to compare
  *
  * \return A negative number if \p a should be considered first,
  *         a positive number if \p b should be considered first,
  *         or 0 if order doesn't matter
  */
 static gint
 cmp_dependent_priority(gconstpointer a, gconstpointer b)
 {
     return cmp_colocation_priority(a, b, true);
 }
 
 /*!
  * \internal
  * \brief Compare two colocations according to priority based on primaries
  *
  * Compare two colocations according to the order in which they should be
  * considered, based on their primary resources -- preferring (in order):
  *  * Colocation that is not \c NULL
  *  * Colocation whose primary has higher priority
  *  * Colocation whose primary is of a higher-level variant
  *    (bundle > clone > group > primitive)
  *  * Colocation whose primary is promotable, if both are clones
  *  * Colocation whose primary has lower ID in lexicographic order
  *
  * \param[in] a  First colocation to compare
  * \param[in] b  Second colocation to compare
  *
  * \return A negative number if \p a should be considered first,
  *         a positive number if \p b should be considered first,
  *         or 0 if order doesn't matter
  */
 static gint
 cmp_primary_priority(gconstpointer a, gconstpointer b)
 {
     return cmp_colocation_priority(a, b, false);
 }
 
 /*!
  * \internal
  * \brief Add a "this with" colocation constraint to a sorted list
  *
  * \param[in,out] list        List of constraints to add \p colocation to
  * \param[in]     colocation  Colocation constraint to add to \p list
  * \param[in]     rsc         Resource whose colocations we're getting (for
  *                            logging only)
  *
  * \note The list will be sorted using cmp_primary_priority().
  */
 void
 pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation,
                     const pe_resource_t *rsc)
 {
     CRM_ASSERT((list != NULL) && (colocation != NULL) && (rsc != NULL));
 
     pe_rsc_trace(rsc,
                  "Adding colocation %s (%s with %s using %s @%s) to "
                  "'this with' list for %s",
                  colocation->id, colocation->dependent->id,
                  colocation->primary->id, colocation->node_attribute,
                  pcmk_readable_score(colocation->score), rsc->id);
     *list = g_list_insert_sorted(*list, (gpointer) colocation,
                                  cmp_primary_priority);
 }
 
 /*!
  * \internal
  * \brief Add a list of "this with" colocation constraints to a list
  *
  * \param[in,out] list      List of constraints to add \p addition to
  * \param[in]     addition  List of colocation constraints to add to \p list
  * \param[in]     rsc       Resource whose colocations we're getting (for
  *                          logging only)
  *
  * \note The lists must be pre-sorted by cmp_primary_priority().
  */
 void
 pcmk__add_this_with_list(GList **list, GList *addition,
                          const pe_resource_t *rsc)
 {
     CRM_ASSERT((list != NULL) && (rsc != NULL));
 
     pcmk__if_tracing(
         {}, // Always add each colocation individually if tracing
         {
             if (*list == NULL) {
                 // Trivial case for efficiency if not tracing
                 *list = g_list_copy(addition);
                 return;
             }
         }
     );
 
     for (const GList *iter = addition; iter != NULL; iter = iter->next) {
         pcmk__add_this_with(list, iter->data, rsc);
     }
 }
 
 /*!
  * \internal
  * \brief Add a "with this" colocation constraint to a sorted list
  *
  * \param[in,out] list        List of constraints to add \p colocation to
  * \param[in]     colocation  Colocation constraint to add to \p list
  * \param[in]     rsc         Resource whose colocations we're getting (for
  *                            logging only)
  *
  * \note The list will be sorted using cmp_dependent_priority().
  */
 void
 pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation,
                     const pe_resource_t *rsc)
 {
     CRM_ASSERT((list != NULL) && (colocation != NULL) && (rsc != NULL));
 
     pe_rsc_trace(rsc,
                  "Adding colocation %s (%s with %s using %s @%s) to "
                  "'with this' list for %s",
                  colocation->id, colocation->dependent->id,
                  colocation->primary->id, colocation->node_attribute,
                  pcmk_readable_score(colocation->score), rsc->id);
     *list = g_list_insert_sorted(*list, (gpointer) colocation,
                                  cmp_dependent_priority);
 }
 
 /*!
  * \internal
  * \brief Add a list of "with this" colocation constraints to a list
  *
  * \param[in,out] list      List of constraints to add \p addition to
  * \param[in]     addition  List of colocation constraints to add to \p list
  * \param[in]     rsc       Resource whose colocations we're getting (for
  *                          logging only)
  *
  * \note The lists must be pre-sorted by cmp_dependent_priority().
  */
 void
 pcmk__add_with_this_list(GList **list, GList *addition,
                          const pe_resource_t *rsc)
 {
     CRM_ASSERT((list != NULL) && (rsc != NULL));
 
     pcmk__if_tracing(
         {}, // Always add each colocation individually if tracing
         {
             if (*list == NULL) {
                 // Trivial case for efficiency if not tracing
                 *list = g_list_copy(addition);
                 return;
             }
         }
     );
 
     for (const GList *iter = addition; iter != NULL; iter = iter->next) {
         pcmk__add_with_this(list, iter->data, rsc);
     }
 }
 
 /*!
  * \internal
  * \brief Add orderings necessary for an anti-colocation constraint
  *
  * \param[in,out] first_rsc   One resource in an anti-colocation
  * \param[in]     first_role  Anti-colocation role of \p first_rsc
  * \param[in]     then_rsc    Other resource in the anti-colocation
  * \param[in]     then_role   Anti-colocation role of \p then_rsc
  */
 static void
 anti_colocation_order(pe_resource_t *first_rsc, int first_role,
                       pe_resource_t *then_rsc, int then_role)
 {
     const char *first_tasks[] = { NULL, NULL };
     const char *then_tasks[] = { NULL, NULL };
 
     /* Actions to make first_rsc lose first_role */
     if (first_role == pcmk_role_promoted) {
         first_tasks[0] = PCMK_ACTION_DEMOTE;
 
     } else {
         first_tasks[0] = PCMK_ACTION_STOP;
 
         if (first_role == pcmk_role_unpromoted) {
             first_tasks[1] = PCMK_ACTION_PROMOTE;
         }
     }
 
     /* Actions to make then_rsc gain then_role */
     if (then_role == pcmk_role_promoted) {
         then_tasks[0] = PCMK_ACTION_PROMOTE;
 
     } else {
         then_tasks[0] = PCMK_ACTION_START;
 
         if (then_role == pcmk_role_unpromoted) {
             then_tasks[1] = PCMK_ACTION_DEMOTE;
         }
     }
 
     for (int first_lpc = 0;
          (first_lpc <= 1) && (first_tasks[first_lpc] != NULL); first_lpc++) {
 
         for (int then_lpc = 0;
              (then_lpc <= 1) && (then_tasks[then_lpc] != NULL); then_lpc++) {
 
             pcmk__order_resource_actions(first_rsc, first_tasks[first_lpc],
                                          then_rsc, then_tasks[then_lpc],
                                          pe_order_anti_colocation);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Add a new colocation constraint to a cluster working set
  *
  * \param[in]     id              XML ID for this constraint
  * \param[in]     node_attr       Colocate by this attribute (NULL for #uname)
  * \param[in]     score           Constraint score
  * \param[in,out] dependent       Resource to be colocated
  * \param[in,out] primary         Resource to colocate \p dependent with
  * \param[in]     dependent_role  Current role of \p dependent
  * \param[in]     primary_role    Current role of \p primary
  * \param[in]     flags           Group of enum pcmk__coloc_flags
  */
 void
 pcmk__new_colocation(const char *id, const char *node_attr, int score,
                      pe_resource_t *dependent, pe_resource_t *primary,
                      const char *dependent_role, const char *primary_role,
                      uint32_t flags)
 {
     pcmk__colocation_t *new_con = NULL;
 
     CRM_CHECK(id != NULL, return);
 
     if ((dependent == NULL) || (primary == NULL)) {
         pcmk__config_err("Ignoring colocation '%s' because resource "
                          "does not exist", id);
         return;
     }
 
     if (score == 0) {
         pe_rsc_trace(dependent,
                      "Ignoring colocation '%s' (%s with %s) because score is 0",
                      id, dependent->id, primary->id);
         return;
     }
 
     new_con = calloc(1, sizeof(pcmk__colocation_t));
     CRM_ASSERT(new_con != NULL);
 
     if (pcmk__str_eq(dependent_role, RSC_ROLE_STARTED_S,
                      pcmk__str_null_matches|pcmk__str_casei)) {
-        dependent_role = RSC_ROLE_UNKNOWN_S;
+        dependent_role = PCMK__ROLE_UNKNOWN;
     }
 
     if (pcmk__str_eq(primary_role, RSC_ROLE_STARTED_S,
                      pcmk__str_null_matches|pcmk__str_casei)) {
-        primary_role = RSC_ROLE_UNKNOWN_S;
+        primary_role = PCMK__ROLE_UNKNOWN;
     }
 
     new_con->id = id;
     new_con->dependent = dependent;
     new_con->primary = primary;
     new_con->score = score;
     new_con->dependent_role = text2role(dependent_role);
     new_con->primary_role = text2role(primary_role);
     new_con->node_attribute = pcmk__s(node_attr, CRM_ATTR_UNAME);
     new_con->flags = flags;
 
     pcmk__add_this_with(&(dependent->rsc_cons), new_con, dependent);
     pcmk__add_with_this(&(primary->rsc_cons_lhs), new_con, primary);
 
     dependent->cluster->colocation_constraints = g_list_prepend(
         dependent->cluster->colocation_constraints, new_con);
 
     if (score <= -INFINITY) {
         anti_colocation_order(dependent, new_con->dependent_role, primary,
                               new_con->primary_role);
         anti_colocation_order(primary, new_con->primary_role, dependent,
                               new_con->dependent_role);
     }
 }
 
 /*!
  * \internal
  * \brief Return the boolean influence corresponding to configuration
  *
  * \param[in] coloc_id     Colocation XML ID (for error logging)
  * \param[in] rsc          Resource involved in constraint (for default)
  * \param[in] influence_s  String value of influence option
  *
  * \return pcmk__coloc_influence if string evaluates true, or string is NULL or
  *         invalid and resource's critical option evaluates true, otherwise
  *         pcmk__coloc_none
  */
 static uint32_t
 unpack_influence(const char *coloc_id, const pe_resource_t *rsc,
                  const char *influence_s)
 {
     if (influence_s != NULL) {
         int influence_i = 0;
 
         if (crm_str_to_boolean(influence_s, &influence_i) < 0) {
             pcmk__config_err("Constraint '%s' has invalid value for "
                              XML_COLOC_ATTR_INFLUENCE " (using default)",
                              coloc_id);
         } else {
             return (influence_i == 0)? pcmk__coloc_none : pcmk__coloc_influence;
         }
     }
     if (pcmk_is_set(rsc->flags, pe_rsc_critical)) {
         return pcmk__coloc_influence;
     }
     return pcmk__coloc_none;
 }
 
 static void
 unpack_colocation_set(xmlNode *set, int score, const char *coloc_id,
                       const char *influence_s, pe_working_set_t *data_set)
 {
     xmlNode *xml_rsc = NULL;
     pe_resource_t *other = NULL;
     pe_resource_t *resource = NULL;
     const char *set_id = ID(set);
     const char *role = crm_element_value(set, "role");
     bool with_previous = false;
     int local_score = score;
     bool sequential = false;
     uint32_t flags = pcmk__coloc_none;
     const char *xml_rsc_id = NULL;
     const char *score_s = crm_element_value(set, XML_RULE_ATTR_SCORE);
 
     if (score_s) {
         local_score = char2score(score_s);
     }
     if (local_score == 0) {
         crm_trace("Ignoring colocation '%s' for set '%s' because score is 0",
                   coloc_id, set_id);
         return;
     }
 
     /* @COMPAT The deprecated "ordering" attribute specifies whether resources
      * in a positive-score set are colocated with the previous or next resource.
      */
     if (pcmk__str_eq(crm_element_value(set, "ordering"), "group",
                      pcmk__str_null_matches|pcmk__str_casei)) {
         with_previous = true;
     } else {
         pe_warn_once(pe_wo_set_ordering,
                      "Support for 'ordering' other than 'group' in "
                      XML_CONS_TAG_RSC_SET " (such as %s) is deprecated and "
                      "will be removed in a future release", set_id);
     }
 
     if ((pcmk__xe_get_bool_attr(set, "sequential", &sequential) == pcmk_rc_ok)
         && !sequential) {
         return;
     }
 
     if (local_score > 0) {
         for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
              xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
 
             xml_rsc_id = ID(xml_rsc);
             resource = pcmk__find_constraint_resource(data_set->resources,
                                                       xml_rsc_id);
             if (resource == NULL) {
                 // Should be possible only with validation disabled
                 pcmk__config_err("Ignoring %s and later resources in set %s: "
                                  "No such resource", xml_rsc_id, set_id);
                 return;
             }
             if (other != NULL) {
                 flags = pcmk__coloc_explicit
                         | unpack_influence(coloc_id, resource, influence_s);
                 if (with_previous) {
                     pe_rsc_trace(resource, "Colocating %s with %s in set %s",
                                  resource->id, other->id, set_id);
                     pcmk__new_colocation(set_id, NULL, local_score, resource,
                                          other, role, role, flags);
                 } else {
                     pe_rsc_trace(resource, "Colocating %s with %s in set %s",
                                  other->id, resource->id, set_id);
                     pcmk__new_colocation(set_id, NULL, local_score, other,
                                          resource, role, role, flags);
                 }
             }
             other = resource;
         }
 
     } else {
         /* Anti-colocating with every prior resource is
          * the only way to ensure the intuitive result
          * (i.e. that no one in the set can run with anyone else in the set)
          */
 
         for (xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
              xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
 
             xmlNode *xml_rsc_with = NULL;
 
             xml_rsc_id = ID(xml_rsc);
             resource = pcmk__find_constraint_resource(data_set->resources,
                                                       xml_rsc_id);
             if (resource == NULL) {
                 // Should be possible only with validation disabled
                 pcmk__config_err("Ignoring %s and later resources in set %s: "
                                  "No such resource", xml_rsc_id, set_id);
                 return;
             }
             flags = pcmk__coloc_explicit
                     | unpack_influence(coloc_id, resource, influence_s);
             for (xml_rsc_with = first_named_child(set, XML_TAG_RESOURCE_REF);
                  xml_rsc_with != NULL;
                  xml_rsc_with = crm_next_same_xml(xml_rsc_with)) {
 
                 xml_rsc_id = ID(xml_rsc_with);
                 if (pcmk__str_eq(resource->id, xml_rsc_id, pcmk__str_none)) {
                     break;
                 }
                 other = pcmk__find_constraint_resource(data_set->resources,
                                                        xml_rsc_id);
                 CRM_ASSERT(other != NULL); // We already processed it
                 pcmk__new_colocation(set_id, NULL, local_score,
                                      resource, other, role, role, flags);
             }
         }
     }
 }
 
 /*!
  * \internal
  * \brief Colocate two resource sets relative to each other
  *
  * \param[in]     id           Colocation XML ID
  * \param[in]     set1         Dependent set
  * \param[in]     set2         Primary set
  * \param[in]     score        Colocation score
  * \param[in]     influence_s  Value of colocation's "influence" attribute
  * \param[in,out] data_set     Cluster working set
  */
 static void
 colocate_rsc_sets(const char *id, const xmlNode *set1, const xmlNode *set2,
                   int score, const char *influence_s,
                   pe_working_set_t *data_set)
 {
     xmlNode *xml_rsc = NULL;
     pe_resource_t *rsc_1 = NULL;
     pe_resource_t *rsc_2 = NULL;
 
     const char *xml_rsc_id = NULL;
     const char *role_1 = crm_element_value(set1, "role");
     const char *role_2 = crm_element_value(set2, "role");
 
     int rc = pcmk_rc_ok;
     bool sequential = false;
     uint32_t flags = pcmk__coloc_none;
 
     if (score == 0) {
         crm_trace("Ignoring colocation '%s' between sets %s and %s "
                   "because score is 0", id, ID(set1), ID(set2));
         return;
     }
 
     rc = pcmk__xe_get_bool_attr(set1, "sequential", &sequential);
     if ((rc != pcmk_rc_ok) || sequential) {
         // Get the first one
         xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
         if (xml_rsc != NULL) {
             xml_rsc_id = ID(xml_rsc);
             rsc_1 = pcmk__find_constraint_resource(data_set->resources,
                                                    xml_rsc_id);
             if (rsc_1 == NULL) {
                 // Should be possible only with validation disabled
                 pcmk__config_err("Ignoring colocation of set %s with set %s "
                                  "because first resource %s not found",
                                  ID(set1), ID(set2), xml_rsc_id);
                 return;
             }
         }
     }
 
     rc = pcmk__xe_get_bool_attr(set2, "sequential", &sequential);
     if ((rc != pcmk_rc_ok) || sequential) {
         // Get the last one
         for (xml_rsc = first_named_child(set2, XML_TAG_RESOURCE_REF);
              xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
 
             xml_rsc_id = ID(xml_rsc);
         }
         rsc_2 = pcmk__find_constraint_resource(data_set->resources, xml_rsc_id);
         if (rsc_2 == NULL) {
             // Should be possible only with validation disabled
             pcmk__config_err("Ignoring colocation of set %s with set %s "
                              "because last resource %s not found",
                              ID(set1), ID(set2), xml_rsc_id);
             return;
         }
     }
 
     if ((rsc_1 != NULL) && (rsc_2 != NULL)) { // Both sets are sequential
         flags = pcmk__coloc_explicit | unpack_influence(id, rsc_1, influence_s);
         pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1, role_2,
                              flags);
 
     } else if (rsc_1 != NULL) { // Only set1 is sequential
         flags = pcmk__coloc_explicit | unpack_influence(id, rsc_1, influence_s);
         for (xml_rsc = first_named_child(set2, XML_TAG_RESOURCE_REF);
              xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
 
             xml_rsc_id = ID(xml_rsc);
             rsc_2 = pcmk__find_constraint_resource(data_set->resources,
                                                    xml_rsc_id);
             if (rsc_2 == NULL) {
                 // Should be possible only with validation disabled
                 pcmk__config_err("Ignoring set %s colocation with resource %s "
                                  "in set %s: No such resource",
                                  ID(set1), xml_rsc_id, ID(set2));
                 continue;
             }
             pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1,
                                  role_2, flags);
         }
 
     } else if (rsc_2 != NULL) { // Only set2 is sequential
         for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
              xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
 
             xml_rsc_id = ID(xml_rsc);
             rsc_1 = pcmk__find_constraint_resource(data_set->resources,
                                                    xml_rsc_id);
             if (rsc_1 == NULL) {
                 // Should be possible only with validation disabled
                 pcmk__config_err("Ignoring colocation of set %s resource %s "
                                  "with set %s: No such resource",
                                  ID(set1), xml_rsc_id, ID(set2));
                 continue;
             }
             flags = pcmk__coloc_explicit
                     | unpack_influence(id, rsc_1, influence_s);
             pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2, role_1,
                                  role_2, flags);
         }
 
     } else { // Neither set is sequential
         for (xml_rsc = first_named_child(set1, XML_TAG_RESOURCE_REF);
              xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
 
             xmlNode *xml_rsc_2 = NULL;
 
             xml_rsc_id = ID(xml_rsc);
             rsc_1 = pcmk__find_constraint_resource(data_set->resources,
                                                    xml_rsc_id);
             if (rsc_1 == NULL) {
                 // Should be possible only with validation disabled
                 pcmk__config_err("Ignoring colocation of set %s resource %s "
                                  "with set %s: No such resource",
                                  ID(set1), xml_rsc_id, ID(set2));
                 continue;
             }
 
             flags = pcmk__coloc_explicit
                     | unpack_influence(id, rsc_1, influence_s);
             for (xml_rsc_2 = first_named_child(set2, XML_TAG_RESOURCE_REF);
                  xml_rsc_2 != NULL;
                  xml_rsc_2 = crm_next_same_xml(xml_rsc_2)) {
 
                 xml_rsc_id = ID(xml_rsc_2);
                 rsc_2 = pcmk__find_constraint_resource(data_set->resources,
                                                        xml_rsc_id);
                 if (rsc_2 == NULL) {
                     // Should be possible only with validation disabled
                     pcmk__config_err("Ignoring colocation of set %s resource "
                                      "%s with set %s resource %s: No such "
                                      "resource", ID(set1), ID(xml_rsc),
                                      ID(set2), xml_rsc_id);
                     continue;
                 }
                 pcmk__new_colocation(id, NULL, score, rsc_1, rsc_2,
                                      role_1, role_2, flags);
             }
         }
     }
 }
 
 static void
 unpack_simple_colocation(xmlNode *xml_obj, const char *id,
                          const char *influence_s, pe_working_set_t *data_set)
 {
     int score_i = 0;
     uint32_t flags = pcmk__coloc_none;
 
     const char *score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
     const char *dependent_id = crm_element_value(xml_obj,
                                                  XML_COLOC_ATTR_SOURCE);
     const char *primary_id = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET);
     const char *dependent_role = crm_element_value(xml_obj,
                                                    XML_COLOC_ATTR_SOURCE_ROLE);
     const char *primary_role = crm_element_value(xml_obj,
                                                  XML_COLOC_ATTR_TARGET_ROLE);
     const char *attr = crm_element_value(xml_obj, XML_COLOC_ATTR_NODE_ATTR);
 
     const char *primary_instance = NULL;
     const char *dependent_instance = NULL;
     pe_resource_t *primary = NULL;
     pe_resource_t *dependent = NULL;
 
     primary = pcmk__find_constraint_resource(data_set->resources, primary_id);
     dependent = pcmk__find_constraint_resource(data_set->resources,
                                                dependent_id);
 
     // @COMPAT: Deprecated since 2.1.5
     primary_instance = crm_element_value(xml_obj,
                                          XML_COLOC_ATTR_TARGET_INSTANCE);
     dependent_instance = crm_element_value(xml_obj,
                                            XML_COLOC_ATTR_SOURCE_INSTANCE);
     if (dependent_instance != NULL) {
         pe_warn_once(pe_wo_coloc_inst,
                      "Support for " XML_COLOC_ATTR_SOURCE_INSTANCE " is "
                      "deprecated and will be removed in a future release.");
     }
     if (primary_instance != NULL) {
         pe_warn_once(pe_wo_coloc_inst,
                      "Support for " XML_COLOC_ATTR_TARGET_INSTANCE " is "
                      "deprecated and will be removed in a future release.");
     }
 
     if (dependent == NULL) {
         pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
                          "does not exist", id, dependent_id);
         return;
 
     } else if (primary == NULL) {
         pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
                          "does not exist", id, primary_id);
         return;
 
     } else if ((dependent_instance != NULL) && !pe_rsc_is_clone(dependent)) {
         pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
                          "is not a clone but instance '%s' was requested",
                          id, dependent_id, dependent_instance);
         return;
 
     } else if ((primary_instance != NULL) && !pe_rsc_is_clone(primary)) {
         pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
                          "is not a clone but instance '%s' was requested",
                          id, primary_id, primary_instance);
         return;
     }
 
     if (dependent_instance != NULL) {
         dependent = find_clone_instance(dependent, dependent_instance);
         if (dependent == NULL) {
             pcmk__config_warn("Ignoring constraint '%s' because resource '%s' "
                               "does not have an instance '%s'",
                               id, dependent_id, dependent_instance);
             return;
         }
     }
 
     if (primary_instance != NULL) {
         primary = find_clone_instance(primary, primary_instance);
         if (primary == NULL) {
             pcmk__config_warn("Ignoring constraint '%s' because resource '%s' "
                               "does not have an instance '%s'",
                               "'%s'", id, primary_id, primary_instance);
             return;
         }
     }
 
     if (pcmk__xe_attr_is_true(xml_obj, XML_CONS_ATTR_SYMMETRICAL)) {
         pcmk__config_warn("The colocation constraint '"
                           XML_CONS_ATTR_SYMMETRICAL
                           "' attribute has been removed");
     }
 
     if (score) {
         score_i = char2score(score);
     }
 
     flags = pcmk__coloc_explicit | unpack_influence(id, dependent, influence_s);
     pcmk__new_colocation(id, attr, score_i, dependent, primary,
                          dependent_role, primary_role, flags);
 }
 
 // \return Standard Pacemaker return code
 static int
 unpack_colocation_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
                        pe_working_set_t *data_set)
 {
     const char *id = NULL;
     const char *dependent_id = NULL;
     const char *primary_id = NULL;
     const char *dependent_role = NULL;
     const char *primary_role = NULL;
 
     pe_resource_t *dependent = NULL;
     pe_resource_t *primary = NULL;
 
     pe_tag_t *dependent_tag = NULL;
     pe_tag_t *primary_tag = NULL;
 
     xmlNode *dependent_set = NULL;
     xmlNode *primary_set = NULL;
     bool any_sets = false;
 
     *expanded_xml = NULL;
 
     CRM_CHECK(xml_obj != NULL, return EINVAL);
 
     id = ID(xml_obj);
     if (id == NULL) {
         pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
                          xml_obj->name);
         return pcmk_rc_unpack_error;
     }
 
     // Check whether there are any resource sets with template or tag references
     *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, data_set);
     if (*expanded_xml != NULL) {
         crm_log_xml_trace(*expanded_xml, "Expanded rsc_colocation");
         return pcmk_rc_ok;
     }
 
     dependent_id = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE);
     primary_id = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET);
     if ((dependent_id == NULL) || (primary_id == NULL)) {
         return pcmk_rc_ok;
     }
 
     if (!pcmk__valid_resource_or_tag(data_set, dependent_id, &dependent,
                                      &dependent_tag)) {
         pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
                          "valid resource or tag", id, dependent_id);
         return pcmk_rc_unpack_error;
     }
 
     if (!pcmk__valid_resource_or_tag(data_set, primary_id, &primary,
                                      &primary_tag)) {
         pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
                          "valid resource or tag", id, primary_id);
         return pcmk_rc_unpack_error;
     }
 
     if ((dependent != NULL) && (primary != NULL)) {
         /* Neither side references any template/tag. */
         return pcmk_rc_ok;
     }
 
     if ((dependent_tag != NULL) && (primary_tag != NULL)) {
         // A colocation constraint between two templates/tags makes no sense
         pcmk__config_err("Ignoring constraint '%s' because two templates or "
                          "tags cannot be colocated", id);
         return pcmk_rc_unpack_error;
     }
 
     dependent_role = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE);
     primary_role = crm_element_value(xml_obj, XML_COLOC_ATTR_TARGET_ROLE);
 
     *expanded_xml = copy_xml(xml_obj);
 
     // Convert dependent's template/tag reference into constraint resource_set
     if (!pcmk__tag_to_set(*expanded_xml, &dependent_set, XML_COLOC_ATTR_SOURCE,
                           true, data_set)) {
         free_xml(*expanded_xml);
         *expanded_xml = NULL;
         return pcmk_rc_unpack_error;
     }
 
     if (dependent_set != NULL) {
         if (dependent_role != NULL) {
             // Move "rsc-role" into converted resource_set as "role"
             crm_xml_add(dependent_set, "role", dependent_role);
             xml_remove_prop(*expanded_xml, XML_COLOC_ATTR_SOURCE_ROLE);
         }
         any_sets = true;
     }
 
     // Convert primary's template/tag reference into constraint resource_set
     if (!pcmk__tag_to_set(*expanded_xml, &primary_set, XML_COLOC_ATTR_TARGET,
                           true, data_set)) {
         free_xml(*expanded_xml);
         *expanded_xml = NULL;
         return pcmk_rc_unpack_error;
     }
 
     if (primary_set != NULL) {
         if (primary_role != NULL) {
             // Move "with-rsc-role" into converted resource_set as "role"
             crm_xml_add(primary_set, "role", primary_role);
             xml_remove_prop(*expanded_xml, XML_COLOC_ATTR_TARGET_ROLE);
         }
         any_sets = true;
     }
 
     if (any_sets) {
         crm_log_xml_trace(*expanded_xml, "Expanded rsc_colocation");
     } else {
         free_xml(*expanded_xml);
         *expanded_xml = NULL;
     }
 
     return pcmk_rc_ok;
 }
 
 /*!
  * \internal
  * \brief Parse a colocation constraint from XML into a cluster working set
  *
  * \param[in,out] xml_obj   Colocation constraint XML to unpack
  * \param[in,out] data_set  Cluster working set to add constraint to
  */
 void
 pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set)
 {
     int score_i = 0;
     xmlNode *set = NULL;
     xmlNode *last = NULL;
 
     xmlNode *orig_xml = NULL;
     xmlNode *expanded_xml = NULL;
 
     const char *id = crm_element_value(xml_obj, XML_ATTR_ID);
     const char *score = NULL;
     const char *influence_s = NULL;
 
     if (pcmk__str_empty(id)) {
         pcmk__config_err("Ignoring " XML_CONS_TAG_RSC_DEPEND
                          " without " CRM_ATTR_ID);
         return;
     }
 
     if (unpack_colocation_tags(xml_obj, &expanded_xml,
                                data_set) != pcmk_rc_ok) {
         return;
     }
     if (expanded_xml != NULL) {
         orig_xml = xml_obj;
         xml_obj = expanded_xml;
     }
 
     score = crm_element_value(xml_obj, XML_RULE_ATTR_SCORE);
     if (score != NULL) {
         score_i = char2score(score);
     }
     influence_s = crm_element_value(xml_obj, XML_COLOC_ATTR_INFLUENCE);
 
     for (set = first_named_child(xml_obj, XML_CONS_TAG_RSC_SET); set != NULL;
          set = crm_next_same_xml(set)) {
 
         set = expand_idref(set, data_set->input);
         if (set == NULL) { // Configuration error, message already logged
             if (expanded_xml != NULL) {
                 free_xml(expanded_xml);
             }
             return;
         }
 
         if (pcmk__str_empty(ID(set))) {
             pcmk__config_err("Ignoring " XML_CONS_TAG_RSC_SET
                              " without " CRM_ATTR_ID);
             continue;
         }
         unpack_colocation_set(set, score_i, id, influence_s, data_set);
 
         if (last != NULL) {
             colocate_rsc_sets(id, last, set, score_i, influence_s, data_set);
         }
         last = set;
     }
 
     if (expanded_xml) {
         free_xml(expanded_xml);
         xml_obj = orig_xml;
     }
 
     if (last == NULL) {
         unpack_simple_colocation(xml_obj, id, influence_s, data_set);
     }
 }
 
 /*!
  * \internal
  * \brief Make actions of a given type unrunnable for a given resource
  *
  * \param[in,out] rsc     Resource whose actions should be blocked
  * \param[in]     task    Name of action to block
  * \param[in]     reason  Unrunnable start action causing the block
  */
 static void
 mark_action_blocked(pe_resource_t *rsc, const char *task,
                     const pe_resource_t *reason)
 {
     GList *iter = NULL;
     char *reason_text = crm_strdup_printf("colocation with %s", reason->id);
 
     for (iter = rsc->actions; iter != NULL; iter = iter->next) {
         pe_action_t *action = iter->data;
 
         if (pcmk_is_set(action->flags, pe_action_runnable)
             && pcmk__str_eq(action->task, task, pcmk__str_none)) {
 
             pe__clear_action_flags(action, pe_action_runnable);
             pe_action_set_reason(action, reason_text, false);
             pcmk__block_colocation_dependents(action);
             pcmk__update_action_for_orderings(action, rsc->cluster);
         }
     }
 
     // If parent resource can't perform an action, neither can any children
     for (iter = rsc->children; iter != NULL; iter = iter->next) {
         mark_action_blocked((pe_resource_t *) (iter->data), task, reason);
     }
     free(reason_text);
 }
 
 /*!
  * \internal
  * \brief If an action is unrunnable, block any relevant dependent actions
  *
  * If a given action is an unrunnable start or promote, block the start or
  * promote actions of resources colocated with it, as appropriate to the
  * colocations' configured roles.
  *
  * \param[in,out] action  Action to check
  */
 void
 pcmk__block_colocation_dependents(pe_action_t *action)
 {
     GList *iter = NULL;
     GList *colocations = NULL;
     pe_resource_t *rsc = NULL;
     bool is_start = false;
 
     if (pcmk_is_set(action->flags, pe_action_runnable)) {
         return; // Only unrunnable actions block dependents
     }
 
     is_start = pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_none);
     if (!is_start
         && !pcmk__str_eq(action->task, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
         return; // Only unrunnable starts and promotes block dependents
     }
 
     CRM_ASSERT(action->rsc != NULL); // Start and promote are resource actions
 
     /* If this resource is part of a collective resource, dependents are blocked
      * only if all instances of the collective are unrunnable, so check the
      * collective resource.
      */
     rsc = uber_parent(action->rsc);
     if (rsc->parent != NULL) {
         rsc = rsc->parent; // Bundle
     }
 
     // Colocation fails only if entire primary can't reach desired role
     for (iter = rsc->children; iter != NULL; iter = iter->next) {
         pe_resource_t *child = iter->data;
         pe_action_t *child_action = find_first_action(child->actions, NULL,
                                                       action->task, NULL);
 
         if ((child_action == NULL)
             || pcmk_is_set(child_action->flags, pe_action_runnable)) {
             crm_trace("Not blocking %s colocation dependents because "
                       "at least %s has runnable %s",
                       rsc->id, child->id, action->task);
             return; // At least one child can reach desired role
         }
     }
 
     crm_trace("Blocking %s colocation dependents due to unrunnable %s %s",
               rsc->id, action->rsc->id, action->task);
 
     // Check each colocation where this resource is primary
     colocations = pcmk__with_this_colocations(rsc);
     for (iter = colocations; iter != NULL; iter = iter->next) {
         pcmk__colocation_t *colocation = iter->data;
 
         if (colocation->score < INFINITY) {
             continue; // Only mandatory colocations block dependent
         }
 
         /* If the primary can't start, the dependent can't reach its colocated
          * role, regardless of what the primary or dependent colocation role is.
          *
          * If the primary can't be promoted, the dependent can't reach its
          * colocated role if the primary's colocation role is promoted.
          */
         if (!is_start && (colocation->primary_role != pcmk_role_promoted)) {
             continue;
         }
 
         // Block the dependent from reaching its colocated role
         if (colocation->dependent_role == pcmk_role_promoted) {
             mark_action_blocked(colocation->dependent, PCMK_ACTION_PROMOTE,
                                 action->rsc);
         } else {
             mark_action_blocked(colocation->dependent, PCMK_ACTION_START,
                                 action->rsc);
         }
     }
     g_list_free(colocations);
 }
 
 /*!
  * \internal
  * \brief Get the resource to use for role comparisons
  *
  * A bundle replica includes a container and possibly an instance of the bundled
  * resource. The dependent in a "with bundle" colocation is colocated with a
  * particular bundle container. However, if the colocation includes a role, then
  * the role must be checked on the bundled resource instance inside the
  * container. The container itself will never be promoted; the bundled resource
  * may be.
  *
  * If the given resource is a bundle replica container, return the resource
  * inside it, if any. Otherwise, return the resource itself.
  *
  * \param[in] rsc  Resource to check
  *
  * \return Resource to use for role comparisons
  */
 static const pe_resource_t *
 get_resource_for_role(const pe_resource_t *rsc)
 {
     if (pcmk_is_set(rsc->flags, pe_rsc_replica_container)) {
         const pe_resource_t *child = pe__get_rsc_in_container(rsc);
 
         if (child != NULL) {
             return child;
         }
     }
     return rsc;
 }
 
 /*!
  * \internal
  * \brief Determine how a colocation constraint should affect a resource
  *
  * Colocation constraints have different effects at different points in the
  * scheduler sequence. Initially, they affect a resource's location; once that
  * is determined, then for promotable clones they can affect a resource
  * instance's role; after both are determined, the constraints no longer matter.
  * Given a specific colocation constraint, check what has been done so far to
  * determine what should be affected at the current point in the scheduler.
  *
  * \param[in] dependent   Dependent resource in colocation
  * \param[in] primary     Primary resource in colocation
  * \param[in] colocation  Colocation constraint
  * \param[in] preview     If true, pretend resources have already been assigned
  *
  * \return How colocation constraint should be applied at this point
  */
 enum pcmk__coloc_affects
 pcmk__colocation_affects(const pe_resource_t *dependent,
                          const pe_resource_t *primary,
                          const pcmk__colocation_t *colocation, bool preview)
 {
     const pe_resource_t *dependent_role_rsc = NULL;
     const pe_resource_t *primary_role_rsc = NULL;
 
     CRM_ASSERT((dependent != NULL) && (primary != NULL)
                && (colocation != NULL));
 
     if (!preview && pcmk_is_set(primary->flags, pe_rsc_provisional)) {
         // Primary resource has not been assigned yet, so we can't do anything
         return pcmk__coloc_affects_nothing;
     }
 
     dependent_role_rsc = get_resource_for_role(dependent);
     primary_role_rsc = get_resource_for_role(primary);
 
     if ((colocation->dependent_role >= pcmk_role_unpromoted)
         && (dependent_role_rsc->parent != NULL)
         && pcmk_is_set(dependent_role_rsc->parent->flags, pe_rsc_promotable)
         && !pcmk_is_set(dependent_role_rsc->flags, pe_rsc_provisional)) {
 
         /* This is a colocation by role, and the dependent is a promotable clone
          * that has already been assigned, so the colocation should now affect
          * the role.
          */
         return pcmk__coloc_affects_role;
     }
 
     if (!preview && !pcmk_is_set(dependent->flags, pe_rsc_provisional)) {
         /* The dependent resource has already been through assignment, so the
          * constraint no longer has any effect. Log an error if a mandatory
          * colocation constraint has been violated.
          */
 
         const pe_node_t *primary_node = primary->allocated_to;
 
         if (dependent->allocated_to == NULL) {
             crm_trace("Skipping colocation '%s': %s will not run anywhere",
                       colocation->id, dependent->id);
 
         } else if (colocation->score >= INFINITY) {
             // Dependent resource must colocate with primary resource
 
             if (!pe__same_node(primary_node, dependent->allocated_to)) {
                 crm_err("%s must be colocated with %s but is not (%s vs. %s)",
                         dependent->id, primary->id,
                         pe__node_name(dependent->allocated_to),
                         pe__node_name(primary_node));
             }
 
         } else if (colocation->score <= -CRM_SCORE_INFINITY) {
             // Dependent resource must anti-colocate with primary resource
 
             if (pe__same_node(dependent->allocated_to, primary_node)) {
                 crm_err("%s and %s must be anti-colocated but are assigned "
                         "to the same node (%s)",
                         dependent->id, primary->id,
                         pe__node_name(primary_node));
             }
         }
         return pcmk__coloc_affects_nothing;
     }
 
     if ((colocation->dependent_role != pcmk_role_unknown)
         && (colocation->dependent_role != dependent_role_rsc->next_role)) {
         crm_trace("Skipping %scolocation '%s': dependent limited to %s role "
 
                   "but %s next role is %s",
                   ((colocation->score < 0)? "anti-" : ""),
                   colocation->id, role2text(colocation->dependent_role),
                   dependent_role_rsc->id,
                   role2text(dependent_role_rsc->next_role));
         return pcmk__coloc_affects_nothing;
     }
 
     if ((colocation->primary_role != pcmk_role_unknown)
         && (colocation->primary_role != primary_role_rsc->next_role)) {
         crm_trace("Skipping %scolocation '%s': primary limited to %s role "
                   "but %s next role is %s",
                   ((colocation->score < 0)? "anti-" : ""),
                   colocation->id, role2text(colocation->primary_role),
                   primary_role_rsc->id, role2text(primary_role_rsc->next_role));
         return pcmk__coloc_affects_nothing;
     }
 
     return pcmk__coloc_affects_location;
 }
 
 /*!
  * \internal
  * \brief Apply colocation to dependent for assignment purposes
  *
  * Update the allowed node scores of the dependent resource in a colocation,
  * for the purposes of assigning it to a node.
  *
  * \param[in,out] dependent   Dependent resource in colocation
  * \param[in]     primary     Primary resource in colocation
  * \param[in]     colocation  Colocation constraint
  */
 void
 pcmk__apply_coloc_to_scores(pe_resource_t *dependent,
                             const pe_resource_t *primary,
                             const pcmk__colocation_t *colocation)
 {
     const char *attr = colocation->node_attribute;
     const char *value = NULL;
     GHashTable *work = NULL;
     GHashTableIter iter;
     pe_node_t *node = NULL;
 
     if (primary->allocated_to != NULL) {
         value = pcmk__colocation_node_attr(primary->allocated_to, attr,
                                            primary);
 
     } else if (colocation->score < 0) {
         // Nothing to do (anti-colocation with something that is not running)
         return;
     }
 
     work = pcmk__copy_node_table(dependent->allowed_nodes);
 
     g_hash_table_iter_init(&iter, work);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
         if (primary->allocated_to == NULL) {
             node->weight = pcmk__add_scores(-colocation->score, node->weight);
             pe_rsc_trace(dependent,
                          "Applied %s to %s score on %s (now %s after "
                          "subtracting %s because primary %s inactive)",
                          colocation->id, dependent->id, pe__node_name(node),
                          pcmk_readable_score(node->weight),
                          pcmk_readable_score(colocation->score), primary->id);
             continue;
         }
 
         if (pcmk__str_eq(pcmk__colocation_node_attr(node, attr, dependent),
                          value, pcmk__str_casei)) {
 
             /* Add colocation score only if optional (or minus infinity). A
              * mandatory colocation is a requirement rather than a preference,
              * so we don't need to consider it for relative assignment purposes.
              * The resource will simply be forbidden from running on the node if
              * the primary isn't active there (via the condition above).
              */
             if (colocation->score < CRM_SCORE_INFINITY) {
                 node->weight = pcmk__add_scores(colocation->score,
                                                 node->weight);
                 pe_rsc_trace(dependent,
                              "Applied %s to %s score on %s (now %s after "
                              "adding %s)",
                              colocation->id, dependent->id, pe__node_name(node),
                              pcmk_readable_score(node->weight),
                              pcmk_readable_score(colocation->score));
             }
             continue;
         }
 
         if (colocation->score >= CRM_SCORE_INFINITY) {
             /* Only mandatory colocations are relevant when the colocation
              * attribute doesn't match, because an attribute not matching is not
              * a negative preference -- the colocation is simply relevant only
              * where it matches.
              */
             node->weight = -CRM_SCORE_INFINITY;
             pe_rsc_trace(dependent,
                          "Banned %s from %s because colocation %s attribute %s "
                          "does not match",
                          dependent->id, pe__node_name(node), colocation->id,
                          attr);
         }
     }
 
     if ((colocation->score <= -INFINITY) || (colocation->score >= INFINITY)
         || pcmk__any_node_available(work)) {
 
         g_hash_table_destroy(dependent->allowed_nodes);
         dependent->allowed_nodes = work;
         work = NULL;
 
     } else {
         pe_rsc_info(dependent,
                     "%s: Rolling back scores from %s (no available nodes)",
                     dependent->id, primary->id);
     }
 
     if (work != NULL) {
         g_hash_table_destroy(work);
     }
 }
 
 /*!
  * \internal
  * \brief Apply colocation to dependent for role purposes
  *
  * Update the priority of the dependent resource in a colocation, for the
  * purposes of selecting its role
  *
  * \param[in,out] dependent   Dependent resource in colocation
  * \param[in]     primary     Primary resource in colocation
  * \param[in]     colocation  Colocation constraint
  */
 void
 pcmk__apply_coloc_to_priority(pe_resource_t *dependent,
                               const pe_resource_t *primary,
                               const pcmk__colocation_t *colocation)
 {
     const char *dependent_value = NULL;
     const char *primary_value = NULL;
     const char *attr = colocation->node_attribute;
     int score_multiplier = 1;
 
     const pe_resource_t *primary_role_rsc = NULL;
 
     CRM_ASSERT((dependent != NULL) && (primary != NULL) &&
                (colocation != NULL));
 
     if ((primary->allocated_to == NULL) || (dependent->allocated_to == NULL)) {
         return;
     }
 
     dependent_value = pcmk__colocation_node_attr(dependent->allocated_to, attr,
                                                  dependent);
     primary_value = pcmk__colocation_node_attr(primary->allocated_to, attr,
                                                primary);
 
     primary_role_rsc = get_resource_for_role(primary);
 
     if (!pcmk__str_eq(dependent_value, primary_value, pcmk__str_casei)) {
         if ((colocation->score == INFINITY)
             && (colocation->dependent_role == pcmk_role_promoted)) {
             dependent->priority = -INFINITY;
         }
         return;
     }
 
     if ((colocation->primary_role != pcmk_role_unknown)
         && (colocation->primary_role != primary_role_rsc->next_role)) {
         return;
     }
 
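     /* If the dependent is limited to the unpromoted role, this colocation
      * should make promotion *less* likely where it applies, so subtract the
      * colocation score from the promotion priority instead of adding it.
      */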
     if (colocation->dependent_role == pcmk_role_unpromoted) {
         score_multiplier = -1;
     }
 
     dependent->priority = pcmk__add_scores(score_multiplier * colocation->score,
                                            dependent->priority);
     pe_rsc_trace(dependent,
                  "Applied %s to %s promotion priority (now %s after %s %s)",
                  colocation->id, dependent->id,
                  pcmk_readable_score(dependent->priority),
                  ((score_multiplier == 1)? "adding" : "subtracting"),
                  pcmk_readable_score(colocation->score));
 }
 
 /*!
  * \internal
  * \brief Find score of highest-scored node that matches colocation attribute
  *
  * \param[in] rsc    Resource whose allowed nodes should be searched
  * \param[in] attr   Colocation attribute name (must not be NULL)
  * \param[in] value  Colocation attribute value to require
  *
  * \return Score of the highest-scored available node allowed for \p rsc whose
  *         value for \p attr matches \p value, or \c -INFINITY if none matches
  */
 static int
 best_node_score_matching_attr(const pe_resource_t *rsc, const char *attr,
                               const char *value)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
     int best_score = -INFINITY;
     const char *best_node = NULL;
 
     // Find best allowed node with matching attribute
     g_hash_table_iter_init(&iter, rsc->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
 
         if ((node->weight > best_score)
             && pcmk__node_available(node, false, false)
             && pcmk__str_eq(value, pcmk__colocation_node_attr(node, attr, rsc),
                             pcmk__str_casei)) {
 
             best_score = node->weight;
             best_node = node->details->uname;
         }
     }
 
     if (!pcmk__str_eq(attr, CRM_ATTR_UNAME, pcmk__str_none)) {
         if (best_node == NULL) {
             crm_info("No allowed node for %s matches node attribute %s=%s",
                      rsc->id, attr, value);
         } else {
             crm_info("Allowed node %s for %s had best score (%d) "
                      "of those matching node attribute %s=%s",
                      best_node, rsc->id, best_score, attr, value);
         }
     }
     return best_score;
 }
 
 /*!
  * \internal
  * \brief Check whether a resource is allowed only on a single node
  *
  * \param[in] rsc   Resource to check
  *
  * \return \c true if \p rsc is allowed only on one node, otherwise \c false
  */
 static bool
 allowed_on_one(const pe_resource_t *rsc)
 {
     GHashTableIter iter;
     pe_node_t *allowed_node = NULL;
     int allowed_nodes = 0;
 
     g_hash_table_iter_init(&iter, rsc->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &allowed_node)) {
         if ((allowed_node->weight >= 0) && (++allowed_nodes > 1)) {
             pe_rsc_trace(rsc, "%s is allowed on multiple nodes", rsc->id);
             return false;
         }
     }
     pe_rsc_trace(rsc, "%s is allowed %s", rsc->id,
                  ((allowed_nodes == 1)? "on a single node" : "nowhere"));
     return (allowed_nodes == 1);
 }
 
 /*!
  * \internal
  * \brief Add resource's colocation matches to current node assignment scores
  *
  * For each node in a given table, if any of a given resource's allowed nodes
  * have a matching value for the colocation attribute, add the highest of those
  * nodes' scores to the node's score.
  *
  * \param[in,out] nodes          Table of nodes with assignment scores so far
  * \param[in]     source_rsc     Resource whose node scores to add
  * \param[in]     target_rsc     Resource on whose behalf to update \p nodes
  * \param[in]     colocation     Original colocation constraint (must not be
  *                               NULL; used to get the configured primary
  *                               resource's stickiness and the colocation node
  *                               attribute)
  * \param[in]     factor         Factor by which to multiply scores being added
  * \param[in]     only_positive  Whether to add only positive scores
  */
 static void
 add_node_scores_matching_attr(GHashTable *nodes,
                               const pe_resource_t *source_rsc,
                               const pe_resource_t *target_rsc,
                               const pcmk__colocation_t *colocation,
                               float factor, bool only_positive)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
     const char *attr = colocation->node_attribute;
 
     // Iterate through each node
     g_hash_table_iter_init(&iter, nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
         float delta_f = 0;
         int delta = 0;
         int score = 0;
         int new_score = 0;
         const char *value = pcmk__colocation_node_attr(node, attr, target_rsc);
 
         score = best_node_score_matching_attr(source_rsc, attr, value);
 
         if ((factor < 0) && (score < 0)) {
             /* If the dependent is anti-colocated, we generally don't want the
              * primary to prefer nodes that the dependent avoids. That could
              * lead to unnecessary shuffling of the primary when the dependent
              * hits its migration threshold somewhere, for example.
              *
              * However, there are cases when it is desirable. If the dependent
              * can't run anywhere but where the primary is, it would be
              * worthwhile to move the primary for the sake of keeping the
              * dependent active.
              *
              * We can't know that exactly at this point since we don't know
              * where the primary will be assigned, but we can limit considering
              * the preference to when the dependent is allowed only on one node.
              * This is less than ideal for multiple reasons:
              *
              * - the dependent could be allowed on more than one node but have
              *   anti-colocation primaries on each;
              * - the dependent could be a clone or bundle with multiple
              *   instances, and the dependent as a whole is allowed on multiple
              *   nodes but some instance still can't run
              * - the dependent has considered node-specific criteria such as
              *   location constraints and stickiness by this point, but might
              *   have other factors that end up disallowing a node
              *
              * but the alternative is making the primary move when it doesn't
              * need to.
              *
              * We also consider the primary's stickiness and influence, so the
              * user has some say in the matter. (This is the configured primary,
              * not a particular instance of the primary, but that doesn't matter
              * unless stickiness uses a rule to vary by node, and that seems
              * acceptable to ignore.)
              */
             if ((colocation->primary->stickiness >= -score)
                 || !pcmk__colocation_has_influence(colocation, NULL)
                 || !allowed_on_one(colocation->dependent)) {
                 crm_trace("%s: Filtering %d + %f * %d "
                           "(double negative disallowed)",
                           pe__node_name(node), node->weight, factor, score);
                 continue;
             }
         }
 
         if (node->weight == INFINITY_HACK) {
             crm_trace("%s: Filtering %d + %f * %d (node was marked unusable)",
                       pe__node_name(node), node->weight, factor, score);
             continue;
         }
 
         delta_f = factor * score;
 
         // Round the number; see http://c-faq.com/fp/round.html
         delta = (int) ((delta_f < 0)? (delta_f - 0.5) : (delta_f + 0.5));
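         /* For example, factor 0.5 and score 3 give delta_f 1.5, which rounds
          * to 2, while factor -0.5 and score 3 give -1.5, which rounds to -2.
          */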
 
         /* Small factors can obliterate the small scores that are often actually
          * used in configurations. If the score and factor are nonzero, ensure
          * that the result is nonzero as well.
          */
         if ((delta == 0) && (score != 0)) {
             if (factor > 0.0) {
                 delta = 1;
             } else if (factor < 0.0) {
                 delta = -1;
             }
         }
 
         new_score = pcmk__add_scores(delta, node->weight);
 
         if (only_positive && (new_score < 0) && (node->weight > 0)) {
             crm_trace("%s: Filtering %d + %f * %d = %d "
                       "(negative disallowed, marking node unusable)",
                       pe__node_name(node), node->weight, factor, score,
                       new_score);
             node->weight = INFINITY_HACK;
             continue;
         }
 
         if (only_positive && (new_score < 0) && (node->weight == 0)) {
             crm_trace("%s: Filtering %d + %f * %d = %d (negative disallowed)",
                       pe__node_name(node), node->weight, factor, score,
                       new_score);
             continue;
         }
 
         crm_trace("%s: %d + %f * %d = %d", pe__node_name(node),
                   node->weight, factor, score, new_score);
         node->weight = new_score;
     }
 }
 
 /*!
  * \internal
  * \brief Update nodes with scores of colocated resources' nodes
  *
  * Given a table of nodes and a resource, update the nodes' scores with the
  * scores of the best nodes matching the attribute used for each of the
  * resource's relevant colocations.
  *
  * \param[in,out] source_rsc  Resource whose node scores to add
  * \param[in]     target_rsc  Resource on whose behalf to update \p *nodes
  * \param[in]     log_id      Resource ID for logs (if \c NULL, use
  *                            \p source_rsc ID)
  * \param[in,out] nodes       Nodes to update (set initial contents to \c NULL
  *                            to copy allowed nodes from \p source_rsc)
  * \param[in]     colocation  Original colocation constraint (used to get
  *                            configured primary resource's stickiness, and
  *                            to get colocation node attribute; if \c NULL,
  *                            <tt>source_rsc</tt>'s own matching node scores
  *                            will not be added, and \p *nodes must be \c NULL
  *                            as well)
  * \param[in]     factor      Incorporate scores multiplied by this factor
  * \param[in]     flags       Bitmask of enum pcmk__coloc_select values
  *
  * \note \c NULL \p target_rsc, \c NULL \p *nodes, \c NULL \p colocation, and
  *       the \c pcmk__coloc_select_this_with flag are used together (and only by
  *       \c cmp_resources()).
  * \note The caller remains responsible for freeing \p *nodes.
  * \note This is the shared implementation of
  *       \c resource_alloc_functions_t:add_colocated_node_scores().
  */
 void
 pcmk__add_colocated_node_scores(pe_resource_t *source_rsc,
                                 const pe_resource_t *target_rsc,
                                 const char *log_id,
                                 GHashTable **nodes,
                                 const pcmk__colocation_t *colocation,
                                 float factor, uint32_t flags)
 {
     GHashTable *work = NULL;
 
     CRM_ASSERT((source_rsc != NULL) && (nodes != NULL)
                && ((colocation != NULL)
                    || ((target_rsc == NULL) && (*nodes == NULL))));
 
     if (log_id == NULL) {
         log_id = source_rsc->id;
     }
 
     // Avoid infinite recursion
     if (pcmk_is_set(source_rsc->flags, pe_rsc_merging)) {
         pe_rsc_info(source_rsc, "%s: Breaking dependency loop at %s",
                     log_id, source_rsc->id);
         return;
     }
     pe__set_resource_flags(source_rsc, pe_rsc_merging);
 
     if (*nodes == NULL) {
         work = pcmk__copy_node_table(source_rsc->allowed_nodes);
         target_rsc = source_rsc;
     } else {
         const bool pos = pcmk_is_set(flags, pcmk__coloc_select_nonnegative);
 
         pe_rsc_trace(source_rsc, "%s: Merging %s scores from %s (at %.6f)",
                      log_id, (pos? "positive" : "all"), source_rsc->id, factor);
         work = pcmk__copy_node_table(*nodes);
         add_node_scores_matching_attr(work, source_rsc, target_rsc, colocation,
                                       factor, pos);
     }
 
     if (work == NULL) {
         pe__clear_resource_flags(source_rsc, pe_rsc_merging);
         return;
     }
 
     if (pcmk__any_node_available(work)) {
         GList *colocations = NULL;
 
         if (pcmk_is_set(flags, pcmk__coloc_select_this_with)) {
             colocations = pcmk__this_with_colocations(source_rsc);
             pe_rsc_trace(source_rsc,
                          "Checking additional %d optional '%s with' "
                          "constraints",
                          g_list_length(colocations), source_rsc->id);
         } else {
             colocations = pcmk__with_this_colocations(source_rsc);
             pe_rsc_trace(source_rsc,
                          "Checking additional %d optional 'with %s' "
                          "constraints",
                          g_list_length(colocations), source_rsc->id);
         }
         flags |= pcmk__coloc_select_active;
 
         for (GList *iter = colocations; iter != NULL; iter = iter->next) {
             pcmk__colocation_t *constraint = iter->data;
 
             pe_resource_t *other = NULL;
             float other_factor = factor * constraint->score / (float) INFINITY;
 
             if (pcmk_is_set(flags, pcmk__coloc_select_this_with)) {
                 other = constraint->primary;
             } else if (!pcmk__colocation_has_influence(constraint, NULL)) {
                 continue;
             } else {
                 other = constraint->dependent;
             }
 
             pe_rsc_trace(source_rsc,
                          "Optionally merging score of '%s' constraint "
                          "(%s with %s)",
                          constraint->id, constraint->dependent->id,
                          constraint->primary->id);
             other->cmds->add_colocated_node_scores(other, target_rsc, log_id,
                                                    &work, constraint,
                                                    other_factor, flags);
             pe__show_node_scores(true, NULL, log_id, work, source_rsc->cluster);
         }
         g_list_free(colocations);
 
     } else if (pcmk_is_set(flags, pcmk__coloc_select_active)) {
         pe_rsc_info(source_rsc, "%s: Rolling back optional scores from %s",
                     log_id, source_rsc->id);
         g_hash_table_destroy(work);
         pe__clear_resource_flags(source_rsc, pe_rsc_merging);
         return;
     }
 
 
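     /* Restore any nodes that were temporarily marked unusable (INFINITY_HACK)
      * while merging scores, giving them a minimal positive score instead.
      */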
     if (pcmk_is_set(flags, pcmk__coloc_select_nonnegative)) {
         pe_node_t *node = NULL;
         GHashTableIter iter;
 
         g_hash_table_iter_init(&iter, work);
         while (g_hash_table_iter_next(&iter, NULL, (void **)&node)) {
             if (node->weight == INFINITY_HACK) {
                 node->weight = 1;
             }
         }
     }
 
     if (*nodes != NULL) {
         g_hash_table_destroy(*nodes);
     }
     *nodes = work;
 
     pe__clear_resource_flags(source_rsc, pe_rsc_merging);
 }
 
 /*!
  * \internal
  * \brief Apply a "with this" colocation to a resource's allowed node scores
  *
  * \param[in,out] data       Colocation to apply
  * \param[in,out] user_data  Resource being assigned
  */
 void
 pcmk__add_dependent_scores(gpointer data, gpointer user_data)
 {
     pcmk__colocation_t *colocation = data;
     pe_resource_t *target_rsc = user_data;
 
     pe_resource_t *source_rsc = colocation->dependent;
     const float factor = colocation->score / (float) INFINITY;
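     // e.g., a colocation score of half INFINITY attenuates to a factor of 0.5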
     uint32_t flags = pcmk__coloc_select_active;
 
     if (!pcmk__colocation_has_influence(colocation, NULL)) {
         return;
     }
     if (target_rsc->variant == pe_clone) {
         flags |= pcmk__coloc_select_nonnegative;
     }
     pe_rsc_trace(target_rsc,
                  "%s: Incorporating attenuated %s assignment scores due "
                  "to colocation %s",
                  target_rsc->id, source_rsc->id, colocation->id);
     source_rsc->cmds->add_colocated_node_scores(source_rsc, target_rsc,
                                                 source_rsc->id,
                                                 &target_rsc->allowed_nodes,
                                                 colocation, factor, flags);
 }
 
 /*!
  * \internal
  * \brief Exclude nodes from a dependent's node table if not in a given list
  *
  * Given a dependent resource in a colocation and a list of nodes where the
  * primary resource will run, set a node's score to \c -INFINITY in the
  * dependent's node table if not found in the primary nodes list.
  *
  * \param[in,out] dependent      Dependent resource
  * \param[in]     primary        Primary resource (for logging only)
  * \param[in]     colocation     Colocation constraint (for logging only)
  * \param[in]     primary_nodes  List of nodes where the primary will have
  *                               unblocked instances in a suitable role
  * \param[in]     merge_scores   If \c true and a node is found in both the
  *                               dependent's allowed node table and
  *                               \p primary_nodes, add the node's score in
  *                               \p primary_nodes to the node's score in the
  *                               dependent's table
  */
 void
 pcmk__colocation_intersect_nodes(pe_resource_t *dependent,
                                  const pe_resource_t *primary,
                                  const pcmk__colocation_t *colocation,
                                  const GList *primary_nodes, bool merge_scores)
 {
     GHashTableIter iter;
     pe_node_t *dependent_node = NULL;
 
     CRM_ASSERT((dependent != NULL) && (primary != NULL)
                && (colocation != NULL));
 
     g_hash_table_iter_init(&iter, dependent->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &dependent_node)) {
         const pe_node_t *primary_node = NULL;
 
         primary_node = pe_find_node_id(primary_nodes,
                                        dependent_node->details->id);
         if (primary_node == NULL) {
             dependent_node->weight = -INFINITY;
             pe_rsc_trace(dependent,
                          "Banning %s from %s (no primary instance) for %s",
                          dependent->id, pe__node_name(dependent_node),
                          colocation->id);
 
         } else if (merge_scores) {
             dependent_node->weight = pcmk__add_scores(dependent_node->weight,
                                                       primary_node->weight);
             pe_rsc_trace(dependent,
                          "Added %s's score %s to %s's score for %s (now %s) "
                          "for colocation %s",
                          primary->id, pcmk_readable_score(primary_node->weight),
                          dependent->id, pe__node_name(dependent_node),
                          pcmk_readable_score(dependent_node->weight),
                          colocation->id);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Get all colocations affecting a resource as the primary
  *
  * \param[in] rsc  Resource to get colocations for
  *
  * \return Newly allocated list of colocations affecting \p rsc as primary
  *
  * \note This is a convenience wrapper for the with_this_colocations() method.
  */
 GList *
 pcmk__with_this_colocations(const pe_resource_t *rsc)
 {
     GList *list = NULL;
 
     rsc->cmds->with_this_colocations(rsc, rsc, &list);
     return list;
 }
 
 /*!
  * \internal
  * \brief Get all colocations affecting a resource as the dependent
  *
  * \param[in] rsc  Resource to get colocations for
  *
  * \return Newly allocated list of colocations affecting \p rsc as dependent
  *
  * \note This is a convenience wrapper for the this_with_colocations() method.
  */
 GList *
 pcmk__this_with_colocations(const pe_resource_t *rsc)
 {
     GList *list = NULL;
 
     rsc->cmds->this_with_colocations(rsc, rsc, &list);
     return list;
 }
diff --git a/lib/pacemaker/pcmk_sched_tickets.c b/lib/pacemaker/pcmk_sched_tickets.c
index d45959af35..7e4ee265fc 100644
--- a/lib/pacemaker/pcmk_sched_tickets.c
+++ b/lib/pacemaker/pcmk_sched_tickets.c
@@ -1,530 +1,531 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdbool.h>
 #include <glib.h>
 
 #include <crm/crm.h>
+#include <crm/common/scheduler_internal.h>
 #include <crm/pengine/status.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
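 // Possible effects of a ticket constraint's loss-policy on affected resources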
 enum loss_ticket_policy {
     loss_ticket_stop,
     loss_ticket_demote,
     loss_ticket_fence,
     loss_ticket_freeze
 };
 
 typedef struct {
     const char *id;
     pe_resource_t *rsc;
     pe_ticket_t *ticket;
     enum loss_ticket_policy loss_policy;
     int role;
 } rsc_ticket_t;
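 
 /* A ticket constraint ties a resource to a cluster ticket. For example
  * (illustrative configuration only):
  *
  *   <rsc_ticket id="rsc1-req-ticketA" rsc="rsc1" rsc-role="Promoted"
  *               ticket="ticketA" loss-policy="demote"/>
  *
  * Here, rsc1's promoted role depends on ticketA; if the ticket is revoked,
  * rsc1 is demoted.
  */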
 
 /*!
  * \brief Check whether a ticket constraint matches a resource by role
  *
  * \param[in] rsc         Resource to compare with ticket
  * \param[in] rsc_ticket  Ticket constraint
  *
  * \return true if the constraint has no role or the resource's role matches
  *         the constraint's role, otherwise false
  */
 static bool
 ticket_role_matches(const pe_resource_t *rsc, const rsc_ticket_t *rsc_ticket)
 {
     if ((rsc_ticket->role == pcmk_role_unknown)
         || (rsc_ticket->role == rsc->role)) {
         return true;
     }
     pe_rsc_trace(rsc, "Skipping constraint: \"%s\" state filter",
                  role2text(rsc_ticket->role));
     return false;
 }
 
 /*!
  * \brief Create location constraints and fencing as needed for a ticket
  *
  * \param[in,out] rsc         Resource affected by ticket
  * \param[in]     rsc_ticket  Ticket
  */
 static void
 constraints_for_ticket(pe_resource_t *rsc, const rsc_ticket_t *rsc_ticket)
 {
     GList *iter = NULL;
 
     CRM_CHECK((rsc != NULL) && (rsc_ticket != NULL), return);
 
     if (rsc_ticket->ticket->granted && !rsc_ticket->ticket->standby) {
         return;
     }
 
     if (rsc->children) {
         pe_rsc_trace(rsc, "Processing ticket dependencies from %s", rsc->id);
         for (iter = rsc->children; iter != NULL; iter = iter->next) {
             constraints_for_ticket((pe_resource_t *) iter->data, rsc_ticket);
         }
         return;
     }
 
     pe_rsc_trace(rsc, "%s: Processing ticket dependency on %s (%s, %s)",
                  rsc->id, rsc_ticket->ticket->id, rsc_ticket->id,
                  role2text(rsc_ticket->role));
 
     if (!rsc_ticket->ticket->granted && (rsc->running_on != NULL)) {
 
         switch (rsc_ticket->loss_policy) {
             case loss_ticket_stop:
                 resource_location(rsc, NULL, -INFINITY, "__loss_of_ticket__",
                                   rsc->cluster);
                 break;
 
             case loss_ticket_demote:
                 // Promotion score will be set to -INFINITY in promotion_order()
                 if (rsc_ticket->role != pcmk_role_promoted) {
                     resource_location(rsc, NULL, -INFINITY,
                                       "__loss_of_ticket__", rsc->cluster);
                 }
                 break;
 
             case loss_ticket_fence:
                 if (!ticket_role_matches(rsc, rsc_ticket)) {
                     return;
                 }
 
                 resource_location(rsc, NULL, -INFINITY, "__loss_of_ticket__",
                                   rsc->cluster);
 
                 for (iter = rsc->running_on; iter != NULL; iter = iter->next) {
                     pe_fence_node(rsc->cluster, (pe_node_t *) iter->data,
                                   "deadman ticket was lost", FALSE);
                 }
                 break;
 
             case loss_ticket_freeze:
                 if (!ticket_role_matches(rsc, rsc_ticket)) {
                     return;
                 }
                 if (rsc->running_on != NULL) {
                     pe__clear_resource_flags(rsc, pe_rsc_managed);
                     pe__set_resource_flags(rsc, pe_rsc_block);
                 }
                 break;
         }
 
     } else if (!rsc_ticket->ticket->granted) {
 
         if ((rsc_ticket->role != pcmk_role_promoted)
             || (rsc_ticket->loss_policy == loss_ticket_stop)) {
             resource_location(rsc, NULL, -INFINITY, "__no_ticket__",
                               rsc->cluster);
         }
 
     } else if (rsc_ticket->ticket->standby) {
 
         if ((rsc_ticket->role != pcmk_role_promoted)
             || (rsc_ticket->loss_policy == loss_ticket_stop)) {
             resource_location(rsc, NULL, -INFINITY, "__ticket_standby__",
                               rsc->cluster);
         }
     }
 }
 
 static void
 rsc_ticket_new(const char *id, pe_resource_t *rsc, pe_ticket_t *ticket,
                const char *state, const char *loss_policy)
 {
     rsc_ticket_t *new_rsc_ticket = NULL;
 
     if (rsc == NULL) {
         pcmk__config_err("Ignoring ticket '%s' because resource "
                          "does not exist", id);
         return;
     }
 
     new_rsc_ticket = calloc(1, sizeof(rsc_ticket_t));
     if (new_rsc_ticket == NULL) {
         return;
     }
 
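     /* A role of "Started" (or no role) means the constraint applies regardless
      * of the resource's role, so treat it as the unknown role internally.
      */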
     if (pcmk__str_eq(state, RSC_ROLE_STARTED_S,
                      pcmk__str_null_matches|pcmk__str_casei)) {
-        state = RSC_ROLE_UNKNOWN_S;
+        state = PCMK__ROLE_UNKNOWN;
     }
 
     new_rsc_ticket->id = id;
     new_rsc_ticket->ticket = ticket;
     new_rsc_ticket->rsc = rsc;
     new_rsc_ticket->role = text2role(state);
 
     if (pcmk__str_eq(loss_policy, "fence", pcmk__str_casei)) {
         if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
             new_rsc_ticket->loss_policy = loss_ticket_fence;
         } else {
             pcmk__config_err("Resetting '" XML_TICKET_ATTR_LOSS_POLICY
                              "' for ticket '%s' to 'stop' "
                              "because fencing is not configured", ticket->id);
             loss_policy = "stop";
         }
     }
 
     if (new_rsc_ticket->loss_policy == loss_ticket_fence) {
         crm_debug("On loss of ticket '%s': Fence the nodes running %s (%s)",
                   new_rsc_ticket->ticket->id, new_rsc_ticket->rsc->id,
                   role2text(new_rsc_ticket->role));
 
     } else if (pcmk__str_eq(loss_policy, "freeze", pcmk__str_casei)) {
         crm_debug("On loss of ticket '%s': Freeze %s (%s)",
                   new_rsc_ticket->ticket->id, new_rsc_ticket->rsc->id,
                   role2text(new_rsc_ticket->role));
         new_rsc_ticket->loss_policy = loss_ticket_freeze;
 
     } else if (pcmk__str_eq(loss_policy, PCMK_ACTION_DEMOTE, pcmk__str_casei)) {
         crm_debug("On loss of ticket '%s': Demote %s (%s)",
                   new_rsc_ticket->ticket->id, new_rsc_ticket->rsc->id,
                   role2text(new_rsc_ticket->role));
         new_rsc_ticket->loss_policy = loss_ticket_demote;
 
     } else if (pcmk__str_eq(loss_policy, "stop", pcmk__str_casei)) {
         crm_debug("On loss of ticket '%s': Stop %s (%s)",
                   new_rsc_ticket->ticket->id, new_rsc_ticket->rsc->id,
                   role2text(new_rsc_ticket->role));
         new_rsc_ticket->loss_policy = loss_ticket_stop;
 
     } else {
         if (new_rsc_ticket->role == pcmk_role_promoted) {
             crm_debug("On loss of ticket '%s': Default to demote %s (%s)",
                       new_rsc_ticket->ticket->id, new_rsc_ticket->rsc->id,
                       role2text(new_rsc_ticket->role));
             new_rsc_ticket->loss_policy = loss_ticket_demote;
 
         } else {
             crm_debug("On loss of ticket '%s': Default to stop %s (%s)",
                       new_rsc_ticket->ticket->id, new_rsc_ticket->rsc->id,
                       role2text(new_rsc_ticket->role));
             new_rsc_ticket->loss_policy = loss_ticket_stop;
         }
     }
 
     pe_rsc_trace(rsc, "%s (%s) ==> %s",
                  rsc->id, role2text(new_rsc_ticket->role), ticket->id);
 
     rsc->rsc_tickets = g_list_append(rsc->rsc_tickets, new_rsc_ticket);
 
     rsc->cluster->ticket_constraints = g_list_append(
         rsc->cluster->ticket_constraints, new_rsc_ticket);
 
     if (!(new_rsc_ticket->ticket->granted) || new_rsc_ticket->ticket->standby) {
         constraints_for_ticket(rsc, new_rsc_ticket);
     }
 }
 
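 /* Resource sets allow a single rsc_ticket constraint to cover multiple
  * resources. For example (illustrative configuration only):
  *
  *   <rsc_ticket id="tkt-set" ticket="ticketA" loss-policy="fence">
  *     <resource_set id="tkt-set-0" role="Promoted">
  *       <resource_ref id="rsc1"/>
  *       <resource_ref id="rsc2"/>
  *     </resource_set>
  *   </rsc_ticket>
  *
  * unpack_rsc_ticket_set() creates one rsc_ticket_t entry per referenced
  * resource.
  */
 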
 // \return Standard Pacemaker return code
 static int
 unpack_rsc_ticket_set(xmlNode *set, pe_ticket_t *ticket,
                       const char *loss_policy, pe_working_set_t *data_set)
 {
     const char *set_id = NULL;
     const char *role = NULL;
 
     CRM_CHECK(set != NULL, return EINVAL);
     CRM_CHECK(ticket != NULL, return EINVAL);
 
     set_id = ID(set);
     if (set_id == NULL) {
         pcmk__config_err("Ignoring <" XML_CONS_TAG_RSC_SET "> without "
                          XML_ATTR_ID);
         return pcmk_rc_unpack_error;
     }
 
     role = crm_element_value(set, "role");
 
     for (xmlNode *xml_rsc = first_named_child(set, XML_TAG_RESOURCE_REF);
          xml_rsc != NULL; xml_rsc = crm_next_same_xml(xml_rsc)) {
 
         pe_resource_t *resource = NULL;
 
         resource = pcmk__find_constraint_resource(data_set->resources,
                                                   ID(xml_rsc));
         if (resource == NULL) {
             pcmk__config_err("%s: No resource found for %s",
                              set_id, ID(xml_rsc));
             return pcmk_rc_unpack_error;
         }
         pe_rsc_trace(resource, "Resource '%s' depends on ticket '%s'",
                      resource->id, ticket->id);
         rsc_ticket_new(set_id, resource, ticket, role, loss_policy);
     }
 
     return pcmk_rc_ok;
 }
 
 static void
 unpack_simple_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
 {
     const char *id = NULL;
     const char *ticket_str = crm_element_value(xml_obj, XML_TICKET_ATTR_TICKET);
     const char *loss_policy = crm_element_value(xml_obj,
                                                 XML_TICKET_ATTR_LOSS_POLICY);
 
     pe_ticket_t *ticket = NULL;
 
     const char *rsc_id = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE);
     const char *state = crm_element_value(xml_obj,
                                           XML_COLOC_ATTR_SOURCE_ROLE);
 
     // @COMPAT: Deprecated since 2.1.5
     const char *instance = crm_element_value(xml_obj,
                                              XML_COLOC_ATTR_SOURCE_INSTANCE);
 
     pe_resource_t *rsc = NULL;
 
     if (instance != NULL) {
         pe_warn_once(pe_wo_coloc_inst,
                      "Support for " XML_COLOC_ATTR_SOURCE_INSTANCE " is "
                      "deprecated and will be removed in a future release.");
     }
 
     CRM_CHECK(xml_obj != NULL, return);
 
     id = ID(xml_obj);
     if (id == NULL) {
         pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
                          xml_obj->name);
         return;
     }
 
     if (ticket_str == NULL) {
         pcmk__config_err("Ignoring constraint '%s' without ticket specified",
                          id);
         return;
     } else {
         ticket = g_hash_table_lookup(data_set->tickets, ticket_str);
     }
 
     if (ticket == NULL) {
         pcmk__config_err("Ignoring constraint '%s' because ticket '%s' "
                          "does not exist", id, ticket_str);
         return;
     }
 
     if (rsc_id == NULL) {
         pcmk__config_err("Ignoring constraint '%s' without resource", id);
         return;
     } else {
         rsc = pcmk__find_constraint_resource(data_set->resources, rsc_id);
     }
 
     if (rsc == NULL) {
         pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
                          "does not exist", id, rsc_id);
         return;
 
     } else if ((instance != NULL) && !pe_rsc_is_clone(rsc)) {
         pcmk__config_err("Ignoring constraint '%s' because resource '%s' "
                          "is not a clone but instance '%s' was requested",
                          id, rsc_id, instance);
         return;
     }
 
     if (instance != NULL) {
         rsc = find_clone_instance(rsc, instance);
         if (rsc == NULL) {
             pcmk__config_warn("Ignoring constraint '%s' because resource '%s' "
                               "does not have an instance '%s'",
                               "'%s'", id, rsc_id, instance);
             return;
         }
     }
 
     rsc_ticket_new(id, rsc, ticket, state, loss_policy);
 }
 
 // \return Standard Pacemaker return code
 static int
 unpack_rsc_ticket_tags(xmlNode *xml_obj, xmlNode **expanded_xml,
                        pe_working_set_t *data_set)
 {
     const char *id = NULL;
     const char *rsc_id = NULL;
     const char *state = NULL;
 
     pe_resource_t *rsc = NULL;
     pe_tag_t *tag = NULL;
 
     xmlNode *rsc_set = NULL;
 
     *expanded_xml = NULL;
 
     CRM_CHECK(xml_obj != NULL, return EINVAL);
 
     id = ID(xml_obj);
     if (id == NULL) {
         pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
                          xml_obj->name);
         return pcmk_rc_unpack_error;
     }
 
     // Check whether there are any resource sets with template or tag references
     *expanded_xml = pcmk__expand_tags_in_sets(xml_obj, data_set);
     if (*expanded_xml != NULL) {
         crm_log_xml_trace(*expanded_xml, "Expanded rsc_ticket");
         return pcmk_rc_ok;
     }
 
     rsc_id = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE);
     if (rsc_id == NULL) {
         return pcmk_rc_ok;
     }
 
     if (!pcmk__valid_resource_or_tag(data_set, rsc_id, &rsc, &tag)) {
         pcmk__config_err("Ignoring constraint '%s' because '%s' is not a "
                          "valid resource or tag", id, rsc_id);
         return pcmk_rc_unpack_error;
 
     } else if (rsc != NULL) {
         // No template or tag is referenced
         return pcmk_rc_ok;
     }
 
     state = crm_element_value(xml_obj, XML_COLOC_ATTR_SOURCE_ROLE);
 
     *expanded_xml = copy_xml(xml_obj);
 
     // Convert any template or tag reference in "rsc" into ticket resource_set
     if (!pcmk__tag_to_set(*expanded_xml, &rsc_set, XML_COLOC_ATTR_SOURCE,
                           false, data_set)) {
         free_xml(*expanded_xml);
         *expanded_xml = NULL;
         return pcmk_rc_unpack_error;
     }
 
     if (rsc_set != NULL) {
         if (state != NULL) {
             // Move "rsc-role" into converted resource_set as a "role" attribute
             crm_xml_add(rsc_set, "role", state);
             xml_remove_prop(*expanded_xml, XML_COLOC_ATTR_SOURCE_ROLE);
         }
 
     } else {
         free_xml(*expanded_xml);
         *expanded_xml = NULL;
     }
 
     return pcmk_rc_ok;
 }
 
 void
 pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set)
 {
     xmlNode *set = NULL;
     bool any_sets = false;
 
     const char *id = NULL;
     const char *ticket_str = NULL;
 
     pe_ticket_t *ticket = NULL;
 
     xmlNode *orig_xml = NULL;
     xmlNode *expanded_xml = NULL;
 
     CRM_CHECK(xml_obj != NULL, return);
 
     id = ID(xml_obj);
     if (id == NULL) {
         pcmk__config_err("Ignoring <%s> constraint without " XML_ATTR_ID,
                          xml_obj->name);
         return;
     }
 
     if (data_set->tickets == NULL) {
         data_set->tickets = pcmk__strkey_table(free, destroy_ticket);
     }
 
     ticket_str = crm_element_value(xml_obj, XML_TICKET_ATTR_TICKET);
     if (ticket_str == NULL) {
         pcmk__config_err("Ignoring constraint '%s' without ticket", id);
         return;
     } else {
         ticket = g_hash_table_lookup(data_set->tickets, ticket_str);
     }
 
     if (ticket == NULL) {
         ticket = ticket_new(ticket_str, data_set);
         if (ticket == NULL) {
             return;
         }
     }
 
     if (unpack_rsc_ticket_tags(xml_obj, &expanded_xml,
                                data_set) != pcmk_rc_ok) {
         return;
     }
     if (expanded_xml != NULL) {
         orig_xml = xml_obj;
         xml_obj = expanded_xml;
     }
 
     for (set = first_named_child(xml_obj, XML_CONS_TAG_RSC_SET); set != NULL;
          set = crm_next_same_xml(set)) {
 
         const char *loss_policy = NULL;
 
         any_sets = true;
         set = expand_idref(set, data_set->input);
         loss_policy = crm_element_value(xml_obj, XML_TICKET_ATTR_LOSS_POLICY);
 
         if ((set == NULL) // Configuration error, message already logged
             || (unpack_rsc_ticket_set(set, ticket, loss_policy,
                                       data_set) != pcmk_rc_ok)) {
             if (expanded_xml != NULL) {
                 free_xml(expanded_xml);
             }
             return;
         }
     }
 
     if (expanded_xml) {
         free_xml(expanded_xml);
         xml_obj = orig_xml;
     }
 
     if (!any_sets) {
         unpack_simple_rsc_ticket(xml_obj, data_set);
     }
 }
 
 /*!
  * \internal
  * \brief Ban resource from a node if it doesn't have a promotion ticket
  *
  * If a resource has tickets for the promoted role, and the ticket is either not
  * granted or set to standby, then ban the resource from all nodes.
  *
  * \param[in,out] rsc  Resource to check
  */
 void
 pcmk__require_promotion_tickets(pe_resource_t *rsc)
 {
     for (GList *item = rsc->rsc_tickets; item != NULL; item = item->next) {
         rsc_ticket_t *rsc_ticket = (rsc_ticket_t *) item->data;
 
         if ((rsc_ticket->role == pcmk_role_promoted)
             && (!rsc_ticket->ticket->granted || rsc_ticket->ticket->standby)) {
             resource_location(rsc, NULL, -INFINITY,
                               "__stateful_without_ticket__", rsc->cluster);
         }
     }
 }
diff --git a/lib/pengine/common.c b/lib/pengine/common.c
index fbe4a7b561..367efd2a6c 100644
--- a/lib/pengine/common.c
+++ b/lib/pengine/common.c
@@ -1,627 +1,628 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 #include <crm/common/util.h>
 
 #include <glib.h>
 
+#include <crm/common/scheduler_internal.h>
 #include <crm/pengine/internal.h>
 
 gboolean was_processing_error = FALSE;
 gboolean was_processing_warning = FALSE;
 
 static bool
 check_placement_strategy(const char *value)
 {
     return pcmk__strcase_any_of(value, "default", "utilization", "minimal",
                            "balanced", NULL);
 }
 
 static pcmk__cluster_option_t pe_opts[] = {
     /* name, old name, type, allowed values,
      * default value, validator,
      * short description,
      * long description
      */
     {
         "no-quorum-policy", NULL, "select", "stop, freeze, ignore, demote, suicide",
         "stop", pcmk__valid_quorum,
         N_("What to do when the cluster does not have quorum"),
         NULL
     },
     {
         "symmetric-cluster", NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("Whether resources can run on any node by default"),
         NULL
     },
     {
         "maintenance-mode", NULL, "boolean", NULL,
         "false", pcmk__valid_boolean,
         N_("Whether the cluster should refrain from monitoring, starting, "
             "and stopping resources"),
         NULL
     },
     {
         "start-failure-is-fatal", NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("Whether a start failure should prevent a resource from being "
             "recovered on the same node"),
         N_("When true, the cluster will immediately ban a resource from a node "
             "if it fails to start there. When false, the cluster will instead "
             "check the resource's fail count against its migration-threshold.")
     },
     {
         "enable-startup-probes", NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("Whether the cluster should check for active resources during start-up"),
         NULL
     },
     {
         XML_CONFIG_ATTR_SHUTDOWN_LOCK, NULL, "boolean", NULL,
         "false", pcmk__valid_boolean,
         N_("Whether to lock resources to a cleanly shut down node"),
         N_("When true, resources active on a node when it is cleanly shut down "
             "are kept \"locked\" to that node (not allowed to run elsewhere) "
             "until they start again on that node after it rejoins (or for at "
             "most shutdown-lock-limit, if set). Stonith resources and "
             "Pacemaker Remote connections are never locked. Clone and bundle "
             "instances and the promoted role of promotable clones are "
             "currently never locked, though support could be added in a future "
             "release.")
     },
     {
         XML_CONFIG_ATTR_SHUTDOWN_LOCK_LIMIT, NULL, "time", NULL,
         "0", pcmk__valid_interval_spec,
         N_("Do not lock resources to a cleanly shut down node longer than "
            "this"),
         N_("If shutdown-lock is true and this is set to a nonzero time "
             "duration, shutdown locks will expire after this much time has "
             "passed since the shutdown was initiated, even if the node has not "
             "rejoined.")
     },
 
     // Fencing-related options
     {
         "stonith-enabled", NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("*** Advanced Use Only *** "
             "Whether nodes may be fenced as part of recovery"),
         N_("If false, unresponsive nodes are immediately assumed to be harmless, "
             "and resources that were active on them may be recovered "
             "elsewhere. This can result in a \"split-brain\" situation, "
             "potentially leading to data loss and/or service unavailability.")
     },
     {
         "stonith-action", NULL, "select", "reboot, off, poweroff",
         PCMK_ACTION_REBOOT, pcmk__is_fencing_action,
         N_("Action to send to fence device when a node needs to be fenced "
             "(\"poweroff\" is a deprecated alias for \"off\")"),
         NULL
     },
     {
         "stonith-timeout", NULL, "time", NULL,
         "60s", pcmk__valid_interval_spec,
         N_("*** Advanced Use Only *** Unused by Pacemaker"),
         N_("This value is not used by Pacemaker, but is kept for backward "
             "compatibility, and certain legacy fence agents might use it.")
     },
     {
         XML_ATTR_HAVE_WATCHDOG, NULL, "boolean", NULL,
         "false", pcmk__valid_boolean,
         N_("Whether watchdog integration is enabled"),
         N_("This is set automatically by the cluster according to whether SBD "
             "is detected to be in use. User-configured values are ignored. "
             "The value `true` is meaningful if diskless SBD is used and "
             "`stonith-watchdog-timeout` is nonzero. In that case, if fencing "
             "is required, watchdog-based self-fencing will be performed via "
             "SBD without requiring a fencing resource explicitly configured.")
     },
     {
         "concurrent-fencing", NULL, "boolean", NULL,
         PCMK__CONCURRENT_FENCING_DEFAULT, pcmk__valid_boolean,
         N_("Allow performing fencing operations in parallel"),
         NULL
     },
     {
         "startup-fencing", NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("*** Advanced Use Only *** Whether to fence unseen nodes at start-up"),
         N_("Setting this to false may lead to a \"split-brain\" situation,"
             "potentially leading to data loss and/or service unavailability.")
     },
     {
         XML_CONFIG_ATTR_PRIORITY_FENCING_DELAY, NULL, "time", NULL,
         "0", pcmk__valid_interval_spec,
         N_("Apply fencing delay targeting the lost nodes with the highest total resource priority"),
         N_("Apply specified delay for the fencings that are targeting the lost "
             "nodes with the highest total resource priority in case we don't "
             "have the majority of the nodes in our cluster partition, so that "
             "the more significant nodes potentially win any fencing match, "
             "which is especially meaningful under split-brain of 2-node "
             "cluster. A promoted resource instance takes the base priority + 1 "
             "on calculation if the base priority is not 0. Any static/random "
             "delays that are introduced by `pcmk_delay_base/max` configured "
             "for the corresponding fencing resources will be added to this "
             "delay. This delay should be significantly greater than, safely "
             "twice, the maximum `pcmk_delay_base/max`. By default, priority "
             "fencing delay is disabled.")
     },
 
     {
         XML_CONFIG_ATTR_NODE_PENDING_TIMEOUT, NULL, "time", NULL,
         "10min", pcmk__valid_interval_spec,
         N_("How long to wait for a node that has joined the cluster to join "
            "the process group"),
         N_("A node that has joined the cluster can be pending on joining the "
            "process group. We wait up to this much time for it. If it times "
            "out, fencing targeting the node will be issued if enabled.")
     },
     {
         "cluster-delay", NULL, "time", NULL,
         "60s", pcmk__valid_interval_spec,
         N_("Maximum time for node-to-node communication"),
         N_("The node elected Designated Controller (DC) will consider an action "
             "failed if it does not get a response from the node executing the "
             "action within this time (after considering the action's own "
             "timeout). The \"correct\" value will depend on the speed and "
             "load of your network and cluster nodes.")
     },
     {
         "batch-limit", NULL, "integer", NULL,
         "0", pcmk__valid_number,
         N_("Maximum number of jobs that the cluster may execute in parallel "
             "across all nodes"),
         N_("The \"correct\" value will depend on the speed and load of your "
             "network and cluster nodes. If set to 0, the cluster will "
             "impose a dynamically calculated limit when any node has a "
             "high load.")
     },
     {
         "migration-limit", NULL, "integer", NULL,
         "-1", pcmk__valid_number,
         N_("The number of live migration actions that the cluster is allowed "
             "to execute in parallel on a node (-1 means no limit)")
     },
 
     /* Orphans and stopping */
     {
         "stop-all-resources", NULL, "boolean", NULL,
         "false", pcmk__valid_boolean,
         N_("Whether the cluster should stop all active resources"),
         NULL
     },
     {
         "stop-orphan-resources", NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("Whether to stop resources that were removed from the configuration"),
         NULL
     },
     {
         "stop-orphan-actions", NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("Whether to cancel recurring actions removed from the configuration"),
         NULL
     },
     {
         "remove-after-stop", NULL, "boolean", NULL,
         "false", pcmk__valid_boolean,
         N_("*** Deprecated *** Whether to remove stopped resources from "
             "the executor"),
         N_("Values other than default are poorly tested and potentially dangerous."
             " This option will be removed in a future release.")
     },
 
     /* Storing inputs */
     {
         "pe-error-series-max", NULL, "integer", NULL,
         "-1", pcmk__valid_number,
         N_("The number of scheduler inputs resulting in errors to save"),
         N_("Zero to disable, -1 to store unlimited.")
     },
     {
         "pe-warn-series-max",  NULL, "integer", NULL,
         "5000", pcmk__valid_number,
         N_("The number of scheduler inputs resulting in warnings to save"),
         N_("Zero to disable, -1 to store unlimited.")
     },
     {
         "pe-input-series-max", NULL, "integer", NULL,
         "4000", pcmk__valid_number,
         N_("The number of scheduler inputs without errors or warnings to save"),
         N_("Zero to disable, -1 to store unlimited.")
     },
 
     /* Node health */
     {
         PCMK__OPT_NODE_HEALTH_STRATEGY, NULL, "select",
         PCMK__VALUE_NONE ", " PCMK__VALUE_MIGRATE_ON_RED ", "
             PCMK__VALUE_ONLY_GREEN ", " PCMK__VALUE_PROGRESSIVE ", "
             PCMK__VALUE_CUSTOM,
         PCMK__VALUE_NONE, pcmk__validate_health_strategy,
         N_("How cluster should react to node health attributes"),
         N_("Requires external entities to create node attributes (named with "
             "the prefix \"#health\") with values \"red\", "
             "\"yellow\", or \"green\".")
     },
     {
         PCMK__OPT_NODE_HEALTH_BASE, NULL, "integer", NULL,
         "0", pcmk__valid_number,
         N_("Base health score assigned to a node"),
         N_("Only used when \"node-health-strategy\" is set to \"progressive\".")
     },
     {
         PCMK__OPT_NODE_HEALTH_GREEN, NULL, "integer", NULL,
         "0", pcmk__valid_number,
         N_("The score to use for a node health attribute whose value is \"green\""),
         N_("Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive\".")
     },
     {
         PCMK__OPT_NODE_HEALTH_YELLOW, NULL, "integer", NULL,
         "0", pcmk__valid_number,
         N_("The score to use for a node health attribute whose value is \"yellow\""),
         N_("Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive\".")
     },
     {
         PCMK__OPT_NODE_HEALTH_RED, NULL, "integer", NULL,
         "-INFINITY", pcmk__valid_number,
         N_("The score to use for a node health attribute whose value is \"red\""),
         N_("Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive\".")
     },
 
     /* Placement strategy */
     {
         "placement-strategy", NULL, "select",
         "default, utilization, minimal, balanced",
         "default", check_placement_strategy,
         N_("How the cluster should allocate resources to nodes"),
         NULL
     },
 };
 
 void
 pe_metadata(pcmk__output_t *out)
 {
     const char *desc_short = "Pacemaker scheduler options";
     const char *desc_long = "Cluster options used by Pacemaker's scheduler";
 
     gchar *s = pcmk__format_option_metadata("pacemaker-schedulerd", desc_short,
                                             desc_long, pe_opts,
                                             PCMK__NELEM(pe_opts));
     out->output_xml(out, "metadata", s);
     g_free(s);
 }
 
 void
 verify_pe_options(GHashTable * options)
 {
     pcmk__validate_cluster_options(options, pe_opts, PCMK__NELEM(pe_opts));
 }
 
 const char *
 pe_pref(GHashTable * options, const char *name)
 {
     return pcmk__cluster_option(options, pe_opts, PCMK__NELEM(pe_opts), name);
 }
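
 /* Editor's illustration (not part of this patch): a minimal sketch of looking
  * up a scheduler option with pe_pref(). When the option is absent from the
  * supplied table, the compiled-in default from pe_opts is used (for example,
  * "60s" for cluster-delay). The empty table and the helper name are only for
  * illustration; real callers pass options unpacked from the CIB.
  */
 static void
 example_pe_pref_lookup(void)
 {
     GHashTable *options = pcmk__strkey_table(free, free);

     // Nothing is configured, so this falls back to the default defined above
     const char *delay = pe_pref(options, "cluster-delay");

     crm_trace("Effective cluster-delay: %s", delay);
     g_hash_table_destroy(options);
 }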
 
 const char *
 fail2text(enum action_fail_response fail)
 {
     const char *result = "<unknown>";
 
     switch (fail) {
         case action_fail_ignore:
             result = "ignore";
             break;
         case action_fail_demote:
             result = "demote";
             break;
         case action_fail_block:
             result = "block";
             break;
         case action_fail_recover:
             result = "recover";
             break;
         case action_fail_migrate:
             result = "migrate";
             break;
         case action_fail_stop:
             result = "stop";
             break;
         case action_fail_fence:
             result = "fence";
             break;
         case action_fail_standby:
             result = "standby";
             break;
         case action_fail_restart_container:
             result = "restart-container";
             break;
         case action_fail_reset_remote:
             result = "reset-remote";
             break;
     }
     return result;
 }
 
 enum action_tasks
 text2task(const char *task)
 {
     if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei)) {
         return stop_rsc;
     } else if (pcmk__str_eq(task, PCMK_ACTION_STOPPED, pcmk__str_casei)) {
         return stopped_rsc;
     } else if (pcmk__str_eq(task, PCMK_ACTION_START, pcmk__str_casei)) {
         return start_rsc;
     } else if (pcmk__str_eq(task, PCMK_ACTION_RUNNING, pcmk__str_casei)) {
         return started_rsc;
     } else if (pcmk__str_eq(task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_casei)) {
         return shutdown_crm;
     } else if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) {
         return stonith_node;
     } else if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
         return monitor_rsc;
     } else if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_casei)) {
         return action_notify;
     } else if (pcmk__str_eq(task, PCMK_ACTION_NOTIFIED, pcmk__str_casei)) {
         return action_notified;
     } else if (pcmk__str_eq(task, PCMK_ACTION_PROMOTE, pcmk__str_casei)) {
         return action_promote;
     } else if (pcmk__str_eq(task, PCMK_ACTION_DEMOTE, pcmk__str_casei)) {
         return action_demote;
     } else if (pcmk__str_eq(task, PCMK_ACTION_PROMOTED, pcmk__str_casei)) {
         return action_promoted;
     } else if (pcmk__str_eq(task, PCMK_ACTION_DEMOTED, pcmk__str_casei)) {
         return action_demoted;
     }
 #if SUPPORT_TRACING
     if (pcmk__str_eq(task, PCMK_ACTION_CANCEL, pcmk__str_casei)) {
         return no_action;
     } else if (pcmk__str_eq(task, PCMK_ACTION_DELETE, pcmk__str_casei)) {
         return no_action;
     } else if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
         return no_action;
     } else if (pcmk__str_eq(task, PCMK_ACTION_MIGRATE_TO, pcmk__str_casei)) {
         return no_action;
     } else if (pcmk__str_eq(task, PCMK_ACTION_MIGRATE_FROM, pcmk__str_casei)) {
         return no_action;
     }
     crm_trace("Unsupported action: %s", task);
 #endif
 
     return no_action;
 }
 
 const char *
 task2text(enum action_tasks task)
 {
     const char *result = "<unknown>";
 
     switch (task) {
         case no_action:
             result = "no_action";
             break;
         case stop_rsc:
             result = PCMK_ACTION_STOP;
             break;
         case stopped_rsc:
             result = PCMK_ACTION_STOPPED;
             break;
         case start_rsc:
             result = PCMK_ACTION_START;
             break;
         case started_rsc:
             result = PCMK_ACTION_RUNNING;
             break;
         case shutdown_crm:
             result = PCMK_ACTION_DO_SHUTDOWN;
             break;
         case stonith_node:
             result = PCMK_ACTION_STONITH;
             break;
         case monitor_rsc:
             result = PCMK_ACTION_MONITOR;
             break;
         case action_notify:
             result = PCMK_ACTION_NOTIFY;
             break;
         case action_notified:
             result = PCMK_ACTION_NOTIFIED;
             break;
         case action_promote:
             result = PCMK_ACTION_PROMOTE;
             break;
         case action_promoted:
             result = PCMK_ACTION_PROMOTED;
             break;
         case action_demote:
             result = PCMK_ACTION_DEMOTE;
             break;
         case action_demoted:
             result = PCMK_ACTION_DEMOTED;
             break;
     }
 
     return result;
 }
 
 const char *
 role2text(enum rsc_role_e role)
 {
     switch (role) {
         case pcmk_role_stopped:
             return RSC_ROLE_STOPPED_S;
 
         case pcmk_role_started:
             return RSC_ROLE_STARTED_S;
 
         case pcmk_role_unpromoted:
 #ifdef PCMK__COMPAT_2_0
             return RSC_ROLE_UNPROMOTED_LEGACY_S;
 #else
             return RSC_ROLE_UNPROMOTED_S;
 #endif
 
         case pcmk_role_promoted:
 #ifdef PCMK__COMPAT_2_0
             return RSC_ROLE_PROMOTED_LEGACY_S;
 #else
             return RSC_ROLE_PROMOTED_S;
 #endif
 
         default: // pcmk_role_unknown
-            return RSC_ROLE_UNKNOWN_S;
+            return PCMK__ROLE_UNKNOWN;
     }
 }
 
 enum rsc_role_e
 text2role(const char *role)
 {
     CRM_ASSERT(role != NULL);
     if (pcmk__str_eq(role, RSC_ROLE_STOPPED_S, pcmk__str_casei)) {
         return pcmk_role_stopped;
     } else if (pcmk__str_eq(role, RSC_ROLE_STARTED_S, pcmk__str_casei)) {
         return pcmk_role_started;
     } else if (pcmk__strcase_any_of(role, RSC_ROLE_UNPROMOTED_S,
                                     RSC_ROLE_UNPROMOTED_LEGACY_S, NULL)) {
         return pcmk_role_unpromoted;
     } else if (pcmk__strcase_any_of(role, RSC_ROLE_PROMOTED_S,
                                     RSC_ROLE_PROMOTED_LEGACY_S, NULL)) {
         return pcmk_role_promoted;
-    } else if (pcmk__str_eq(role, RSC_ROLE_UNKNOWN_S, pcmk__str_casei)) {
+    } else if (pcmk__str_eq(role, PCMK__ROLE_UNKNOWN, pcmk__str_casei)) {
         return pcmk_role_unknown;
     }
     crm_err("Unknown role: %s", role);
     return pcmk_role_unknown;
 }
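
 /* Editor's illustration (not part of this patch): a minimal sketch showing
  * that role2text() and text2role() still round-trip the unknown role now that
  * its string comes from PCMK__ROLE_UNKNOWN in <crm/common/roles_internal.h>.
  */
 static void
 example_role_round_trip(void)
 {
     const char *s = role2text(pcmk_role_unknown);

     CRM_ASSERT(pcmk__str_eq(s, PCMK__ROLE_UNKNOWN, pcmk__str_none));
     CRM_ASSERT(text2role(s) == pcmk_role_unknown);
 }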
 
 void
 add_hash_param(GHashTable * hash, const char *name, const char *value)
 {
     CRM_CHECK(hash != NULL, return);
 
     crm_trace("Adding name='%s' value='%s' to hash table",
               pcmk__s(name, "<null>"), pcmk__s(value, "<null>"));
     if (name == NULL || value == NULL) {
         return;
 
     } else if (pcmk__str_eq(value, "#default", pcmk__str_casei)) {
         return;
 
     } else if (g_hash_table_lookup(hash, name) == NULL) {
         g_hash_table_insert(hash, strdup(name), strdup(value));
     }
 }
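
 /* Editor's illustration (not part of this patch): a minimal sketch of
  * add_hash_param()'s insert-if-absent behavior; existing keys and the special
  * value "#default" are silently ignored. The parameter names are hypothetical.
  */
 static void
 example_add_hash_param(void)
 {
     GHashTable *params = pcmk__strkey_table(free, free);

     add_hash_param(params, "timeout", "20s");
     add_hash_param(params, "timeout", "60s");           // ignored: key exists
     add_hash_param(params, "target-role", "#default");  // ignored: "#default"

     CRM_ASSERT(pcmk__str_eq(g_hash_table_lookup(params, "timeout"), "20s",
                             pcmk__str_none));
     CRM_ASSERT(g_hash_table_lookup(params, "target-role") == NULL);
     g_hash_table_destroy(params);
 }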
 
 /*!
  * \internal
  * \brief Look up an attribute value on the appropriate node
  *
  * If \p node is a guest node and either the \c XML_RSC_ATTR_TARGET meta
  * attribute is set to "host" for \p rsc or \p force_host is \c true, query the
  * attribute on the node's host. Otherwise, query the attribute on \p node
  * itself.
  *
  * \param[in] node        Node to query attribute value on by default
  * \param[in] name        Name of attribute to query
  * \param[in] rsc         Resource on whose behalf we're querying
  * \param[in] node_type   Type of resource location lookup
  * \param[in] force_host  Force a lookup on the guest node's host, regardless of
  *                        the \c XML_RSC_ATTR_TARGET value
  *
  * \return Value of the attribute on \p node or on the host of \p node
  *
  * \note If \p force_host is \c true, \p node \e must be a guest node.
  */
 const char *
 pe__node_attribute_calculated(const pe_node_t *node, const char *name,
                               const pe_resource_t *rsc,
                               enum pe__rsc_node node_type,
                               bool force_host)
 {
     // @TODO: Use pe__is_guest_node() after merging libpe_{rules,status}
     bool is_guest = (node != NULL) && (node->details->type == node_remote)
                     && (node->details->remote_rsc != NULL)
                     && (node->details->remote_rsc->container != NULL);
     const char *source = NULL;
     const char *node_type_s = NULL;
     const char *reason = NULL;
 
     const pe_resource_t *container = NULL;
     const pe_node_t *host = NULL;
 
     CRM_ASSERT((node != NULL) && (name != NULL) && (rsc != NULL)
                && (!force_host || is_guest));
 
     /* Ignore XML_RSC_ATTR_TARGET if node is not a guest node. This represents a
      * user configuration error.
      */
     source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET);
     if (!force_host
         && (!is_guest || !pcmk__str_eq(source, "host", pcmk__str_casei))) {
 
         return g_hash_table_lookup(node->details->attrs, name);
     }
 
     container = node->details->remote_rsc->container;
 
     switch (node_type) {
         case pe__rsc_node_assigned:
             node_type_s = "assigned";
             host = container->allocated_to;
             if (host == NULL) {
                 reason = "not assigned";
             }
             break;
 
         case pe__rsc_node_current:
             node_type_s = "current";
 
             if (container->running_on != NULL) {
                 host = container->running_on->data;
             }
             if (host == NULL) {
                 reason = "inactive";
             }
             break;
 
         default:
             // Add support for other enum pe__rsc_node values if needed
             CRM_ASSERT(false);
             break;
     }
 
     if (host != NULL) {
         const char *value = g_hash_table_lookup(host->details->attrs, name);
 
         pe_rsc_trace(rsc,
                      "%s: Value lookup for %s on %s container host %s %s%s",
                      rsc->id, name, node_type_s, pe__node_name(host),
                      ((value != NULL)? "succeeded: " : "failed"),
                      pcmk__s(value, ""));
         return value;
     }
     pe_rsc_trace(rsc,
                  "%s: Not looking for %s on %s container host: %s is %s",
                  rsc->id, name, node_type_s, container->id, reason);
     return NULL;
 }
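
 /* Editor's illustration (not part of this patch): a minimal sketch of calling
  * pe__node_attribute_calculated() for a resource that may run on a guest
  * node. The attribute name "ping" is hypothetical; node and rsc are assumed
  * to come from an already-unpacked working set.
  */
 static const char *
 example_node_attr_lookup(const pe_node_t *node, const pe_resource_t *rsc)
 {
     // For guest nodes with XML_RSC_ATTR_TARGET set to "host", this queries
     // the attribute on the container's host rather than on the guest itself
     const char *value = pe__node_attribute_calculated(node, "ping", rsc,
                                                       pe__rsc_node_current,
                                                       false);

     crm_trace("%s sees ping=%s on %s", rsc->id, pcmk__s(value, "<unset>"),
               pe__node_name(node));
     return value;
 }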
 
 const char *
 pe_node_attribute_raw(const pe_node_t *node, const char *name)
 {
     if(node == NULL) {
         return NULL;
     }
     return g_hash_table_lookup(node->details->attrs, name);
 }
diff --git a/lib/pengine/complex.c b/lib/pengine/complex.c
index 94b3d35af9..d879e3f93d 100644
--- a/lib/pengine/complex.c
+++ b/lib/pengine/complex.c
@@ -1,1177 +1,1178 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml_internal.h>
+#include <crm/common/scheduler_internal.h>
 
 #include "pe_status_private.h"
 
 void populate_hash(xmlNode * nvpair_list, GHashTable * hash, const char **attrs, int attrs_length);
 
 static pe_node_t *active_node(const pe_resource_t *rsc, unsigned int *count_all,
                               unsigned int *count_clean);
 
 resource_object_functions_t resource_class_functions[] = {
     {
          native_unpack,
          native_find_rsc,
          native_parameter,
          native_print,
          native_active,
          native_resource_state,
          native_location,
          native_free,
          pe__count_common,
          pe__native_is_filtered,
          active_node,
          pe__primitive_max_per_node,
     },
     {
          group_unpack,
          native_find_rsc,
          native_parameter,
          group_print,
          group_active,
          group_resource_state,
          native_location,
          group_free,
          pe__count_common,
          pe__group_is_filtered,
          active_node,
          pe__group_max_per_node,
     },
     {
          clone_unpack,
          native_find_rsc,
          native_parameter,
          clone_print,
          clone_active,
          clone_resource_state,
          native_location,
          clone_free,
          pe__count_common,
          pe__clone_is_filtered,
          active_node,
          pe__clone_max_per_node,
     },
     {
          pe__unpack_bundle,
          native_find_rsc,
          native_parameter,
          pe__print_bundle,
          pe__bundle_active,
          pe__bundle_resource_state,
          native_location,
          pe__free_bundle,
          pe__count_bundle,
          pe__bundle_is_filtered,
          pe__bundle_active_node,
          pe__bundle_max_per_node,
     }
 };
 
 static enum pe_obj_types
 get_resource_type(const char *name)
 {
     if (pcmk__str_eq(name, XML_CIB_TAG_RESOURCE, pcmk__str_casei)) {
         return pe_native;
 
     } else if (pcmk__str_eq(name, XML_CIB_TAG_GROUP, pcmk__str_casei)) {
         return pe_group;
 
     } else if (pcmk__str_eq(name, XML_CIB_TAG_INCARNATION, pcmk__str_casei)) {
         return pe_clone;
 
     } else if (pcmk__str_eq(name, PCMK_XE_PROMOTABLE_LEGACY, pcmk__str_casei)) {
         // @COMPAT deprecated since 2.0.0
         return pe_clone;
 
     } else if (pcmk__str_eq(name, XML_CIB_TAG_CONTAINER, pcmk__str_casei)) {
         return pe_container;
     }
 
     return pe_unknown;
 }
 
 static void
 dup_attr(gpointer key, gpointer value, gpointer user_data)
 {
     add_hash_param(user_data, key, value);
 }
 
 static void
 expand_parents_fixed_nvpairs(pe_resource_t * rsc, pe_rule_eval_data_t * rule_data, GHashTable * meta_hash, pe_working_set_t * data_set)
 {
     GHashTable *parent_orig_meta = pcmk__strkey_table(free, free);
     pe_resource_t *p = rsc->parent;
 
     if (p == NULL) {
         return ;
     }
 
     /* Search all parent resources and collect the "meta_attributes" values
      * set explicitly in their original XML into the hash table. Values from
      * closer ancestors take precedence and are not overwritten.
      */
     while(p != NULL) {
         /* Build the comparison table, including any attribute sets pulled in
          * via id-ref. */
         pe__unpack_dataset_nvpairs(p->xml, XML_TAG_META_SETS,
                                rule_data, parent_orig_meta, NULL, FALSE, data_set);
         p = p->parent; 
     }
 
     /* If there is a fixed value of "meta_attributes" of the parent resource, it will be processed. */
     if (parent_orig_meta != NULL) {
         GHashTableIter iter;
         char *key = NULL;
         char *value = NULL;
 
         g_hash_table_iter_init(&iter, parent_orig_meta);
         while (g_hash_table_iter_next(&iter, (gpointer *) &key, (gpointer *) &value)) {
             /* Meta-attributes set in a parent resource's original XML are
              * also applied to the child resource. Attributes that the child
              * already has are not updated.
              */
             dup_attr(key, value, meta_hash);
         }
     }
 
     if (parent_orig_meta != NULL) {
         g_hash_table_destroy(parent_orig_meta);
     }
     
     return ;
 
 }
 void
 get_meta_attributes(GHashTable * meta_hash, pe_resource_t * rsc,
                     pe_node_t * node, pe_working_set_t * data_set)
 {
     pe_rsc_eval_data_t rsc_rule_data = {
         .standard = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS),
         .provider = crm_element_value(rsc->xml, XML_AGENT_ATTR_PROVIDER),
         .agent = crm_element_value(rsc->xml, XML_EXPR_ATTR_TYPE)
     };
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .role = pcmk_role_unknown,
         .now = data_set->now,
         .match_data = NULL,
         .rsc_data = &rsc_rule_data,
         .op_data = NULL
     };
 
     if (node) {
         rule_data.node_hash = node->details->attrs;
     }
 
     for (xmlAttrPtr a = pcmk__xe_first_attr(rsc->xml); a != NULL; a = a->next) {
         const char *prop_name = (const char *) a->name;
         const char *prop_value = pcmk__xml_attr_value(a);
 
         add_hash_param(meta_hash, prop_name, prop_value);
     }
 
     pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_META_SETS, &rule_data,
                                meta_hash, NULL, FALSE, data_set);
 
     /* Set the "meta_attributes" explicitly set in the parent resource to the hash table of the child resource. */
     /* If it is already explicitly set as a child, it will not be overwritten. */
     if (rsc->parent != NULL) {
         expand_parents_fixed_nvpairs(rsc, &rule_data, meta_hash, data_set);
     }
 
     /* check the defaults */
     pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_META_SETS,
                                &rule_data, meta_hash, NULL, FALSE, data_set);
 
     /* If there is "meta_attributes" that the parent resource has not explicitly set, set a value that is not set from rsc_default either. */
     /* The values already set up to this point will not be overwritten. */
     if (rsc->parent) {
         g_hash_table_foreach(rsc->parent->meta, dup_attr, meta_hash);
     }
 }
 
 void
 get_rsc_attributes(GHashTable *meta_hash, const pe_resource_t *rsc,
                    const pe_node_t *node, pe_working_set_t *data_set)
 {
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .role = pcmk_role_unknown,
         .now = data_set->now,
         .match_data = NULL,
         .rsc_data = NULL,
         .op_data = NULL
     };
 
     if (node) {
         rule_data.node_hash = node->details->attrs;
     }
 
     pe__unpack_dataset_nvpairs(rsc->xml, XML_TAG_ATTR_SETS, &rule_data,
                                meta_hash, NULL, FALSE, data_set);
 
     /* set anything else based on the parent */
     if (rsc->parent != NULL) {
         get_rsc_attributes(meta_hash, rsc->parent, node, data_set);
 
     } else {
         /* and finally check the defaults */
         pe__unpack_dataset_nvpairs(data_set->rsc_defaults, XML_TAG_ATTR_SETS,
                                    &rule_data, meta_hash, NULL, FALSE, data_set);
     }
 }
 
 static char *
 template_op_key(xmlNode * op)
 {
     const char *name = crm_element_value(op, "name");
     const char *role = crm_element_value(op, "role");
     char *key = NULL;
 
     if ((role == NULL)
         || pcmk__strcase_any_of(role, RSC_ROLE_STARTED_S, RSC_ROLE_UNPROMOTED_S,
                                 RSC_ROLE_UNPROMOTED_LEGACY_S, NULL)) {
-        role = RSC_ROLE_UNKNOWN_S;
+        role = PCMK__ROLE_UNKNOWN;
     }
 
     key = crm_strdup_printf("%s-%s", name, role);
     return key;
 }
 
 static gboolean
 unpack_template(xmlNode * xml_obj, xmlNode ** expanded_xml, pe_working_set_t * data_set)
 {
     xmlNode *cib_resources = NULL;
     xmlNode *template = NULL;
     xmlNode *new_xml = NULL;
     xmlNode *child_xml = NULL;
     xmlNode *rsc_ops = NULL;
     xmlNode *template_ops = NULL;
     const char *template_ref = NULL;
     const char *clone = NULL;
     const char *id = NULL;
 
     if (xml_obj == NULL) {
         pe_err("No resource object for template unpacking");
         return FALSE;
     }
 
     template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
     if (template_ref == NULL) {
         return TRUE;
     }
 
     id = ID(xml_obj);
     if (id == NULL) {
         pe_err("'%s' object must have a id", xml_obj->name);
         return FALSE;
     }
 
     if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
         pe_err("The resource object '%s' should not reference itself", id);
         return FALSE;
     }
 
     cib_resources = get_xpath_object("//"XML_CIB_TAG_RESOURCES, data_set->input, LOG_TRACE);
     if (cib_resources == NULL) {
         pe_err("No resources configured");
         return FALSE;
     }
 
     template = pcmk__xe_match(cib_resources, XML_CIB_TAG_RSC_TEMPLATE,
                               XML_ATTR_ID, template_ref);
     if (template == NULL) {
         pe_err("No template named '%s'", template_ref);
         return FALSE;
     }
 
     new_xml = copy_xml(template);
     xmlNodeSetName(new_xml, xml_obj->name);
     crm_xml_replace(new_xml, XML_ATTR_ID, id);
 
     clone = crm_element_value(xml_obj, XML_RSC_ATTR_INCARNATION);
     if(clone) {
         crm_xml_add(new_xml, XML_RSC_ATTR_INCARNATION, clone);
     }
 
     template_ops = find_xml_node(new_xml, "operations", FALSE);
 
     for (child_xml = pcmk__xe_first_child(xml_obj); child_xml != NULL;
          child_xml = pcmk__xe_next(child_xml)) {
         xmlNode *new_child = NULL;
 
         new_child = add_node_copy(new_xml, child_xml);
 
         if (pcmk__str_eq((const char *)new_child->name, "operations", pcmk__str_none)) {
             rsc_ops = new_child;
         }
     }
 
     if (template_ops && rsc_ops) {
         xmlNode *op = NULL;
         GHashTable *rsc_ops_hash = pcmk__strkey_table(free, NULL);
 
         for (op = pcmk__xe_first_child(rsc_ops); op != NULL;
              op = pcmk__xe_next(op)) {
 
             char *key = template_op_key(op);
 
             g_hash_table_insert(rsc_ops_hash, key, op);
         }
 
         for (op = pcmk__xe_first_child(template_ops); op != NULL;
              op = pcmk__xe_next(op)) {
 
             char *key = template_op_key(op);
 
             if (g_hash_table_lookup(rsc_ops_hash, key) == NULL) {
                 add_node_copy(rsc_ops, op);
             }
 
             free(key);
         }
 
         if (rsc_ops_hash) {
             g_hash_table_destroy(rsc_ops_hash);
         }
 
         free_xml(template_ops);
     }
 
     /*free_xml(*expanded_xml); */
     *expanded_xml = new_xml;
 
     /* Disable multi-level templates for now */
     /*if(unpack_template(new_xml, expanded_xml, data_set) == FALSE) {
        free_xml(*expanded_xml);
        *expanded_xml = NULL;
 
        return FALSE;
        } */
 
     return TRUE;
 }
 
 static gboolean
 add_template_rsc(xmlNode * xml_obj, pe_working_set_t * data_set)
 {
     const char *template_ref = NULL;
     const char *id = NULL;
 
     if (xml_obj == NULL) {
         pe_err("No resource object for processing resource list of template");
         return FALSE;
     }
 
     template_ref = crm_element_value(xml_obj, XML_CIB_TAG_RSC_TEMPLATE);
     if (template_ref == NULL) {
         return TRUE;
     }
 
     id = ID(xml_obj);
     if (id == NULL) {
         pe_err("'%s' object must have a id", xml_obj->name);
         return FALSE;
     }
 
     if (pcmk__str_eq(template_ref, id, pcmk__str_none)) {
         pe_err("The resource object '%s' should not reference itself", id);
         return FALSE;
     }
 
     if (add_tag_ref(data_set->template_rsc_sets, template_ref, id) == FALSE) {
         return FALSE;
     }
 
     return TRUE;
 }
 
 static bool
 detect_promotable(pe_resource_t *rsc)
 {
     const char *promotable = g_hash_table_lookup(rsc->meta,
                                                  XML_RSC_ATTR_PROMOTABLE);
 
     if (crm_is_true(promotable)) {
         return TRUE;
     }
 
     // @COMPAT deprecated since 2.0.0
     if (pcmk__xe_is(rsc->xml, PCMK_XE_PROMOTABLE_LEGACY)) {
         /* @TODO in some future version, pe_warn_once() here,
          *       then drop support in even later version
          */
         g_hash_table_insert(rsc->meta, strdup(XML_RSC_ATTR_PROMOTABLE),
                             strdup(XML_BOOLEAN_TRUE));
         return TRUE;
     }
     return FALSE;
 }
 
 static void
 free_params_table(gpointer data)
 {
     g_hash_table_destroy((GHashTable *) data);
 }
 
 /*!
  * \brief Get a table of resource parameters
  *
  * \param[in,out] rsc       Resource to query
  * \param[in]     node      Node for evaluating rules (NULL for defaults)
  * \param[in,out] data_set  Cluster working set
  *
  * \return Hash table containing resource parameter names and values
  *         (or NULL if \p rsc or \p data_set is NULL)
  * \note The returned table will be destroyed when the resource is freed, so
  *       callers should not destroy it.
  */
 GHashTable *
 pe_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
               pe_working_set_t *data_set)
 {
     GHashTable *params_on_node = NULL;
 
     /* A NULL node is used to request the resource's default parameters
      * (not evaluated for node), but we always want something non-NULL
      * as a hash table key.
      */
     const char *node_name = "";
 
     // Sanity check
     if ((rsc == NULL) || (data_set == NULL)) {
         return NULL;
     }
     if ((node != NULL) && (node->details->uname != NULL)) {
         node_name = node->details->uname;
     }
 
     // Find the parameter table for given node
     if (rsc->parameter_cache == NULL) {
         rsc->parameter_cache = pcmk__strikey_table(free, free_params_table);
     } else {
         params_on_node = g_hash_table_lookup(rsc->parameter_cache, node_name);
     }
 
     // If none exists yet, create one with parameters evaluated for node
     if (params_on_node == NULL) {
         params_on_node = pcmk__strkey_table(free, free);
         get_rsc_attributes(params_on_node, rsc, node, data_set);
         g_hash_table_insert(rsc->parameter_cache, strdup(node_name),
                             params_on_node);
     }
     return params_on_node;
 }
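
 /* Editor's illustration (not part of this patch): a minimal sketch of
  * pe_rsc_params() caching. Parameters are evaluated once per node name, the
  * same table is returned on later calls, and callers must not destroy it.
  * The parameter name "ip" is hypothetical; rsc, node, and data_set are
  * assumed to come from an already-unpacked working set.
  */
 static void
 example_rsc_params(pe_resource_t *rsc, const pe_node_t *node,
                    pe_working_set_t *data_set)
 {
     GHashTable *params = pe_rsc_params(rsc, node, data_set);

     CRM_ASSERT(params == pe_rsc_params(rsc, node, data_set)); // cached table
     crm_trace("%s ip=%s", rsc->id,
               pcmk__s(g_hash_table_lookup(params, "ip"), "<unset>"));
 }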
 
 /*!
  * \internal
  * \brief Unpack a resource's "requires" meta-attribute
  *
  * \param[in,out] rsc         Resource being unpacked
  * \param[in]     value       Value of "requires" meta-attribute
  * \param[in]     is_default  Whether \p value was selected by default
  */
 static void
 unpack_requires(pe_resource_t *rsc, const char *value, bool is_default)
 {
     if (pcmk__str_eq(value, PCMK__VALUE_NOTHING, pcmk__str_casei)) {
 
     } else if (pcmk__str_eq(value, PCMK__VALUE_QUORUM, pcmk__str_casei)) {
         pe__set_resource_flags(rsc, pe_rsc_needs_quorum);
 
     } else if (pcmk__str_eq(value, PCMK__VALUE_FENCING, pcmk__str_casei)) {
         pe__set_resource_flags(rsc, pe_rsc_needs_fencing);
         if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
             pcmk__config_warn("%s requires fencing but fencing is disabled",
                               rsc->id);
         }
 
     } else if (pcmk__str_eq(value, PCMK__VALUE_UNFENCING, pcmk__str_casei)) {
         if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
             pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
                               "to \"" PCMK__VALUE_QUORUM "\" because fencing "
                               "devices cannot require unfencing", rsc->id);
             unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
             return;
 
         } else if (!pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
             pcmk__config_warn("Resetting \"" XML_RSC_ATTR_REQUIRES "\" for %s "
                               "to \"" PCMK__VALUE_QUORUM "\" because fencing "
                               "is disabled", rsc->id);
             unpack_requires(rsc, PCMK__VALUE_QUORUM, true);
             return;
 
         } else {
             pe__set_resource_flags(rsc,
                                    pe_rsc_needs_fencing|pe_rsc_needs_unfencing);
         }
 
     } else {
         const char *orig_value = value;
 
         if (pcmk_is_set(rsc->flags, pe_rsc_fence_device)) {
             value = PCMK__VALUE_QUORUM;
 
         } else if ((rsc->variant == pe_native)
                    && xml_contains_remote_node(rsc->xml)) {
             value = PCMK__VALUE_QUORUM;
 
         } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_enable_unfencing)) {
             value = PCMK__VALUE_UNFENCING;
 
         } else if (pcmk_is_set(rsc->cluster->flags, pe_flag_stonith_enabled)) {
             value = PCMK__VALUE_FENCING;
 
         } else if (rsc->cluster->no_quorum_policy == no_quorum_ignore) {
             value = PCMK__VALUE_NOTHING;
 
         } else {
             value = PCMK__VALUE_QUORUM;
         }
 
         if (orig_value != NULL) {
             pcmk__config_err("Resetting '" XML_RSC_ATTR_REQUIRES "' for %s "
                              "to '%s' because '%s' is not valid",
                               rsc->id, value, orig_value);
         }
         unpack_requires(rsc, value, true);
         return;
     }
 
     pe_rsc_trace(rsc, "\tRequired to start: %s%s", value,
                  (is_default? " (default)" : ""));
 }
 
 #ifndef PCMK__COMPAT_2_0
 static void
 warn_about_deprecated_classes(pe_resource_t *rsc)
 {
     const char *std = crm_element_value(rsc->xml, XML_AGENT_ATTR_CLASS);
 
     if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_UPSTART, pcmk__str_none)) {
         pe_warn_once(pe_wo_upstart,
                      "Support for Upstart resources (such as %s) is deprecated "
                      "and will be removed in a future release of Pacemaker",
                      rsc->id);
 
     } else if (pcmk__str_eq(std, PCMK_RESOURCE_CLASS_NAGIOS, pcmk__str_none)) {
         pe_warn_once(pe_wo_nagios,
                      "Support for Nagios resources (such as %s) is deprecated "
                      "and will be removed in a future release of Pacemaker",
                      rsc->id);
     }
 }
 #endif
 
 /*!
  * \internal
  * \brief Unpack configuration XML for a given resource
  *
  * Unpack the XML object containing a resource's configuration into a new
  * \c pe_resource_t object.
  *
  * \param[in]     xml_obj   XML node containing the resource's configuration
  * \param[out]    rsc       Where to store the unpacked resource information
  * \param[in]     parent    Resource's parent, if any
  * \param[in,out] data_set  Cluster working set
  *
  * \return Standard Pacemaker return code
  * \note If pcmk_rc_ok is returned, \p *rsc is guaranteed to be non-NULL, and
  *       the caller is responsible for freeing it using its variant-specific
  *       free() method. Otherwise, \p *rsc is guaranteed to be NULL.
  */
 int
 pe__unpack_resource(xmlNode *xml_obj, pe_resource_t **rsc,
                     pe_resource_t *parent, pe_working_set_t *data_set)
 {
     xmlNode *expanded_xml = NULL;
     xmlNode *ops = NULL;
     const char *value = NULL;
     const char *id = NULL;
     bool guest_node = false;
     bool remote_node = false;
 
     pe_rule_eval_data_t rule_data = {
         .node_hash = NULL,
         .role = pcmk_role_unknown,
         .now = NULL,
         .match_data = NULL,
         .rsc_data = NULL,
         .op_data = NULL
     };
 
     CRM_CHECK(rsc != NULL, return EINVAL);
     CRM_CHECK((xml_obj != NULL) && (data_set != NULL),
               *rsc = NULL;
               return EINVAL);
 
     rule_data.now = data_set->now;
 
     crm_log_xml_trace(xml_obj, "[raw XML]");
 
     id = crm_element_value(xml_obj, XML_ATTR_ID);
     if (id == NULL) {
         pe_err("Ignoring <%s> configuration without " XML_ATTR_ID,
                xml_obj->name);
         return pcmk_rc_unpack_error;
     }
 
     if (unpack_template(xml_obj, &expanded_xml, data_set) == FALSE) {
         return pcmk_rc_unpack_error;
     }
 
     *rsc = calloc(1, sizeof(pe_resource_t));
     if (*rsc == NULL) {
         crm_crit("Unable to allocate memory for resource '%s'", id);
         return ENOMEM;
     }
     (*rsc)->cluster = data_set;
 
     if (expanded_xml) {
         crm_log_xml_trace(expanded_xml, "[expanded XML]");
         (*rsc)->xml = expanded_xml;
         (*rsc)->orig_xml = xml_obj;
 
     } else {
         (*rsc)->xml = xml_obj;
         (*rsc)->orig_xml = NULL;
     }
 
     /* Do not use xml_obj from here on, use (*rsc)->xml in case templates are involved */
 
     (*rsc)->parent = parent;
 
     ops = find_xml_node((*rsc)->xml, "operations", FALSE);
     (*rsc)->ops_xml = expand_idref(ops, data_set->input);
 
     (*rsc)->variant = get_resource_type((const char *) (*rsc)->xml->name);
     if ((*rsc)->variant == pe_unknown) {
         pe_err("Ignoring resource '%s' of unknown type '%s'",
                id, (*rsc)->xml->name);
         common_free(*rsc);
         *rsc = NULL;
         return pcmk_rc_unpack_error;
     }
 
 #ifndef PCMK__COMPAT_2_0
     warn_about_deprecated_classes(*rsc);
 #endif
 
     (*rsc)->meta = pcmk__strkey_table(free, free);
     (*rsc)->allowed_nodes = pcmk__strkey_table(NULL, free);
     (*rsc)->known_on = pcmk__strkey_table(NULL, free);
 
     value = crm_element_value((*rsc)->xml, XML_RSC_ATTR_INCARNATION);
     if (value) {
         (*rsc)->id = crm_strdup_printf("%s:%s", id, value);
         add_hash_param((*rsc)->meta, XML_RSC_ATTR_INCARNATION, value);
 
     } else {
         (*rsc)->id = strdup(id);
     }
 
     (*rsc)->fns = &resource_class_functions[(*rsc)->variant];
 
     get_meta_attributes((*rsc)->meta, *rsc, NULL, data_set);
     (*rsc)->parameters = pe_rsc_params(*rsc, NULL, data_set); // \deprecated
 
     (*rsc)->flags = 0;
     pe__set_resource_flags(*rsc, pe_rsc_runnable|pe_rsc_provisional);
 
     if (!pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
         pe__set_resource_flags(*rsc, pe_rsc_managed);
     }
 
     (*rsc)->rsc_cons = NULL;
     (*rsc)->rsc_tickets = NULL;
     (*rsc)->actions = NULL;
     (*rsc)->role = pcmk_role_stopped;
     (*rsc)->next_role = pcmk_role_unknown;
 
     (*rsc)->recovery_type = pcmk_multiply_active_restart;
     (*rsc)->stickiness = 0;
     (*rsc)->migration_threshold = INFINITY;
     (*rsc)->failure_timeout = 0;
 
     value = g_hash_table_lookup((*rsc)->meta, XML_CIB_ATTR_PRIORITY);
     (*rsc)->priority = char2score(value);
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CRITICAL);
     if ((value == NULL) || crm_is_true(value)) {
         pe__set_resource_flags(*rsc, pe_rsc_critical);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_NOTIFY);
     if (crm_is_true(value)) {
         pe__set_resource_flags(*rsc, pe_rsc_notify);
     }
 
     if (xml_contains_remote_node((*rsc)->xml)) {
         (*rsc)->is_remote_node = TRUE;
         if (g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_CONTAINER)) {
             guest_node = true;
         } else {
             remote_node = true;
         }
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_OP_ATTR_ALLOW_MIGRATE);
     if (crm_is_true(value)) {
         pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
     } else if ((value == NULL) && remote_node) {
         /* By default, we want remote nodes to be able
          * to float around the cluster without having to stop all the
          * resources within the remote-node before moving. Allowing
          * migration support enables this feature. If this ever causes
          * problems, migration support can be explicitly turned off with
          * allow-migrate=false.
          */
         pe__set_resource_flags(*rsc, pe_rsc_allow_migrate);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MANAGED);
     if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
         if (crm_is_true(value)) {
             pe__set_resource_flags(*rsc, pe_rsc_managed);
         } else {
             pe__clear_resource_flags(*rsc, pe_rsc_managed);
         }
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MAINTENANCE);
     if (crm_is_true(value)) {
         pe__clear_resource_flags(*rsc, pe_rsc_managed);
         pe__set_resource_flags(*rsc, pe_rsc_maintenance);
     }
     if (pcmk_is_set(data_set->flags, pe_flag_maintenance_mode)) {
         pe__clear_resource_flags(*rsc, pe_rsc_managed);
         pe__set_resource_flags(*rsc, pe_rsc_maintenance);
     }
 
     if (pe_rsc_is_clone(pe__const_top_resource(*rsc, false))) {
         value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_UNIQUE);
         if (crm_is_true(value)) {
             pe__set_resource_flags(*rsc, pe_rsc_unique);
         }
         if (detect_promotable(*rsc)) {
             pe__set_resource_flags(*rsc, pe_rsc_promotable);
         }
     } else {
         pe__set_resource_flags(*rsc, pe_rsc_unique);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_RESTART);
     if (pcmk__str_eq(value, "restart", pcmk__str_casei)) {
         (*rsc)->restart_type = pe_restart_restart;
         pe_rsc_trace((*rsc), "%s dependency restart handling: restart",
                      (*rsc)->id);
         pe_warn_once(pe_wo_restart_type,
                      "Support for restart-type is deprecated and will be removed in a future release");
 
     } else {
         (*rsc)->restart_type = pe_restart_ignore;
         pe_rsc_trace((*rsc), "%s dependency restart handling: ignore",
                      (*rsc)->id);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_MULTIPLE);
     if (pcmk__str_eq(value, "stop_only", pcmk__str_casei)) {
         (*rsc)->recovery_type = pcmk_multiply_active_stop;
         pe_rsc_trace((*rsc), "%s multiple running resource recovery: stop only",
                      (*rsc)->id);
 
     } else if (pcmk__str_eq(value, "block", pcmk__str_casei)) {
         (*rsc)->recovery_type = pcmk_multiply_active_block;
         pe_rsc_trace((*rsc), "%s multiple running resource recovery: block",
                      (*rsc)->id);
 
     } else if (pcmk__str_eq(value, "stop_unexpected", pcmk__str_casei)) {
         (*rsc)->recovery_type = pcmk_multiply_active_unexpected;
         pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
                              "stop unexpected instances",
                      (*rsc)->id);
 
     } else { // "stop_start"
         if (!pcmk__str_eq(value, "stop_start",
                           pcmk__str_casei|pcmk__str_null_matches)) {
             pe_warn("%s is not a valid value for " XML_RSC_ATTR_MULTIPLE
                     ", using default of \"stop_start\"", value);
         }
         (*rsc)->recovery_type = pcmk_multiply_active_restart;
         pe_rsc_trace((*rsc), "%s multiple running resource recovery: "
                              "stop/start", (*rsc)->id);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_STICKINESS);
     if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
         (*rsc)->stickiness = char2score(value);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_STICKINESS);
     if (value != NULL && !pcmk__str_eq("default", value, pcmk__str_casei)) {
         (*rsc)->migration_threshold = char2score(value);
         if ((*rsc)->migration_threshold < 0) {
             /* @TODO We use 1 here to preserve previous behavior, but this
              * should probably use the default (INFINITY) or 0 (to disable)
              * instead.
              */
             pe_warn_once(pe_wo_neg_threshold,
                          XML_RSC_ATTR_FAIL_STICKINESS
                          " must be non-negative, using 1 instead");
             (*rsc)->migration_threshold = 1;
         }
     }
 
     if (pcmk__str_eq(crm_element_value((*rsc)->xml, XML_AGENT_ATTR_CLASS),
                      PCMK_RESOURCE_CLASS_STONITH, pcmk__str_casei)) {
         pe__set_working_set_flags(data_set, pe_flag_have_stonith_resource);
         pe__set_resource_flags(*rsc, pe_rsc_fence_device);
     }
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_REQUIRES);
     unpack_requires(*rsc, value, false);
 
     value = g_hash_table_lookup((*rsc)->meta, XML_RSC_ATTR_FAIL_TIMEOUT);
     if (value != NULL) {
         // Stored as seconds
         (*rsc)->failure_timeout = (int) (crm_parse_interval_spec(value) / 1000);
     }
 
     if (remote_node) {
         GHashTable *params = pe_rsc_params(*rsc, NULL, data_set);
 
         /* Grabbing the value now means that any rules based on node attributes
          * will evaluate to false, so such rules should not be used with
          * reconnect_interval.
          *
          * @TODO Evaluate per node before using
          */
         value = g_hash_table_lookup(params, XML_REMOTE_ATTR_RECONNECT_INTERVAL);
         if (value) {
             /* Reconnect delay works by setting failure_timeout and preventing
              * the connection from starting until the failure is cleared.
              */
             (*rsc)->remote_reconnect_ms = crm_parse_interval_spec(value);
             /* We want to override any default failure_timeout in use when
              * remote reconnect_interval is in use.
              */
             (*rsc)->failure_timeout = (*rsc)->remote_reconnect_ms / 1000;
         }
     }
 
     get_target_role(*rsc, &((*rsc)->next_role));
     pe_rsc_trace((*rsc), "%s desired next state: %s", (*rsc)->id,
                  (*rsc)->next_role != pcmk_role_unknown? role2text((*rsc)->next_role) : "default");
 
     if ((*rsc)->fns->unpack(*rsc, data_set) == FALSE) {
         (*rsc)->fns->free(*rsc);
         *rsc = NULL;
         return pcmk_rc_unpack_error;
     }
 
     if (pcmk_is_set(data_set->flags, pe_flag_symmetric_cluster)) {
         // This tag must stay exactly the same because it is tested elsewhere
         resource_location(*rsc, NULL, 0, "symmetric_default", data_set);
     } else if (guest_node) {
         /* Remote resources tied to a container resource must always be
          * allowed to opt in to the cluster. Whether the connection resource
          * is actually allowed to be placed on a node depends on the container
          * resource.
          */
         resource_location(*rsc, NULL, 0, "remote_connection_default", data_set);
     }
 
     pe_rsc_trace((*rsc), "%s action notification: %s", (*rsc)->id,
                  pcmk_is_set((*rsc)->flags, pe_rsc_notify)? "required" : "not required");
 
     (*rsc)->utilization = pcmk__strkey_table(free, free);
 
     pe__unpack_dataset_nvpairs((*rsc)->xml, XML_TAG_UTILIZATION, &rule_data,
                                (*rsc)->utilization, NULL, FALSE, data_set);
 
     if (expanded_xml) {
         if (add_template_rsc(xml_obj, data_set) == FALSE) {
             (*rsc)->fns->free(*rsc);
             *rsc = NULL;
             return pcmk_rc_unpack_error;
         }
     }
     return pcmk_rc_ok;
 }
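
 /* Editor's illustration (not part of this patch): a minimal sketch of the
  * calling convention documented above. On success the caller owns *rsc and
  * must free it with the variant-specific method; xml_obj and data_set are
  * assumed to come from the caller's CIB input and working set.
  */
 static void
 example_unpack_resource(xmlNode *xml_obj, pe_working_set_t *data_set)
 {
     pe_resource_t *rsc = NULL;

     if (pe__unpack_resource(xml_obj, &rsc, NULL, data_set) == pcmk_rc_ok) {
         crm_trace("Unpacked %s with next role %s",
                   rsc->id, role2text(rsc->next_role));
         rsc->fns->free(rsc);
     }
 }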
 
 gboolean
 is_parent(pe_resource_t *child, pe_resource_t *rsc)
 {
     pe_resource_t *parent = child;
 
     if (parent == NULL || rsc == NULL) {
         return FALSE;
     }
     while (parent->parent != NULL) {
         if (parent->parent == rsc) {
             return TRUE;
         }
         parent = parent->parent;
     }
     return FALSE;
 }
 
 pe_resource_t *
 uber_parent(pe_resource_t * rsc)
 {
     pe_resource_t *parent = rsc;
 
     if (parent == NULL) {
         return NULL;
     }
     while (parent->parent != NULL && parent->parent->variant != pe_container) {
         parent = parent->parent;
     }
     return parent;
 }
 
 /*!
  * \internal
  * \brief Get the topmost parent of a resource as a const pointer
  *
  * \param[in] rsc             Resource to check
  * \param[in] include_bundle  If true, go all the way to bundle
  *
  * \return \p NULL if \p rsc is NULL, \p rsc if \p rsc has no parent,
  *         the bundle if \p rsc is bundled and \p include_bundle is true,
  *         otherwise the topmost parent of \p rsc up to a clone
  */
 const pe_resource_t *
 pe__const_top_resource(const pe_resource_t *rsc, bool include_bundle)
 {
     const pe_resource_t *parent = rsc;
 
     if (parent == NULL) {
         return NULL;
     }
     while (parent->parent != NULL) {
         if (!include_bundle && (parent->parent->variant == pe_container)) {
             break;
         }
         parent = parent->parent;
     }
     return parent;
 }
 
 void
 common_free(pe_resource_t * rsc)
 {
     if (rsc == NULL) {
         return;
     }
 
     pe_rsc_trace(rsc, "Freeing %s %d", rsc->id, rsc->variant);
 
     g_list_free(rsc->rsc_cons);
     g_list_free(rsc->rsc_cons_lhs);
     g_list_free(rsc->rsc_tickets);
     g_list_free(rsc->dangling_migrations);
 
     if (rsc->parameter_cache != NULL) {
         g_hash_table_destroy(rsc->parameter_cache);
     }
     if (rsc->meta != NULL) {
         g_hash_table_destroy(rsc->meta);
     }
     if (rsc->utilization != NULL) {
         g_hash_table_destroy(rsc->utilization);
     }
 
     if ((rsc->parent == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         free_xml(rsc->xml);
         rsc->xml = NULL;
         free_xml(rsc->orig_xml);
         rsc->orig_xml = NULL;
 
         /* if rsc->orig_xml, then rsc->xml is an expanded xml from a template */
     } else if (rsc->orig_xml) {
         free_xml(rsc->xml);
         rsc->xml = NULL;
     }
     if (rsc->running_on) {
         g_list_free(rsc->running_on);
         rsc->running_on = NULL;
     }
     if (rsc->known_on) {
         g_hash_table_destroy(rsc->known_on);
         rsc->known_on = NULL;
     }
     if (rsc->actions) {
         g_list_free(rsc->actions);
         rsc->actions = NULL;
     }
     if (rsc->allowed_nodes) {
         g_hash_table_destroy(rsc->allowed_nodes);
         rsc->allowed_nodes = NULL;
     }
     g_list_free(rsc->fillers);
     g_list_free(rsc->rsc_location);
     pe_rsc_trace(rsc, "Resource freed");
     free(rsc->id);
     free(rsc->clone_name);
     free(rsc->allocated_to);
     free(rsc->variant_opaque);
     free(rsc->pending_task);
     free(rsc);
 }
 
 /*!
  * \internal
  * \brief Count a node and update most preferred to it as appropriate
  *
  * \param[in]     rsc          An active resource
  * \param[in]     node         A node that \p rsc is active on
  * \param[in,out] active       This will be set to \p node if \p node is more
  *                             preferred than the current value
  * \param[in,out] count_all    If not NULL, this will be incremented
  * \param[in,out] count_clean  If not NULL, this will be incremented if \p node
  *                             is online and clean
  *
  * \return true if the count should continue, or false if sufficiently known
  */
 bool
 pe__count_active_node(const pe_resource_t *rsc, pe_node_t *node,
                       pe_node_t **active, unsigned int *count_all,
                       unsigned int *count_clean)
 {
     bool keep_looking = false;
     bool is_happy = false;
 
     CRM_CHECK((rsc != NULL) && (node != NULL) && (active != NULL),
               return false);
 
     is_happy = node->details->online && !node->details->unclean;
 
     if (count_all != NULL) {
         ++*count_all;
     }
     if ((count_clean != NULL) && is_happy) {
         ++*count_clean;
     }
     if ((count_all != NULL) || (count_clean != NULL)) {
         keep_looking = true; // We're counting, so go through entire list
     }
 
     if (rsc->partial_migration_source != NULL) {
         if (node->details == rsc->partial_migration_source->details) {
             *active = node; // This is the migration source
         } else {
             keep_looking = true;
         }
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
         if (is_happy && ((*active == NULL) || !(*active)->details->online
                          || (*active)->details->unclean)) {
             *active = node; // This is the first clean node
         } else {
             keep_looking = true;
         }
     }
     if (*active == NULL) {
         *active = node; // This is the first node checked
     }
     return keep_looking;
 }
 
 // Shared implementation of resource_object_functions_t:active_node()
 static pe_node_t *
 active_node(const pe_resource_t *rsc, unsigned int *count_all,
             unsigned int *count_clean)
 {
     pe_node_t *active = NULL;
 
     if (count_all != NULL) {
         *count_all = 0;
     }
     if (count_clean != NULL) {
         *count_clean = 0;
     }
     if (rsc == NULL) {
         return NULL;
     }
     for (GList *iter = rsc->running_on; iter != NULL; iter = iter->next) {
         if (!pe__count_active_node(rsc, (pe_node_t *) iter->data, &active,
                                    count_all, count_clean)) {
             break; // Don't waste time iterating if we don't have to
         }
     }
     return active;
 }
 
 /*!
  * \internal
  * \brief Find and count active nodes according to "requires"
  *
  * \param[in]  rsc    Resource to check
  * \param[out] count  If not NULL, will be set to count of active nodes
  *
  * \return An active node (or NULL if resource is not active anywhere)
  *
  * \note This is a convenience wrapper for active_node() where the count of all
  *       active nodes or only clean active nodes is desired according to the
  *       "requires" meta-attribute.
  */
 pe_node_t *
 pe__find_active_requires(const pe_resource_t *rsc, unsigned int *count)
 {
     if (rsc == NULL) {
         if (count != NULL) {
             *count = 0;
         }
         return NULL;
 
     } else if (pcmk_is_set(rsc->flags, pe_rsc_needs_fencing)) {
         return rsc->fns->active_node(rsc, count, NULL);
 
     } else {
         return rsc->fns->active_node(rsc, NULL, count);
     }
 }
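
 /* Editor's illustration (not part of this patch): a minimal sketch of using
  * pe__find_active_requires() to report where a resource is active, counting
  * all nodes or only clean nodes depending on its "requires" setting. rsc is
  * assumed to come from an already-unpacked working set.
  */
 static void
 example_find_active(const pe_resource_t *rsc)
 {
     unsigned int count = 0;
     pe_node_t *node = pe__find_active_requires(rsc, &count);

     crm_trace("%s is active on %u node(s), preferring %s", rsc->id, count,
               ((node != NULL)? pe__node_name(node) : "none"));
 }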
 
 void
 pe__count_common(pe_resource_t *rsc)
 {
     if (rsc->children != NULL) {
         for (GList *item = rsc->children; item != NULL; item = item->next) {
             ((pe_resource_t *) item->data)->fns->count(item->data);
         }
 
     } else if (!pcmk_is_set(rsc->flags, pe_rsc_orphan)
                || (rsc->role > pcmk_role_stopped)) {
         rsc->cluster->ninstances++;
         if (pe__resource_is_disabled(rsc)) {
             rsc->cluster->disabled_resources++;
         }
         if (pcmk_is_set(rsc->flags, pe_rsc_block)) {
             rsc->cluster->blocked_resources++;
         }
     }
 }
 
 /*!
  * \internal
  * \brief Update a resource's next role
  *
  * \param[in,out] rsc   Resource to be updated
  * \param[in]     role  Resource's new next role
  * \param[in]     why   Human-friendly reason why role is changing (for logs)
  */
 void
 pe__set_next_role(pe_resource_t *rsc, enum rsc_role_e role, const char *why)
 {
     CRM_ASSERT((rsc != NULL) && (why != NULL));
     if (rsc->next_role != role) {
         pe_rsc_trace(rsc, "Resetting next role for %s from %s to %s (%s)",
                      rsc->id, role2text(rsc->next_role), role2text(role), why);
         rsc->next_role = role;
     }
 }