diff --git a/include/crm/msg_xml.h b/include/crm/msg_xml.h
index abf34cc3d3..ff3da3822a 100644
--- a/include/crm/msg_xml.h
+++ b/include/crm/msg_xml.h
@@ -1,460 +1,461 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__CRM_MSG_XML__H
 #  define PCMK__CRM_MSG_XML__H
 
 #  include <crm/common/xml.h>
 
 #if !defined(PCMK_ALLOW_DEPRECATED) || (PCMK_ALLOW_DEPRECATED == 1)
 #include <crm/msg_xml_compat.h>
 #endif
 
 #ifdef __cplusplus
 extern "C" {
 #endif
 
 /* This file defines constants for various XML syntax (mainly element and
  * attribute names).
  *
  * For consistency, new constants should start with "PCMK_", followed by "XE"
  * for XML element names, "XA" for XML attribute names, and "META" for meta
  * attribute names. Old names that don't follow this policy should eventually be
  * deprecated and replaced with names that do.
  */
 
 /*
  * XML elements
  */
 
 #define PCMK_XE_DATE_EXPRESSION             "date_expression"
 #define PCMK_XE_OP_EXPRESSION               "op_expression"
 
 /* This has been deprecated as a CIB element (an alias for <clone> with
  * "promotable" set to "true") since 2.0.0.
  */
 #define PCMK_XE_PROMOTABLE_LEGACY           "master"
 
 #define PCMK_XE_RSC_EXPRESSION              "rsc_expression"
 
 
 /*
  * XML attributes
  */
 
 #define PCMK_XA_ADMIN_EPOCH                 "admin_epoch"
 #define PCMK_XA_CIB_LAST_WRITTEN            "cib-last-written"
 #define PCMK_XA_CRM_DEBUG_ORIGIN            "crm-debug-origin"
 #define PCMK_XA_CRM_FEATURE_SET             "crm_feature_set"
 #define PCMK_XA_CRM_TIMESTAMP               "crm-timestamp"
 #define PCMK_XA_DESCRIPTION                 "description"
 #define PCMK_XA_EPOCH                       "epoch"
 #define PCMK_XA_FORMAT                      "format"
 #define PCMK_XA_HAVE_QUORUM                 "have-quorum"
 #define PCMK_XA_ID                          "id"
 #define PCMK_XA_NO_QUORUM_PANIC             "no-quorum-panic"
 #define PCMK_XA_NUM_UPDATES                 "num_updates"
 #define PCMK_XA_VALIDATE_WITH               "validate-with"
 #define PCMK_XA_VERSION                     "version"
 
 
 /*
  * Meta attributes
  */
 
 #define PCMK_META_CLONE_MAX                 "clone-max"
 #define PCMK_META_CLONE_MIN                 "clone-min"
 #define PCMK_META_CLONE_NODE_MAX            "clone-node-max"
+#define PCMK_META_CONTAINER_ATTR_TARGET     "container-attribute-target"
 #define PCMK_META_ENABLED                   "enabled"
 #define PCMK_META_FAILURE_TIMEOUT           "failure-timeout"
 #define PCMK_META_MIGRATION_THRESHOLD       "migration-threshold"
 #define PCMK_META_PROMOTED_MAX              "promoted-max"
 #define PCMK_META_PROMOTED_NODE_MAX         "promoted-node-max"
 
 
 /*
  * Older constants that don't follow current naming
  */
 
 #  ifndef F_ORIG
 #    define F_ORIG    "src"
 #  endif
 
 #  ifndef F_SEQ
 #    define F_SEQ		"seq"
 #  endif
 
 #  ifndef F_SUBTYPE
 #    define F_SUBTYPE "subt"
 #  endif
 
 #  ifndef F_TYPE
 #    define F_TYPE    "t"
 #  endif
 
 #  ifndef F_CLIENTNAME
 #    define	F_CLIENTNAME	"cn"
 #  endif
 
 #  ifndef F_XML_TAGNAME
 #    define F_XML_TAGNAME	"__name__"
 #  endif
 
 #  ifndef T_CRM
 #    define T_CRM     "crmd"
 #  endif
 
 #  ifndef T_ATTRD
 #    define T_ATTRD     "attrd"
 #  endif
 
 #  define CIB_OPTIONS_FIRST "cib-bootstrap-options"
 
 #  define F_CRM_DATA			"crm_xml"
 #  define F_CRM_TASK			"crm_task"
 #  define F_CRM_HOST_TO			"crm_host_to"
 #  define F_CRM_MSG_TYPE		F_SUBTYPE
 #  define F_CRM_SYS_TO			"crm_sys_to"
 #  define F_CRM_SYS_FROM		"crm_sys_from"
 #  define F_CRM_HOST_FROM		F_ORIG
 #  define F_CRM_REFERENCE		XML_ATTR_REFERENCE
 #  define F_CRM_VERSION			PCMK_XA_VERSION
 #  define F_CRM_ORIGIN			"origin"
 #  define F_CRM_USER			"crm_user"
 #  define F_CRM_JOIN_ID			"join_id"
 #  define F_CRM_DC_LEAVING      "dc-leaving"
 #  define F_CRM_ELECTION_ID		"election-id"
 #  define F_CRM_ELECTION_AGE_S		"election-age-sec"
 #  define F_CRM_ELECTION_AGE_US		"election-age-nano-sec"
 #  define F_CRM_ELECTION_OWNER		"election-owner"
 #  define F_CRM_TGRAPH			"crm-tgraph-file"
 #  define F_CRM_TGRAPH_INPUT		"crm-tgraph-in"
 
 #  define F_CRM_THROTTLE_MODE		"crm-limit-mode"
 #  define F_CRM_THROTTLE_MAX		"crm-limit-max"
 
 /*---- Common tags/attrs */
 #  define XML_DIFF_MARKER		"__crm_diff_marker__"
 #  define XML_TAG_CIB			"cib"
 #  define XML_TAG_FAILED		"failed"
 
 #  define XML_ATTR_TIMEOUT		"timeout"
 #  define XML_ATTR_NAME			"name"
 #  define XML_ATTR_IDREF			"id-ref"
 #  define XML_ATTR_ID_LONG		"long-id"
 #  define XML_ATTR_TYPE			"type"
 #  define XML_ATTR_OP			"op"
 #  define XML_ATTR_DC_UUID		"dc-uuid"
 #  define XML_ATTR_UPDATE_ORIG		"update-origin"
 #  define XML_ATTR_UPDATE_CLIENT	"update-client"
 #  define XML_ATTR_UPDATE_USER		"update-user"
 
 #  define XML_BOOLEAN_TRUE		"true"
 #  define XML_BOOLEAN_FALSE		"false"
 #  define XML_BOOLEAN_YES		XML_BOOLEAN_TRUE
 #  define XML_BOOLEAN_NO		XML_BOOLEAN_FALSE
 
 #  define XML_TAG_OPTIONS		"options"
 
 /*---- top level tags/attrs */
 #  define XML_ATTR_REQUEST		"request"
 #  define XML_ATTR_RESPONSE		"response"
 
 #  define XML_ATTR_UNAME		"uname"
 #  define XML_ATTR_REFERENCE		"reference"
 
 #  define XML_CRM_TAG_PING		"ping_response"
 #  define XML_PING_ATTR_STATUS		"result"
 #  define XML_PING_ATTR_SYSFROM		"crm_subsystem"
 #  define XML_PING_ATTR_CRMDSTATE   "crmd_state"
 #  define XML_PING_ATTR_PACEMAKERDSTATE "pacemakerd_state"
 #  define XML_PING_ATTR_PACEMAKERDSTATE_INIT "init"
 #  define XML_PING_ATTR_PACEMAKERDSTATE_STARTINGDAEMONS "starting_daemons"
 #  define XML_PING_ATTR_PACEMAKERDSTATE_WAITPING "wait_for_ping"
 #  define XML_PING_ATTR_PACEMAKERDSTATE_RUNNING "running"
 #  define XML_PING_ATTR_PACEMAKERDSTATE_SHUTTINGDOWN "shutting_down"
 #  define XML_PING_ATTR_PACEMAKERDSTATE_SHUTDOWNCOMPLETE "shutdown_complete"
 #  define XML_PING_ATTR_PACEMAKERDSTATE_REMOTE "remote"
 
 #  define XML_FAIL_TAG_CIB		"failed_update"
 
 #  define XML_FAILCIB_ATTR_OBJTYPE	"object_type"
 #  define XML_FAILCIB_ATTR_OP		"operation"
 #  define XML_FAILCIB_ATTR_REASON	"reason"
 
 /*---- CIB specific tags/attrs */
 #  define XML_CIB_TAG_SECTION_ALL	"all"
 #  define XML_CIB_TAG_CONFIGURATION	"configuration"
 #  define XML_CIB_TAG_STATUS       	"status"
 #  define XML_CIB_TAG_RESOURCES		"resources"
 #  define XML_CIB_TAG_NODES         	"nodes"
 #  define XML_CIB_TAG_CONSTRAINTS   	"constraints"
 #  define XML_CIB_TAG_CRMCONFIG   	"crm_config"
 #  define XML_CIB_TAG_OPCONFIG		"op_defaults"
 #  define XML_CIB_TAG_RSCCONFIG   	"rsc_defaults"
 #  define XML_CIB_TAG_ACLS   		"acls"
 #  define XML_CIB_TAG_ALERTS    	"alerts"
 #  define XML_CIB_TAG_ALERT   		"alert"
 #  define XML_CIB_TAG_ALERT_RECIPIENT	"recipient"
 #  define XML_CIB_TAG_ALERT_SELECT      "select"
 #  define XML_CIB_TAG_ALERT_ATTRIBUTES  "select_attributes"
 #  define XML_CIB_TAG_ALERT_FENCING     "select_fencing"
 #  define XML_CIB_TAG_ALERT_NODES       "select_nodes"
 #  define XML_CIB_TAG_ALERT_RESOURCES   "select_resources"
 #  define XML_CIB_TAG_ALERT_ATTR        "attribute"
 
 #  define XML_CIB_TAG_STATE         	"node_state"
 #  define XML_CIB_TAG_NODE          	"node"
 #  define XML_CIB_TAG_NVPAIR        	"nvpair"
 
 #  define XML_CIB_TAG_PROPSET	   	"cluster_property_set"
 #  define XML_TAG_ATTR_SETS	   	"instance_attributes"
 #  define XML_TAG_META_SETS	   	"meta_attributes"
 #  define XML_TAG_ATTRS			"attributes"
 #  define XML_TAG_PARAMS		"parameters"
 #  define XML_TAG_PARAM			"param"
 #  define XML_TAG_UTILIZATION		"utilization"
 
 #  define XML_TAG_RESOURCE_REF		"resource_ref"
 #  define XML_CIB_TAG_RESOURCE	  	"primitive"
 #  define XML_CIB_TAG_GROUP	  	"group"
 #  define XML_CIB_TAG_INCARNATION	"clone"
 #  define XML_CIB_TAG_CONTAINER		"bundle"
 
 #  define XML_CIB_TAG_RSC_TEMPLATE	"template"
 
 #  define XML_RSC_ATTR_TARGET           "container-attribute-target"
 #  define XML_RSC_ATTR_RESTART	  	"restart-type"
 #  define XML_RSC_ATTR_ORDERED		"ordered"
 #  define XML_RSC_ATTR_INTERLEAVE	"interleave"
 #  define XML_RSC_ATTR_INCARNATION	"clone"
 #  define XML_RSC_ATTR_PROMOTABLE       "promotable"
 #  define XML_RSC_ATTR_MANAGED		"is-managed"
 #  define XML_RSC_ATTR_TARGET_ROLE	"target-role"
 #  define XML_RSC_ATTR_UNIQUE		"globally-unique"
 #  define XML_RSC_ATTR_NOTIFY		"notify"
 #  define XML_RSC_ATTR_STICKINESS	"resource-stickiness"
 #  define XML_RSC_ATTR_MULTIPLE		"multiple-active"
 #  define XML_RSC_ATTR_REQUIRES		"requires"
 #  define XML_RSC_ATTR_CONTAINER	"container"
 #  define XML_RSC_ATTR_INTERNAL_RSC	"internal_rsc"
 #  define XML_RSC_ATTR_MAINTENANCE	"maintenance"
 #  define XML_RSC_ATTR_REMOTE_NODE  	"remote-node"
 #  define XML_RSC_ATTR_CLEAR_OP         "clear_failure_op"
 #  define XML_RSC_ATTR_CLEAR_INTERVAL   "clear_failure_interval"
 #  define XML_RSC_ATTR_REMOTE_RA_ADDR   "addr"
 #  define XML_RSC_ATTR_REMOTE_RA_SERVER "server"
 #  define XML_RSC_ATTR_REMOTE_RA_PORT   "port"
 #  define XML_RSC_ATTR_CRITICAL         "critical"
 
 #  define XML_REMOTE_ATTR_RECONNECT_INTERVAL "reconnect_interval"
 
 #  define XML_OP_ATTR_ON_FAIL		"on-fail"
 #  define XML_OP_ATTR_START_DELAY	"start-delay"
 #  define XML_OP_ATTR_ALLOW_MIGRATE	"allow-migrate"
 #  define XML_OP_ATTR_ORIGIN		"interval-origin"
 #  define XML_OP_ATTR_PENDING		"record-pending"
 #  define XML_OP_ATTR_DIGESTS_ALL       "digests-all"
 #  define XML_OP_ATTR_DIGESTS_SECURE    "digests-secure"
 
 #  define XML_CIB_TAG_LRM		"lrm"
 #  define XML_LRM_TAG_RESOURCES     	"lrm_resources"
 #  define XML_LRM_TAG_RESOURCE     	"lrm_resource"
 #  define XML_LRM_TAG_RSC_OP		"lrm_rsc_op"
 #  define XML_AGENT_ATTR_CLASS		"class"
 #  define XML_AGENT_ATTR_PROVIDER	"provider"
 
 //! \deprecated Do not use (will be removed in a future release)
 #  define XML_CIB_ATTR_REPLACE       	"replace"
 
 #  define XML_CIB_ATTR_PRIORITY     	"priority"
 
 #  define XML_NODE_IS_REMOTE    	"remote_node"
 #  define XML_NODE_IS_FENCED		"node_fenced"
 #  define XML_NODE_IS_MAINTENANCE   "node_in_maintenance"
 
 #  define XML_CIB_ATTR_SHUTDOWN       	"shutdown"
 
 /* Aside from being an old name for the executor, LRM is a misnomer here because
  * the controller and scheduler use these to track actions, which are not always
  * executor operations.
  */
 
 // XML attribute that takes interval specification (user-facing configuration)
 #  define XML_LRM_ATTR_INTERVAL		"interval"
 
 // XML attribute that takes interval in milliseconds (daemon APIs)
 // (identical value as above, but different constant allows clearer code intent)
 #  define XML_LRM_ATTR_INTERVAL_MS  XML_LRM_ATTR_INTERVAL
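 /* Illustration only (hypothetical caller, not part of this header): code that
  * reads the user-facing interval specification might call
  *     crm_element_value(op_xml, XML_LRM_ATTR_INTERVAL);
  * while daemon code exchanging a millisecond count might use a helper such as
  *     crm_xml_add_ms(op_xml, XML_LRM_ATTR_INTERVAL_MS, interval_ms);
  * so the constant chosen signals which form the value takes, even though both
  * name the same XML attribute.
  */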
 
 #  define XML_LRM_ATTR_TASK		"operation"
 #  define XML_LRM_ATTR_TASK_KEY		"operation_key"
 #  define XML_LRM_ATTR_TARGET		"on_node"
 #  define XML_LRM_ATTR_TARGET_UUID	"on_node_uuid"
 /*! Actions to be executed on Pacemaker Remote nodes are routed through the
  *  controller on the cluster node hosting the remote connection. That cluster
  *  node is considered the router node for the action.
  */
 #  define XML_LRM_ATTR_ROUTER_NODE  "router_node"
 #  define XML_LRM_ATTR_RSCID		"rsc-id"
 #  define XML_LRM_ATTR_OPSTATUS		"op-status"
 #  define XML_LRM_ATTR_RC		"rc-code"
 #  define XML_LRM_ATTR_CALLID		"call-id"
 #  define XML_LRM_ATTR_OP_DIGEST	"op-digest"
 #  define XML_LRM_ATTR_OP_RESTART	"op-force-restart"
 #  define XML_LRM_ATTR_OP_SECURE	"op-secure-params"
 #  define XML_LRM_ATTR_RESTART_DIGEST	"op-restart-digest"
 #  define XML_LRM_ATTR_SECURE_DIGEST	"op-secure-digest"
 #  define XML_LRM_ATTR_EXIT_REASON	"exit-reason"
 
 #  define XML_RSC_OP_LAST_CHANGE        "last-rc-change"
 #  define XML_RSC_OP_T_EXEC             "exec-time"
 #  define XML_RSC_OP_T_QUEUE            "queue-time"
 
 #  define XML_LRM_ATTR_MIGRATE_SOURCE	"migrate_source"
 #  define XML_LRM_ATTR_MIGRATE_TARGET	"migrate_target"
 
 #  define XML_TAG_GRAPH			"transition_graph"
 #  define XML_GRAPH_TAG_RSC_OP		"rsc_op"
 #  define XML_GRAPH_TAG_PSEUDO_EVENT	"pseudo_event"
 #  define XML_GRAPH_TAG_CRM_EVENT	"crm_event"
 #  define XML_GRAPH_TAG_DOWNED            "downed"
 #  define XML_GRAPH_TAG_MAINTENANCE       "maintenance"
 
 #  define XML_TAG_RULE			"rule"
 #  define XML_RULE_ATTR_SCORE		"score"
 #  define XML_RULE_ATTR_SCORE_ATTRIBUTE	"score-attribute"
 #  define XML_RULE_ATTR_ROLE		"role"
 #  define XML_RULE_ATTR_BOOLEAN_OP	"boolean-op"
 
 #  define XML_TAG_EXPRESSION		"expression"
 #  define XML_EXPR_ATTR_ATTRIBUTE	"attribute"
 #  define XML_EXPR_ATTR_OPERATION	"operation"
 #  define XML_EXPR_ATTR_VALUE		"value"
 #  define XML_EXPR_ATTR_TYPE		"type"
 #  define XML_EXPR_ATTR_VALUE_SOURCE	"value-source"
 
 #  define XML_CONS_TAG_RSC_DEPEND	"rsc_colocation"
 #  define XML_CONS_TAG_RSC_ORDER	"rsc_order"
 #  define XML_CONS_TAG_RSC_LOCATION	"rsc_location"
 #  define XML_CONS_TAG_RSC_TICKET	"rsc_ticket"
 #  define XML_CONS_TAG_RSC_SET		"resource_set"
 #  define XML_CONS_ATTR_SYMMETRICAL	"symmetrical"
 
 #  define XML_LOCATION_ATTR_DISCOVERY	"resource-discovery"
 
 #  define XML_COLOC_ATTR_SOURCE		"rsc"
 #  define XML_COLOC_ATTR_SOURCE_ROLE	"rsc-role"
 #  define XML_COLOC_ATTR_TARGET		"with-rsc"
 #  define XML_COLOC_ATTR_TARGET_ROLE	"with-rsc-role"
 #  define XML_COLOC_ATTR_NODE_ATTR	"node-attribute"
 #  define XML_COLOC_ATTR_INFLUENCE          "influence"
 
 //! \deprecated Deprecated since 2.1.5
 #  define XML_COLOC_ATTR_SOURCE_INSTANCE	"rsc-instance"
 
 //! \deprecated Deprecated since 2.1.5
 #  define XML_COLOC_ATTR_TARGET_INSTANCE	"with-rsc-instance"
 
 #  define XML_LOC_ATTR_SOURCE           "rsc"
 #  define XML_LOC_ATTR_SOURCE_PATTERN   "rsc-pattern"
 
 #  define XML_ORDER_ATTR_FIRST		"first"
 #  define XML_ORDER_ATTR_THEN		"then"
 #  define XML_ORDER_ATTR_FIRST_ACTION	"first-action"
 #  define XML_ORDER_ATTR_THEN_ACTION	"then-action"
 #  define XML_ORDER_ATTR_KIND		"kind"
 
 //! \deprecated Deprecated since 2.1.5
 #  define XML_ORDER_ATTR_FIRST_INSTANCE	"first-instance"
 
 //! \deprecated Deprecated since 2.1.5
 #  define XML_ORDER_ATTR_THEN_INSTANCE	"then-instance"
 
 #  define XML_TICKET_ATTR_TICKET	"ticket"
 #  define XML_TICKET_ATTR_LOSS_POLICY	"loss-policy"
 
 #  define XML_NVPAIR_ATTR_NAME        	"name"
 #  define XML_NVPAIR_ATTR_VALUE        	"value"
 
 #  define XML_NODE_ATTR_RSC_DISCOVERY   "resource-discovery-enabled"
 
 #  define XML_ALERT_ATTR_PATH		"path"
 #  define XML_ALERT_ATTR_TIMEOUT	"timeout"
 #  define XML_ALERT_ATTR_TSTAMP_FORMAT	"timestamp-format"
 #  define XML_ALERT_ATTR_REC_VALUE	"value"
 
 #  define XML_CIB_TAG_GENERATION_TUPPLE	"generation_tuple"
 
 #  define XML_ATTR_TRANSITION_MAGIC	"transition-magic"
 #  define XML_ATTR_TRANSITION_KEY	"transition-key"
 
 #  define XML_ATTR_TE_NOWAIT		"op_no_wait"
 #  define XML_ATTR_TE_TARGET_RC		"op_target_rc"
 #  define XML_TAG_TRANSIENT_NODEATTRS	"transient_attributes"
 
 //! \deprecated Do not use (will be removed in a future release)
 #  define XML_TAG_DIFF_ADDED		"diff-added"
 
 //! \deprecated Do not use (will be removed in a future release)
 #  define XML_TAG_DIFF_REMOVED		"diff-removed"
 
 #  define XML_ACL_TAG_USER		"acl_target"
 #  define XML_ACL_TAG_USERv1		"acl_user"
 #  define XML_ACL_TAG_GROUP		"acl_group"
 #  define XML_ACL_TAG_ROLE		"acl_role"
 #  define XML_ACL_TAG_PERMISSION	"acl_permission"
 #  define XML_ACL_TAG_ROLE_REF 		"role"
 #  define XML_ACL_TAG_ROLE_REFv1	"role_ref"
 #  define XML_ACL_ATTR_KIND		"kind"
 #  define XML_ACL_TAG_READ		"read"
 #  define XML_ACL_TAG_WRITE		"write"
 #  define XML_ACL_TAG_DENY		"deny"
 #  define XML_ACL_ATTR_REF		"reference"
 #  define XML_ACL_ATTR_REFv1		"ref"
 #  define XML_ACL_ATTR_TAG		"object-type"
 #  define XML_ACL_ATTR_TAGv1		"tag"
 #  define XML_ACL_ATTR_XPATH		"xpath"
 #  define XML_ACL_ATTR_ATTRIBUTE	"attribute"
 
 #  define XML_CIB_TAG_TICKETS		"tickets"
 #  define XML_CIB_TAG_TICKET_STATE	"ticket_state"
 
 #  define XML_CIB_TAG_TAGS   		"tags"
 #  define XML_CIB_TAG_TAG   		"tag"
 #  define XML_CIB_TAG_OBJ_REF 		"obj_ref"
 
 #  define XML_TAG_FENCING_TOPOLOGY      "fencing-topology"
 #  define XML_TAG_FENCING_LEVEL         "fencing-level"
 #  define XML_ATTR_STONITH_INDEX        "index"
 #  define XML_ATTR_STONITH_TARGET       "target"
 #  define XML_ATTR_STONITH_TARGET_VALUE     "target-value"
 #  define XML_ATTR_STONITH_TARGET_PATTERN   "target-pattern"
 #  define XML_ATTR_STONITH_TARGET_ATTRIBUTE "target-attribute"
 #  define XML_ATTR_STONITH_DEVICES      "devices"
 
 #  define XML_TAG_DIFF                  "diff"
 #  define XML_DIFF_VERSION              "version"
 #  define XML_DIFF_VSOURCE              "source"
 #  define XML_DIFF_VTARGET              "target"
 #  define XML_DIFF_CHANGE               "change"
 #  define XML_DIFF_LIST                 "change-list"
 #  define XML_DIFF_ATTR                 "change-attr"
 #  define XML_DIFF_RESULT               "change-result"
 #  define XML_DIFF_OP                   "operation"
 #  define XML_DIFF_PATH                 "path"
 #  define XML_DIFF_POSITION             "position"
 
 #  define ID(x) crm_element_value(x, PCMK_XA_ID)
 
 #ifdef __cplusplus
 }
 #endif
 
 #endif
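A quick, illustrative sketch (not part of the patch) of how such constants are typically consumed through the public libcrmcommon XML helper crm_element_value(), using the newly added PCMK_META_CONTAINER_ATTR_TARGET along with the nvpair constants defined above:

    #include <string.h>
    #include <crm/common/xml.h>
    #include <crm/msg_xml.h>

    /* Return the value of an <nvpair> element if (and only if) it sets
     * container-attribute-target, otherwise NULL
     */
    static const char *
    container_attr_target(const xmlNode *meta_nvpair)
    {
        const char *name = crm_element_value(meta_nvpair, XML_NVPAIR_ATTR_NAME);

        if ((name != NULL)
            && (strcmp(name, PCMK_META_CONTAINER_ATTR_TARGET) == 0)) {
            return crm_element_value(meta_nvpair, XML_NVPAIR_ATTR_VALUE);
        }
        return NULL;
    }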
diff --git a/lib/common/attrs.c b/lib/common/attrs.c
index 76e3f0de0a..35dd833f5c 100644
--- a/lib/common/attrs.c
+++ b/lib/common/attrs.c
@@ -1,98 +1,98 @@
 /*
- * Copyright 2011-2023 the Pacemaker project contributors
+ * Copyright 2011-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef _GNU_SOURCE
 #  define _GNU_SOURCE
 #endif
 
 #include <crm_internal.h>
 
 #include <stdio.h>
 
 #include <crm/msg_xml.h>
 #include <crm/common/attrd_internal.h>
 
 #define OCF_RESKEY_PREFIX "OCF_RESKEY_"
 #define LRM_TARGET_ENV OCF_RESKEY_PREFIX CRM_META "_" XML_LRM_ATTR_TARGET
 
 /*!
  * \internal
  * \brief Get the node name that should be used to set node attributes
  *
  * If given NULL, "auto", or "localhost" as an argument, check the environment
  * to detect the node name that should be used to set node attributes. (The
  * caller might not know the correct name, for example if the target is part of
  * a bundle with container-attribute-target set to "host".)
  *
  * \param[in] name  NULL, "auto", or "localhost" to check environment
  *                  variables; any other value causes NULL to be returned
  *
  * \return Node name that should be used for node attributes based on the
  *         environment if known, otherwise NULL
  */
 const char *
 pcmk__node_attr_target(const char *name)
 {
     if (name == NULL || pcmk__strcase_any_of(name, "auto", "localhost", NULL)) {
         char buf[128] = OCF_RESKEY_PREFIX;
         size_t offset = sizeof(OCF_RESKEY_PREFIX) - 1;
-        char *target_var = crm_meta_name(XML_RSC_ATTR_TARGET);
+        char *target_var = crm_meta_name(PCMK_META_CONTAINER_ATTR_TARGET);
         char *phys_var = crm_meta_name(PCMK__ENV_PHYSICAL_HOST);
         const char *target = NULL;
         const char *host_physical = NULL;
 
         snprintf(buf + offset, sizeof(buf) - offset, "%s", target_var);
         target = getenv(buf);
 
         snprintf(buf + offset, sizeof(buf) - offset, "%s", phys_var);
         host_physical = getenv(buf);
 
         // It is important to use the name by which the scheduler knows us
         if (host_physical && pcmk__str_eq(target, "host", pcmk__str_casei)) {
             name = host_physical;
 
         } else {
             const char *host_pcmk = getenv(LRM_TARGET_ENV);
 
             if (host_pcmk) {
                 name = host_pcmk;
             }
         }
         free(target_var);
         free(phys_var);
 
         // TODO? Call get_local_node_name() if name == NULL
         // (currently would require linkage against libcrmcluster)
         return name;
     } else {
         return NULL;
     }
 }
 
 /*!
  * \brief Return the name of the node attribute used as a promotion score
  *
  * \param[in] rsc_id  Resource ID that promotion score is for (or NULL to
  *                    check the OCF_RESOURCE_INSTANCE environment variable)
  *
  * \return Newly allocated string with the node attribute name (or NULL on
  *         error, including no ID or environment variable specified)
  * \note It is the caller's responsibility to free() the result.
  */
 char *
 pcmk_promotion_score_name(const char *rsc_id)
 {
     if (pcmk__str_empty(rsc_id)) {
         rsc_id = getenv("OCF_RESOURCE_INSTANCE");
         if (pcmk__str_empty(rsc_id)) {
             return NULL;
         }
     }
     return crm_strdup_printf("master-%s", rsc_id);
 }
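A minimal caller sketch (illustrative only; it assumes the same headers attrs.c itself includes declare these helpers), showing the documented contracts above: pcmk__node_attr_target() only resolves a name for NULL, "auto", or "localhost", and the string returned by pcmk_promotion_score_name() must be freed by the caller:

    #include <stdio.h>
    #include <stdlib.h>

    #include <crm_internal.h>
    #include <crm/common/attrd_internal.h>

    static void
    show_promotion_attr(const char *rsc_id)
    {
        // NULL (or "auto"/"localhost") asks the helper to detect the node name
        const char *node = pcmk__node_attr_target(NULL);

        // Newly allocated "master-<rsc_id>" string; caller must free() it
        char *attr_name = pcmk_promotion_score_name(rsc_id);

        if (attr_name != NULL) {
            printf("promotion score attribute %s would be set on %s\n",
                   attr_name, (node != NULL)? node : "the local node");
            free(attr_name);
        }
    }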
diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h
index 60d739a064..162c5eec26 100644
--- a/lib/pacemaker/libpacemaker_private.h
+++ b/lib/pacemaker/libpacemaker_private.h
@@ -1,1163 +1,1164 @@
 /*
- * Copyright 2021-2023 the Pacemaker project contributors
+ * Copyright 2021-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__LIBPACEMAKER_PRIVATE__H
 #  define PCMK__LIBPACEMAKER_PRIVATE__H
 
 /* This header is for the sole use of libpacemaker, so that functions can be
  * declared with G_GNUC_INTERNAL for efficiency.
  */
 
 #include <crm/lrmd_events.h>      // lrmd_event_data_t
 #include <crm/common/scheduler.h> // pcmk_action_t, pcmk_node_t, etc.
 #include <crm/pengine/internal.h> // pcmk__location_t
 
 // Colocation flags
 enum pcmk__coloc_flags {
     pcmk__coloc_none        = 0U,
 
     // Primary is affected even if already active
     pcmk__coloc_influence   = (1U << 0),
 
     // Colocation was explicitly configured in CIB
     pcmk__coloc_explicit    = (1U << 1),
 };
 
 // Flags to modify the behavior of add_colocated_node_scores()
 enum pcmk__coloc_select {
     // With no other flags, apply all "with this" colocations
     pcmk__coloc_select_default      = 0,
 
     // Apply "this with" colocations instead of "with this" colocations
     pcmk__coloc_select_this_with    = (1 << 0),
 
     // Apply only colocations with non-negative scores
     pcmk__coloc_select_nonnegative  = (1 << 1),
 
     // Apply only colocations with at least one matching node
     pcmk__coloc_select_active       = (1 << 2),
 };
 
 // Flags the update_ordered_actions() method can return
 enum pcmk__updated {
     pcmk__updated_none      = 0,        // Nothing changed
     pcmk__updated_first     = (1 << 0), // First action was updated
     pcmk__updated_then      = (1 << 1), // Then action was updated
 };
 
 #define pcmk__set_updated_flags(au_flags, action, flags_to_set) do {        \
         au_flags = pcmk__set_flags_as(__func__, __LINE__,                   \
                                       LOG_TRACE, "Action update",           \
                                       (action)->uuid, au_flags,             \
                                       (flags_to_set), #flags_to_set);       \
     } while (0)
 
 #define pcmk__clear_updated_flags(au_flags, action, flags_to_clear) do {    \
         au_flags = pcmk__clear_flags_as(__func__, __LINE__,                 \
                                         LOG_TRACE, "Action update",         \
                                         (action)->uuid, au_flags,           \
                                         (flags_to_clear), #flags_to_clear); \
     } while (0)
 
 // Resource assignment methods
 struct resource_alloc_functions_s {
     /*!
      * \internal
      * \brief Assign a resource to a node
      *
      * \param[in,out] rsc           Resource to assign to a node
      * \param[in]     prefer        Node to prefer, if all else is equal
      * \param[in]     stop_if_fail  If \c true and \p rsc can't be assigned to a
      *                              node, set next role to stopped and update
      *                              existing actions (if \p rsc is not a
      *                              primitive, this applies to its primitive
      *                              descendants instead)
      *
      * \return Node that \p rsc is assigned to, if assigned entirely to one node
      *
      * \note If \p stop_if_fail is \c false, then \c pcmk__unassign_resource()
      *       can completely undo the assignment. A successful assignment can be
      *       either undone or left alone as final. A failed assignment has the
      *       same effect as calling pcmk__unassign_resource(); there are no side
      *       effects on roles or actions.
      */
     pcmk_node_t *(*assign)(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
                            bool stop_if_fail);
 
     /*!
      * \internal
      * \brief Create all actions needed for a given resource
      *
      * \param[in,out] rsc  Resource to create actions for
      */
     void (*create_actions)(pcmk_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Schedule any probes needed for a resource on a node
      *
      * \param[in,out] rsc   Resource to create probe for
      * \param[in,out] node  Node to create probe on
      *
      * \return true if any probe was created, otherwise false
      */
     bool (*create_probe)(pcmk_resource_t *rsc, pcmk_node_t *node);
 
     /*!
      * \internal
      * \brief Create implicit constraints needed for a resource
      *
      * \param[in,out] rsc  Resource to create implicit constraints for
      */
     void (*internal_constraints)(pcmk_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Apply a colocation's score to node scores or resource priority
      *
      * Given a colocation constraint, apply its score to the dependent's
      * allowed node scores (if we are still placing resources) or priority (if
      * we are choosing promotable clone instance roles).
      *
      * \param[in,out] dependent      Dependent resource in colocation
      * \param[in]     primary        Primary resource in colocation
      * \param[in]     colocation     Colocation constraint to apply
      * \param[in]     for_dependent  true if called on behalf of dependent
      */
     void (*apply_coloc_score)(pcmk_resource_t *dependent,
                               const pcmk_resource_t *primary,
                               const pcmk__colocation_t *colocation,
                               bool for_dependent);
 
     /*!
      * \internal
      * \brief Create list of all resources in colocations with a given resource
      *
      * Given a resource, create a list of all resources involved in mandatory
      * colocations with it, whether directly or via chained colocations.
      *
      * \param[in]     rsc             Resource to add to colocated list
      * \param[in]     orig_rsc        Resource originally requested
      * \param[in,out] colocated_rscs  Existing list
      *
      * \return List of given resource and all resources involved in colocations
      *
      * \note This function is recursive; top-level callers should pass NULL as
      *       \p colocated_rscs and \p orig_rsc, and the desired resource as
      *       \p rsc. The recursive calls will use other values.
      */
     GList *(*colocated_resources)(const pcmk_resource_t *rsc,
                                   const pcmk_resource_t *orig_rsc,
                                   GList *colocated_rscs);
 
     /*!
      * \internal
      * \brief Add colocations affecting a resource as primary to a list
      *
      * Given a resource being assigned (\p orig_rsc) and a resource somewhere in
      * its chain of ancestors (\p rsc, which may be \p orig_rsc), get
      * colocations that affect the ancestor as primary and should affect the
      * resource, and add them to a given list.
      *
      * \param[in]     rsc       Resource whose colocations should be added
      * \param[in]     orig_rsc  Affected resource (\p rsc or a descendant)
      * \param[in,out] list      List of colocations to add to
      *
      * \note All arguments should be non-NULL.
      * \note The pcmk__with_this_colocations() wrapper should usually be used
      *       instead of using this method directly.
      */
     void (*with_this_colocations)(const pcmk_resource_t *rsc,
                                   const pcmk_resource_t *orig_rsc,
                                   GList **list);
 
     /*!
      * \internal
      * \brief Add colocations affecting a resource as dependent to a list
      *
      * Given a resource being assigned (\p orig_rsc) and a resource somewhere in
      * its chain of ancestors (\p rsc, which may be \p orig_rsc), get
      * colocations that affect the ancestor as dependent and should affect the
      * resource, and add them to a given list.
      *
      * \param[in]     rsc       Resource whose colocations should be added
      * \param[in]     orig_rsc  Affected resource (\p rsc or a descendant)
      * \param[in,out] list      List of colocations to add to
      *
      * \note All arguments should be non-NULL.
      * \note The pcmk__this_with_colocations() wrapper should usually be used
      *       instead of using this method directly.
      */
     void (*this_with_colocations)(const pcmk_resource_t *rsc,
                                   const pcmk_resource_t *orig_rsc,
                                   GList **list);
 
     /*!
      * \internal
      * \brief Update nodes with scores of colocated resources' nodes
      *
      * Given a table of nodes and a resource, update the nodes' scores with the
      * scores of the best nodes matching the attribute used for each of the
      * resource's relevant colocations.
      *
      * \param[in,out] source_rsc  Resource whose node scores to add
      * \param[in]     target_rsc  Resource on whose behalf to update \p *nodes
      * \param[in]     log_id      Resource ID for logs (if \c NULL, use
      *                            \p source_rsc ID)
      * \param[in,out] nodes       Nodes to update (set initial contents to
      *                            \c NULL to copy allowed nodes from
      *                            \p source_rsc)
      * \param[in]     colocation  Original colocation constraint (used to get
      *                            configured primary resource's stickiness, and
      *                            to get colocation node attribute; if \c NULL,
      *                            <tt>source_rsc</tt>'s own matching node scores
      *                            will not be added, and \p *nodes must be
      *                            \c NULL as well)
      * \param[in]     factor      Incorporate scores multiplied by this factor
      * \param[in]     flags       Bitmask of enum pcmk__coloc_select values
      *
      * \note \c NULL \p target_rsc, \c NULL \p *nodes, \c NULL \p colocation,
      *       and the \c pcmk__coloc_select_this_with flag are used together (and
      *       only by \c cmp_resources()).
      * \note The caller remains responsible for freeing \p *nodes.
      */
     void (*add_colocated_node_scores)(pcmk_resource_t *source_rsc,
                                       const pcmk_resource_t *target_rsc,
                                       const char *log_id, GHashTable **nodes,
                                       const pcmk__colocation_t *colocation,
                                       float factor, uint32_t flags);
 
     /*!
      * \internal
      * \brief Apply a location constraint to a resource's allowed node scores
      *
      * \param[in,out] rsc       Resource to apply constraint to
      * \param[in,out] location  Location constraint to apply
      */
     void (*apply_location)(pcmk_resource_t *rsc, pcmk__location_t *location);
 
     /*!
      * \internal
      * \brief Return action flags for a given resource action
      *
      * \param[in,out] action  Action to get flags for
      * \param[in]     node    If not NULL, limit effects to this node
      *
      * \return Flags appropriate to \p action on \p node
      * \note For primitives, this will be the same as action->flags regardless
      *       of node. For collective resources, the flags can differ due to
      *       multiple instances possibly being involved.
      */
     uint32_t (*action_flags)(pcmk_action_t *action, const pcmk_node_t *node);
 
     /*!
      * \internal
      * \brief Update two actions according to an ordering between them
      *
      * Given information about an ordering of two actions, update the actions'
      * flags (and runnable_before members if appropriate) as appropriate for the
      * ordering. Effects may cascade to other orderings involving the actions as
      * well.
      *
      * \param[in,out] first      'First' action in an ordering
      * \param[in,out] then       'Then' action in an ordering
      * \param[in]     node       If not NULL, limit scope of ordering to this
      *                           node (only used when interleaving instances)
      * \param[in]     flags      Action flags for \p first for ordering purposes
      * \param[in]     filter     Action flags to limit scope of certain updates
      *                           (may include pcmk_action_optional to affect
      *                           only mandatory actions and pcmk_action_runnable
      *                           to affect only runnable actions)
      * \param[in]     type       Group of enum pcmk__action_relation_flags
      * \param[in,out] scheduler  Scheduler data
      *
      * \return Group of enum pcmk__updated flags indicating what was updated
      */
     uint32_t (*update_ordered_actions)(pcmk_action_t *first,
                                        pcmk_action_t *then,
                                        const pcmk_node_t *node, uint32_t flags,
                                        uint32_t filter, uint32_t type,
                                        pcmk_scheduler_t *scheduler);
 
     /*!
      * \internal
      * \brief Output a summary of scheduled actions for a resource
      *
      * \param[in,out] rsc  Resource to output actions for
      */
     void (*output_actions)(pcmk_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Add a resource's actions to the transition graph
      *
      * \param[in,out] rsc  Resource whose actions should be added
      */
     void (*add_actions_to_graph)(pcmk_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Add meta-attributes relevant to transition graph actions to XML
      *
      * If a given resource supports variant-specific meta-attributes that are
      * needed for transition graph actions, add them to a given XML element.
      *
      * \param[in]     rsc  Resource whose meta-attributes should be added
      * \param[in,out] xml  Transition graph action attributes XML to add to
      */
     void (*add_graph_meta)(const pcmk_resource_t *rsc, xmlNode *xml);
 
     /*!
      * \internal
      * \brief Add a resource's utilization to a table of utilization values
      *
      * This function is used when summing the utilization of a resource and all
      * resources colocated with it, to determine whether a node has sufficient
      * capacity. Given a resource and a table of utilization values, it will add
      * the resource's utilization to the existing values, if the resource has
      * not yet been assigned to a node.
      *
      * \param[in]     rsc          Resource with utilization to add
      * \param[in]     orig_rsc     Resource being assigned (for logging only)
      * \param[in]     all_rscs     List of all resources that will be summed
      * \param[in,out] utilization  Table of utilization values to add to
      */
     void (*add_utilization)(const pcmk_resource_t *rsc,
                             const pcmk_resource_t *orig_rsc, GList *all_rscs,
                             GHashTable *utilization);
 
     /*!
      * \internal
      * \brief Apply a shutdown lock for a resource, if appropriate
      *
      * \param[in,out] rsc       Resource to check for shutdown lock
      */
     void (*shutdown_lock)(pcmk_resource_t *rsc);
 };
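 /* Illustrative sketch (hypothetical, not the actual libpacemaker tables): each
  * resource variant provides its behavior by filling in one of these method
  * tables with its own implementations, for example:
  *
  *     static struct resource_alloc_functions_s primitive_methods = {
  *         .assign               = pcmk__primitive_assign,
  *         .create_actions       = pcmk__primitive_create_actions,
  *         .internal_constraints = pcmk__primitive_internal_constraints,
  *         .action_flags         = pcmk__primitive_action_flags,
  *         .shutdown_lock        = pcmk__primitive_shutdown_lock,
  *         // ... remaining members follow the same pattern
  *     };
  *
  * The scheduler then dispatches through the table attached to each resource
  * without needing to know its variant.
  */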
 
 // Actions (pcmk_sched_actions.c)
 
 G_GNUC_INTERNAL
 void pcmk__update_action_for_orderings(pcmk_action_t *action,
                                        pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__update_ordered_actions(pcmk_action_t *first, pcmk_action_t *then,
                                       const pcmk_node_t *node, uint32_t flags,
                                       uint32_t filter, uint32_t type,
                                       pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 void pcmk__log_action(const char *pre_text, const pcmk_action_t *action,
                       bool details);
 
 G_GNUC_INTERNAL
 pcmk_action_t *pcmk__new_cancel_action(pcmk_resource_t *rsc, const char *name,
                                        guint interval_ms,
                                        const pcmk_node_t *node);
 
 G_GNUC_INTERNAL
 pcmk_action_t *pcmk__new_shutdown_action(pcmk_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__action_locks_rsc_to_node(const pcmk_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__deduplicate_action_inputs(pcmk_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__output_actions(pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 bool pcmk__check_action_config(pcmk_resource_t *rsc, pcmk_node_t *node,
                                const xmlNode *xml_op);
 
 G_GNUC_INTERNAL
 void pcmk__handle_rsc_config_changes(pcmk_scheduler_t *scheduler);
 
 
 // Recurring actions (pcmk_sched_recurring.c)
 
 G_GNUC_INTERNAL
 void pcmk__create_recurring_actions(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__schedule_cancel(pcmk_resource_t *rsc, const char *call_id,
                            const char *task, guint interval_ms,
                            const pcmk_node_t *node, const char *reason);
 
 G_GNUC_INTERNAL
 void pcmk__reschedule_recurring(pcmk_resource_t *rsc, const char *task,
                                 guint interval_ms, pcmk_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__action_is_recurring(const pcmk_action_t *action);
 
 
 // Producing transition graphs (pcmk_graph_producer.c)
 
 G_GNUC_INTERNAL
 bool pcmk__graph_has_loop(const pcmk_action_t *init_action,
                           const pcmk_action_t *action,
                           pcmk__related_action_t *input);
 
 G_GNUC_INTERNAL
 void pcmk__add_rsc_actions_to_graph(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__create_graph(pcmk_scheduler_t *scheduler);
 
 
 // Fencing (pcmk_sched_fencing.c)
 
 G_GNUC_INTERNAL
 void pcmk__order_vs_fence(pcmk_action_t *stonith_op,
                           pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 void pcmk__order_vs_unfence(const pcmk_resource_t *rsc, pcmk_node_t *node,
                             pcmk_action_t *action,
                             enum pcmk__action_relation_flags order);
 
 G_GNUC_INTERNAL
 void pcmk__fence_guest(pcmk_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__node_unfenced(const pcmk_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data);
 
 
 // Injected scheduler inputs (pcmk_sched_injections.c)
 
 void pcmk__inject_scheduler_input(pcmk_scheduler_t *scheduler, cib_t *cib,
                                   const pcmk_injections_t *injections);
 
 
 // Constraints of any type (pcmk_sched_constraints.c)
 
 G_GNUC_INTERNAL
 pcmk_resource_t *pcmk__find_constraint_resource(GList *rsc_list,
                                                 const char *id);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__expand_tags_in_sets(xmlNode *xml_obj,
                                    const pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 bool pcmk__valid_resource_or_tag(const pcmk_scheduler_t *scheduler,
                                  const char *id, pcmk_resource_t **rsc,
                                  pcmk_tag_t **tag);
 
 G_GNUC_INTERNAL
 bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
                       bool convert_rsc, const pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 void pcmk__create_internal_constraints(pcmk_scheduler_t *scheduler);
 
 
 // Location constraints
 
 G_GNUC_INTERNAL
 void pcmk__unpack_location(xmlNode *xml_obj, pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 pcmk__location_t *pcmk__new_location(const char *id, pcmk_resource_t *rsc,
                                      int node_score, const char *discover_mode,
                                      pcmk_node_t *foo_node);
 
 G_GNUC_INTERNAL
 void pcmk__apply_locations(pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 void pcmk__apply_location(pcmk_resource_t *rsc, pcmk__location_t *constraint);
 
 
 // Colocation constraints (pcmk_sched_colocation.c)
 
 enum pcmk__coloc_affects {
     pcmk__coloc_affects_nothing = 0,
     pcmk__coloc_affects_location,
     pcmk__coloc_affects_role,
 };
 
 /*!
  * \internal
  * \brief Get the value of a colocation's node attribute
  *
  * When looking up a colocation node attribute on a bundle node for a bundle
  * primitive, we should always look on the bundle node's assigned host,
- * regardless of the value of XML_RSC_ATTR_TARGET. At most one resource (the
- * bundle primitive, if any) can run on a bundle node, so any colocation must
- * necessarily be evaluated with respect to the bundle node (the container).
+ * regardless of the value of \c PCMK_META_CONTAINER_ATTR_TARGET. At most one
+ * resource (the bundle primitive, if any) can run on a bundle node, so any
+ * colocation must necessarily be evaluated with respect to the bundle node
+ * (the container).
  *
  * \param[in] node  Node on which to look up the attribute
  * \param[in] attr  Name of attribute to look up
  * \param[in] rsc   Resource on whose behalf to look up the attribute
  *
  * \return Value of \p attr on \p node or on the host of \p node, as appropriate
  */
 static inline const char *
 pcmk__colocation_node_attr(const pcmk_node_t *node, const char *attr,
                            const pcmk_resource_t *rsc)
 {
     const pcmk_resource_t *top = pe__const_top_resource(rsc, false);
     const bool force_host = pe__is_bundle_node(node)
                             && pe_rsc_is_bundled(rsc)
                             && (top == pe__bundled_resource(rsc));
 
     return pe__node_attribute_calculated(node, attr, rsc,
                                          pcmk__rsc_node_assigned, force_host);
 }
 
 G_GNUC_INTERNAL
 enum pcmk__coloc_affects pcmk__colocation_affects(const pcmk_resource_t
                                                     *dependent,
                                                   const pcmk_resource_t
                                                     *primary,
                                                   const pcmk__colocation_t
                                                     *colocation,
                                                   bool preview);
 
 G_GNUC_INTERNAL
 void pcmk__apply_coloc_to_scores(pcmk_resource_t *dependent,
                                  const pcmk_resource_t *primary,
                                  const pcmk__colocation_t *colocation);
 
 G_GNUC_INTERNAL
 void pcmk__apply_coloc_to_priority(pcmk_resource_t *dependent,
                                    const pcmk_resource_t *primary,
                                    const pcmk__colocation_t *colocation);
 
 G_GNUC_INTERNAL
 void pcmk__add_colocated_node_scores(pcmk_resource_t *source_rsc,
                                      const pcmk_resource_t *target_rsc,
                                      const char *log_id, GHashTable **nodes,
                                      const pcmk__colocation_t *colocation,
                                      float factor, uint32_t flags);
 
 G_GNUC_INTERNAL
 void pcmk__add_dependent_scores(gpointer data, gpointer user_data);
 
 G_GNUC_INTERNAL
 void pcmk__colocation_intersect_nodes(pcmk_resource_t *dependent,
                                       const pcmk_resource_t *primary,
                                       const pcmk__colocation_t *colocation,
                                       const GList *primary_nodes,
                                       bool merge_scores);
 
 G_GNUC_INTERNAL
 void pcmk__unpack_colocation(xmlNode *xml_obj, pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 void pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation,
                          const pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__add_this_with_list(GList **list, GList *addition,
                               const pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation,
                          const pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__add_with_this_list(GList **list, GList *addition,
                               const pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 GList *pcmk__with_this_colocations(const pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 GList *pcmk__this_with_colocations(const pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__new_colocation(const char *id, const char *node_attr, int score,
                           pcmk_resource_t *dependent, pcmk_resource_t *primary,
                           const char *dependent_role, const char *primary_role,
                           uint32_t flags);
 
 G_GNUC_INTERNAL
 void pcmk__block_colocation_dependents(pcmk_action_t *action);
 
 /*!
  * \internal
  * \brief Check whether colocation's dependent preferences should be considered
  *
  * \param[in] colocation  Colocation constraint
  * \param[in] rsc         Primary instance (a NULL argument is treated as
  *                        colocation->primary, which is the usual case, but
  *                        for clones or bundles with multiple instances this
  *                        can be a particular instance)
  *
  * \return true if colocation influence should be effective, otherwise false
  */
 static inline bool
 pcmk__colocation_has_influence(const pcmk__colocation_t *colocation,
                                const pcmk_resource_t *rsc)
 {
     if (rsc == NULL) {
         rsc = colocation->primary;
     }
 
     /* A bundle replica colocates its remote connection with its container,
      * using a finite score so that the container can run on Pacemaker Remote
      * nodes.
      *
      * Moving a connection is lightweight and does not interrupt the service,
      * while moving a container is heavyweight and does interrupt the service,
      * so don't move a clean, active container based solely on the preferences
      * of its connection.
      *
      * This also avoids problematic scenarios where two containers want to
      * perpetually swap places.
      */
     if (pcmk_is_set(colocation->dependent->flags,
                     pcmk_rsc_remote_nesting_allowed)
         && !pcmk_is_set(rsc->flags, pcmk_rsc_failed)
         && pcmk__list_of_1(rsc->running_on)) {
         return false;
     }
 
     /* The dependent in a colocation influences the primary's location
      * if the influence option is true or the primary is not yet active.
      */
     return pcmk_is_set(colocation->flags, pcmk__coloc_influence)
            || (rsc->running_on == NULL);
 }
 
 
 // Ordering constraints (pcmk_sched_ordering.c)
 
 G_GNUC_INTERNAL
 void pcmk__new_ordering(pcmk_resource_t *first_rsc, char *first_task,
                         pcmk_action_t *first_action, pcmk_resource_t *then_rsc,
                         char *then_task, pcmk_action_t *then_action,
                         uint32_t flags, pcmk_scheduler_t *sched);
 
 G_GNUC_INTERNAL
 void pcmk__unpack_ordering(xmlNode *xml_obj, pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 void pcmk__disable_invalid_orderings(pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 void pcmk__order_stops_before_shutdown(pcmk_node_t *node,
                                        pcmk_action_t *shutdown_op);
 
 G_GNUC_INTERNAL
 void pcmk__apply_orderings(pcmk_scheduler_t *sched);
 
 G_GNUC_INTERNAL
 void pcmk__order_after_each(pcmk_action_t *after, GList *list);
 
 
 /*!
  * \internal
  * \brief Create a new ordering between two resource actions
  *
  * \param[in,out] first_rsc   Resource for 'first' action
  * \param[in,out] first_task  Action name used to build the 'first' action key
  * \param[in]     then_rsc    Resource for 'then' action
  * \param[in,out] then_task   Action name used to build the 'then' action key
  * \param[in]     flags       Group of enum pcmk__action_relation_flags
  */
 #define pcmk__order_resource_actions(first_rsc, first_task,                 \
                                      then_rsc, then_task, flags)            \
     pcmk__new_ordering((first_rsc),                                         \
                        pcmk__op_key((first_rsc)->id, (first_task), 0),      \
                        NULL,                                                \
                        (then_rsc),                                          \
                        pcmk__op_key((then_rsc)->id, (then_task), 0),        \
                        NULL, (flags), (first_rsc)->cluster)
 
 #define pcmk__order_starts(rsc1, rsc2, flags)                \
     pcmk__order_resource_actions((rsc1), PCMK_ACTION_START,  \
                                  (rsc2), PCMK_ACTION_START, (flags))
 
 #define pcmk__order_stops(rsc1, rsc2, flags)                 \
     pcmk__order_resource_actions((rsc1), PCMK_ACTION_STOP,   \
                                  (rsc2), PCMK_ACTION_STOP, (flags))
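 /* Illustration only (hypothetical resources; assumes enum
  * pcmk__action_relation_flags offers a plain ordering flag such as
  * pcmk__ar_ordered): ordering a database's start before a web server's start
  * would look like
  *
  *     pcmk__order_starts(db_rsc, web_rsc, pcmk__ar_ordered);
  *
  * which expands to a pcmk__new_ordering() call built from each resource's
  * "start" operation key via pcmk__op_key().
  */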
 
 
 // Ticket constraints (pcmk_sched_tickets.c)
 
 G_GNUC_INTERNAL
 void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pcmk_scheduler_t *scheduler);
 
 
 // Promotable clone resources (pcmk_sched_promotable.c)
 
 G_GNUC_INTERNAL
 void pcmk__add_promotion_scores(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__require_promotion_tickets(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__set_instance_roles(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__create_promotable_actions(pcmk_resource_t *clone);
 
 G_GNUC_INTERNAL
 void pcmk__promotable_restart_ordering(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__order_promotable_instances(pcmk_resource_t *clone);
 
 G_GNUC_INTERNAL
 void pcmk__update_dependent_with_promotable(const pcmk_resource_t *primary,
                                             pcmk_resource_t *dependent,
                                             const pcmk__colocation_t
                                                 *colocation);
 
 G_GNUC_INTERNAL
 void pcmk__update_promotable_dependent_priority(const pcmk_resource_t *primary,
                                                 pcmk_resource_t *dependent,
                                                 const pcmk__colocation_t
                                                     *colocation);
 
 
 // Pacemaker Remote nodes (pcmk_sched_remote.c)
 
 G_GNUC_INTERNAL
 bool pcmk__is_failed_remote_node(const pcmk_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__order_remote_connection_actions(pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 bool pcmk__rsc_corresponds_to_guest(const pcmk_resource_t *rsc,
                                     const pcmk_node_t *node);
 
 G_GNUC_INTERNAL
 pcmk_node_t *pcmk__connection_host_for_action(const pcmk_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__substitute_remote_addr(pcmk_resource_t *rsc, GHashTable *params);
 
 G_GNUC_INTERNAL
 void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml,
                                   const pcmk_action_t *action);
 
 
 // Primitives (pcmk_sched_primitive.c)
 
 G_GNUC_INTERNAL
 pcmk_node_t *pcmk__primitive_assign(pcmk_resource_t *rsc,
                                     const pcmk_node_t *prefer,
                                     bool stop_if_fail);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_create_actions(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_internal_constraints(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__primitive_action_flags(pcmk_action_t *action,
                                       const pcmk_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_apply_coloc_score(pcmk_resource_t *dependent,
                                        const pcmk_resource_t *primary,
                                        const pcmk__colocation_t *colocation,
                                        bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__with_primitive_colocations(const pcmk_resource_t *rsc,
                                       const pcmk_resource_t *orig_rsc,
                                       GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_with_colocations(const pcmk_resource_t *rsc,
                                       const pcmk_resource_t *orig_rsc,
                                       GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__schedule_cleanup(pcmk_resource_t *rsc, const pcmk_node_t *node,
                             bool optional);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_add_utilization(const pcmk_resource_t *rsc,
                                      const pcmk_resource_t *orig_rsc,
                                      GList *all_rscs, GHashTable *utilization);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_shutdown_lock(pcmk_resource_t *rsc);
 
 
 // Groups (pcmk_sched_group.c)
 
 G_GNUC_INTERNAL
 pcmk_node_t *pcmk__group_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
                                 bool stop_if_fail);
 
 G_GNUC_INTERNAL
 void pcmk__group_create_actions(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__group_internal_constraints(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__group_apply_coloc_score(pcmk_resource_t *dependent,
                                    const pcmk_resource_t *primary,
                                    const pcmk__colocation_t *colocation,
                                    bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__with_group_colocations(const pcmk_resource_t *rsc,
                                   const pcmk_resource_t *orig_rsc,
                                   GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__group_with_colocations(const pcmk_resource_t *rsc,
                                   const pcmk_resource_t *orig_rsc,
                                   GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__group_add_colocated_node_scores(pcmk_resource_t *source_rsc,
                                            const pcmk_resource_t *target_rsc,
                                            const char *log_id,
                                            GHashTable **nodes,
                                            const pcmk__colocation_t *colocation,
                                            float factor, uint32_t flags);
 
 G_GNUC_INTERNAL
 void pcmk__group_apply_location(pcmk_resource_t *rsc,
                                 pcmk__location_t *location);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__group_action_flags(pcmk_action_t *action,
                                   const pcmk_node_t *node);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__group_update_ordered_actions(pcmk_action_t *first,
                                             pcmk_action_t *then,
                                             const pcmk_node_t *node,
                                             uint32_t flags, uint32_t filter,
                                             uint32_t type,
                                             pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 GList *pcmk__group_colocated_resources(const pcmk_resource_t *rsc,
                                        const pcmk_resource_t *orig_rsc,
                                        GList *colocated_rscs);
 
 G_GNUC_INTERNAL
 void pcmk__group_add_utilization(const pcmk_resource_t *rsc,
                                  const pcmk_resource_t *orig_rsc,
                                  GList *all_rscs, GHashTable *utilization);
 
 G_GNUC_INTERNAL
 void pcmk__group_shutdown_lock(pcmk_resource_t *rsc);
 
 
 // Clones (pcmk_sched_clone.c)
 
 G_GNUC_INTERNAL
 pcmk_node_t *pcmk__clone_assign(pcmk_resource_t *rsc, const pcmk_node_t *prefer,
                                 bool stop_if_fail);
 
 G_GNUC_INTERNAL
 void pcmk__clone_create_actions(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__clone_create_probe(pcmk_resource_t *rsc, pcmk_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__clone_internal_constraints(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__clone_apply_coloc_score(pcmk_resource_t *dependent,
                                    const pcmk_resource_t *primary,
                                    const pcmk__colocation_t *colocation,
                                    bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__with_clone_colocations(const pcmk_resource_t *rsc,
                                   const pcmk_resource_t *orig_rsc,
                                   GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__clone_with_colocations(const pcmk_resource_t *rsc,
                                   const pcmk_resource_t *orig_rsc,
                                   GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__clone_apply_location(pcmk_resource_t *rsc,
                                 pcmk__location_t *constraint);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__clone_action_flags(pcmk_action_t *action,
                                   const pcmk_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__clone_add_actions_to_graph(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__clone_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml);
 
 G_GNUC_INTERNAL
 void pcmk__clone_add_utilization(const pcmk_resource_t *rsc,
                                  const pcmk_resource_t *orig_rsc,
                                  GList *all_rscs, GHashTable *utilization);
 
 G_GNUC_INTERNAL
 void pcmk__clone_shutdown_lock(pcmk_resource_t *rsc);
 
 
 // Bundles (pcmk_sched_bundle.c)
 
 G_GNUC_INTERNAL
 pcmk_node_t *pcmk__bundle_assign(pcmk_resource_t *rsc,
                                  const pcmk_node_t *prefer, bool stop_if_fail);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_create_actions(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__bundle_create_probe(pcmk_resource_t *rsc, pcmk_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_internal_constraints(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_apply_coloc_score(pcmk_resource_t *dependent,
                                     const pcmk_resource_t *primary,
                                     const pcmk__colocation_t *colocation,
                                     bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__with_bundle_colocations(const pcmk_resource_t *rsc,
                                    const pcmk_resource_t *orig_rsc,
                                    GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_with_colocations(const pcmk_resource_t *rsc,
                                    const pcmk_resource_t *orig_rsc,
                                    GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_apply_location(pcmk_resource_t *rsc,
                                  pcmk__location_t *constraint);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__bundle_action_flags(pcmk_action_t *action,
                                    const pcmk_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__output_bundle_actions(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_add_actions_to_graph(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_add_utilization(const pcmk_resource_t *rsc,
                                   const pcmk_resource_t *orig_rsc,
                                   GList *all_rscs, GHashTable *utilization);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_shutdown_lock(pcmk_resource_t *rsc);
 
 
 // Clone instances or bundle replica containers (pcmk_sched_instances.c)
 
 G_GNUC_INTERNAL
 void pcmk__assign_instances(pcmk_resource_t *collective, GList *instances,
                             int max_total, int max_per_node);
 
 G_GNUC_INTERNAL
 void pcmk__create_instance_actions(pcmk_resource_t *rsc, GList *instances);
 
 G_GNUC_INTERNAL
 bool pcmk__instance_matches(const pcmk_resource_t *instance,
                             const pcmk_node_t *node, enum rsc_role_e role,
                             bool current);
 
 G_GNUC_INTERNAL
 pcmk_resource_t *pcmk__find_compatible_instance(const pcmk_resource_t *match_rsc,
                                                 const pcmk_resource_t *rsc,
                                                 enum rsc_role_e role,
                                                 bool current);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__instance_update_ordered_actions(pcmk_action_t *first,
                                                pcmk_action_t *then,
                                                const pcmk_node_t *node,
                                                uint32_t flags, uint32_t filter,
                                                uint32_t type,
                                                pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__collective_action_flags(pcmk_action_t *action,
                                        const GList *instances,
                                        const pcmk_node_t *node);
 
 
 // Injections (pcmk_injections.c)
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_node_state_change(cib_t *cib_conn, const char *node,
                                         bool up);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node,
                                        const char *resource,
                                        const char *lrm_name,
                                        const char *rclass,
                                        const char *rtype,
                                        const char *rprovider);
 
 G_GNUC_INTERNAL
 void pcmk__inject_failcount(pcmk__output_t *out, xmlNode *cib_node,
                             const char *resource, const char *task,
                             guint interval_ms, int rc);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_action_result(xmlNode *cib_resource,
                                     lrmd_event_data_t *op, int target_rc);
 
 
 // Nodes (pcmk_sched_nodes.c)
 
 G_GNUC_INTERNAL
 bool pcmk__node_available(const pcmk_node_t *node, bool consider_score,
                           bool consider_guest);
 
 G_GNUC_INTERNAL
 bool pcmk__any_node_available(GHashTable *nodes);
 
 G_GNUC_INTERNAL
 GHashTable *pcmk__copy_node_table(GHashTable *nodes);
 
 G_GNUC_INTERNAL
 void pcmk__copy_node_tables(const pcmk_resource_t *rsc, GHashTable **copy);
 
 G_GNUC_INTERNAL
 void pcmk__restore_node_tables(pcmk_resource_t *rsc, GHashTable *backup);
 
 G_GNUC_INTERNAL
 GList *pcmk__sort_nodes(GList *nodes, pcmk_node_t *active_node);
 
 G_GNUC_INTERNAL
 void pcmk__apply_node_health(pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 pcmk_node_t *pcmk__top_allowed_node(const pcmk_resource_t *rsc,
                                     const pcmk_node_t *node);
 
 
 // Functions applying to more than one variant (pcmk_sched_resource.c)
 
 G_GNUC_INTERNAL
 void pcmk__set_assignment_methods(pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 bool pcmk__rsc_agent_changed(pcmk_resource_t *rsc, pcmk_node_t *node,
                              const xmlNode *rsc_entry, bool active_on_node);
 
 G_GNUC_INTERNAL
 GList *pcmk__rscs_matching_id(const char *id,
                               const pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 GList *pcmk__colocated_resources(const pcmk_resource_t *rsc,
                                  const pcmk_resource_t *orig_rsc,
                                  GList *colocated_rscs);
 
 G_GNUC_INTERNAL
 void pcmk__noop_add_graph_meta(const pcmk_resource_t *rsc, xmlNode *xml);
 
 G_GNUC_INTERNAL
 void pcmk__output_resource_actions(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__assign_resource(pcmk_resource_t *rsc, pcmk_node_t *node, bool force,
                            bool stop_if_fail);
 
 G_GNUC_INTERNAL
 void pcmk__unassign_resource(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__threshold_reached(pcmk_resource_t *rsc, const pcmk_node_t *node,
                              pcmk_resource_t **failed);
 
 G_GNUC_INTERNAL
 void pcmk__sort_resources(pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 gint pcmk__cmp_instance(gconstpointer a, gconstpointer b);
 
 G_GNUC_INTERNAL
 gint pcmk__cmp_instance_number(gconstpointer a, gconstpointer b);
 
 
 // Functions related to probes (pcmk_sched_probes.c)
 
 G_GNUC_INTERNAL
 bool pcmk__probe_rsc_on_node(pcmk_resource_t *rsc, pcmk_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__order_probes(pcmk_scheduler_t *scheduler);
 
 G_GNUC_INTERNAL
 bool pcmk__probe_resource_list(GList *rscs, pcmk_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__schedule_probes(pcmk_scheduler_t *scheduler);
 
 
 // Functions related to live migration (pcmk_sched_migration.c)
 
 void pcmk__create_migration_actions(pcmk_resource_t *rsc,
                                     const pcmk_node_t *current);
 
 void pcmk__abort_dangling_migration(void *data, void *user_data);
 
 bool pcmk__rsc_can_migrate(const pcmk_resource_t *rsc,
                            const pcmk_node_t *current);
 
 void pcmk__order_migration_equivalents(pcmk__action_relation_t *order);
 
 
 // Functions related to node utilization (pcmk_sched_utilization.c)
 
 G_GNUC_INTERNAL
 int pcmk__compare_node_capacities(const pcmk_node_t *node1,
                                   const pcmk_node_t *node2);
 
 G_GNUC_INTERNAL
 void pcmk__consume_node_capacity(GHashTable *current_utilization,
                                  const pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__release_node_capacity(GHashTable *current_utilization,
                                  const pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 const pcmk_node_t *pcmk__ban_insufficient_capacity(pcmk_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__create_utilization_constraints(pcmk_resource_t *rsc,
                                           const GList *allowed_nodes);
 
 G_GNUC_INTERNAL
 void pcmk__show_node_capacities(const char *desc, pcmk_scheduler_t *scheduler);
 
 #endif // PCMK__LIBPACEMAKER_PRIVATE__H
diff --git a/lib/pacemaker/pcmk_sched_remote.c b/lib/pacemaker/pcmk_sched_remote.c
index c915389e06..a7b2143ecf 100644
--- a/lib/pacemaker/pcmk_sched_remote.c
+++ b/lib/pacemaker/pcmk_sched_remote.c
@@ -1,729 +1,731 @@
 /*
- * Copyright 2004-2023 the Pacemaker project contributors
+ * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <sys/param.h>
 
 #include <crm/crm.h>
 #include <crm/cib.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 #include <crm/common/xml_internal.h>
 
 #include <glib.h>
 
 #include <crm/pengine/status.h>
 #include <pacemaker-internal.h>
 #include "libpacemaker_private.h"
 
 enum remote_connection_state {
     remote_state_unknown = 0,
     remote_state_alive = 1,
     remote_state_resting = 2,
     remote_state_failed = 3,
     remote_state_stopped = 4
 };
 
 static const char *
 state2text(enum remote_connection_state state)
 {
     switch (state) {
         case remote_state_unknown:
             return "unknown";
         case remote_state_alive:
             return "alive";
         case remote_state_resting:
             return "resting";
         case remote_state_failed:
             return "failed";
         case remote_state_stopped:
             return "stopped";
     }
 
     return "impossible";
 }
 
 /* We always use pcmk__ar_guest_allowed with these convenience functions to
  * exempt internally generated constraints from the prohibition of user
  * constraints involving remote connection resources.
  *
  * The start ordering additionally uses pcmk__ar_unrunnable_first_blocks so that
  * the specified action is not runnable if the start is not runnable.
  */
 
 static inline void
 order_start_then_action(pcmk_resource_t *first_rsc, pcmk_action_t *then_action,
                         uint32_t extra)
 {
     if ((first_rsc != NULL) && (then_action != NULL)) {
         pcmk__new_ordering(first_rsc, start_key(first_rsc), NULL,
                            then_action->rsc, NULL, then_action,
                            pcmk__ar_guest_allowed
                            |pcmk__ar_unrunnable_first_blocks
                            |extra,
                            first_rsc->cluster);
     }
 }
 
 static inline void
 order_action_then_stop(pcmk_action_t *first_action, pcmk_resource_t *then_rsc,
                        uint32_t extra)
 {
     if ((first_action != NULL) && (then_rsc != NULL)) {
         pcmk__new_ordering(first_action->rsc, NULL, first_action,
                            then_rsc, stop_key(then_rsc), NULL,
                            pcmk__ar_guest_allowed|extra, then_rsc->cluster);
     }
 }
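 
 /* Illustrative sketch (not part of the build): how a caller might combine the
  * two convenience wrappers above so that an action both waits for the remote
  * connection to start and completes before the connection stops. Only names
  * already defined in this file are used.
  */
 #if 0
 static void
 example_order_around_connection(pcmk_resource_t *remote_rsc,
                                 pcmk_action_t *action)
 {
     // Connection must be up before the action can run
     order_start_then_action(remote_rsc, action, pcmk__ar_none);
 
     // ... and the action must finish before the connection is stopped
     order_action_then_stop(action, remote_rsc, pcmk__ar_none);
 }
 #endif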
 
 static enum remote_connection_state
 get_remote_node_state(const pcmk_node_t *node)
 {
     const pcmk_resource_t *remote_rsc = NULL;
     const pcmk_node_t *cluster_node = NULL;
 
     CRM_ASSERT(node != NULL);
 
     remote_rsc = node->details->remote_rsc;
     CRM_ASSERT(remote_rsc != NULL);
 
     cluster_node = pe__current_node(remote_rsc);
 
     /* If the cluster node that the remote connection resource resides on
      * is unclean or has gone offline, we can't process any operations
      * on that remote node until after the connection starts elsewhere.
      */
     if ((remote_rsc->next_role == pcmk_role_stopped)
         || (remote_rsc->allocated_to == NULL)) {
 
         // The connection resource is not going to run anywhere
 
         if ((cluster_node != NULL) && cluster_node->details->unclean) {
             /* The remote connection is failed because its resource is on a
              * failed node and can't be recovered elsewhere, so we must fence.
              */
             return remote_state_failed;
         }
 
         if (!pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)) {
             /* Connection resource is cleanly stopped */
             return remote_state_stopped;
         }
 
         /* Connection resource is failed */
 
         if ((remote_rsc->next_role == pcmk_role_stopped)
             && remote_rsc->remote_reconnect_ms
             && node->details->remote_was_fenced
             && !pe__shutdown_requested(node)) {
 
             /* We won't know whether the connection is recoverable until the
              * reconnect interval expires and we reattempt connection.
              */
             return remote_state_unknown;
         }
 
         /* The remote connection is in a failed state. If there are any
          * resources known to be active on it (stop) or in an unknown state
          * (probe), we must assume the worst and fence it.
          */
         return remote_state_failed;
 
     } else if (cluster_node == NULL) {
         /* Connection is recoverable but not currently running anywhere, so see
          * if we can recover it first
          */
         return remote_state_unknown;
 
     } else if (cluster_node->details->unclean
                || !(cluster_node->details->online)) {
         // Connection is running on a dead node, see if we can recover it first
         return remote_state_resting;
 
     } else if (pcmk__list_of_multiple(remote_rsc->running_on)
                && (remote_rsc->partial_migration_source != NULL)
                && (remote_rsc->partial_migration_target != NULL)) {
         /* We're in the middle of migrating a connection resource, so wait until
          * after the migration completes before performing any actions.
          */
         return remote_state_resting;
 
     }
     return remote_state_alive;
 }
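 
 /* A minimal usage sketch (not built): log the connection state computed above
  * for a Pacemaker Remote node. Only functions and fields already used in this
  * file are referenced.
  */
 #if 0
 static void
 example_log_remote_state(const pcmk_node_t *node)
 {
     enum remote_connection_state state = get_remote_node_state(node);
 
     crm_trace("Remote node %s connection state: %s",
               node->details->uname, state2text(state));
 }
 #endif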
 
 /*!
  * \internal
  * \brief Order actions on a remote node relative to actions for the connection
  *
  * \param[in,out] action    An action scheduled on a Pacemaker Remote node
  */
 static void
 apply_remote_ordering(pcmk_action_t *action)
 {
     pcmk_resource_t *remote_rsc = NULL;
     enum action_tasks task = text2task(action->task);
     enum remote_connection_state state = get_remote_node_state(action->node);
 
     uint32_t order_opts = pcmk__ar_none;
 
     if (action->rsc == NULL) {
         return;
     }
 
     CRM_ASSERT(pe__is_guest_or_remote_node(action->node));
 
     remote_rsc = action->node->details->remote_rsc;
     CRM_ASSERT(remote_rsc != NULL);
 
     crm_trace("Order %s action %s relative to %s%s (state: %s)",
               action->task, action->uuid,
               pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)? "failed " : "",
               remote_rsc->id, state2text(state));
 
     if (pcmk__strcase_any_of(action->task, PCMK_ACTION_MIGRATE_TO,
                              PCMK_ACTION_MIGRATE_FROM, NULL)) {
         /* Migration ops map to pcmk_action_unspecified, but we need to apply
          * the same ordering as for stop or demote (see get_router_node()).
          */
         task = pcmk_action_stop;
     }
 
     switch (task) {
         case pcmk_action_start:
         case pcmk_action_promote:
             order_opts = pcmk__ar_none;
 
             if (state == remote_state_failed) {
                 /* Force recovery, by making this action required */
                 pe__set_order_flags(order_opts, pcmk__ar_first_implies_then);
             }
 
             /* Ensure connection is up before running this action */
             order_start_then_action(remote_rsc, action, order_opts);
             break;
 
         case pcmk_action_stop:
             if (state == remote_state_alive) {
                 order_action_then_stop(action, remote_rsc,
                                        pcmk__ar_then_implies_first);
 
             } else if (state == remote_state_failed) {
                 /* The resource is active on the node, but since we don't have a
                  * valid connection, the only way to stop the resource is by
                  * fencing the node. There is no need to order the stop relative
                  * to the remote connection, since the stop will become implied
                  * by the fencing.
                  */
                 pe_fence_node(remote_rsc->cluster, action->node,
                               "resources are active but "
                               "connection is unrecoverable",
                               FALSE);
 
             } else if (remote_rsc->next_role == pcmk_role_stopped) {
                 /* State must be remote_state_unknown or remote_state_stopped.
                  * Since the connection is not coming back up in this
                  * transition, stop this resource first.
                  */
                 order_action_then_stop(action, remote_rsc,
                                        pcmk__ar_then_implies_first);
 
             } else {
                 /* The connection is going to be started somewhere else, so
                  * stop this resource after that completes.
                  */
                 order_start_then_action(remote_rsc, action, pcmk__ar_none);
             }
             break;
 
         case pcmk_action_demote:
             /* Only order this demote relative to the connection start if the
              * connection isn't being torn down. Otherwise, the demote would be
              * blocked because the connection start would not be allowed.
              */
             if ((state == remote_state_resting)
                 || (state == remote_state_unknown)) {
 
                 order_start_then_action(remote_rsc, action, pcmk__ar_none);
             } /* Otherwise we can rely on the stop ordering */
             break;
 
         default:
             /* Wait for the connection resource to be up */
             if (pcmk__action_is_recurring(action)) {
                 /* In case we ever get the recovery logic wrong, force
                  * recurring monitors to be restarted, even if just
                  * the connection was re-established
                  */
                 order_start_then_action(remote_rsc, action,
                                         pcmk__ar_first_implies_then);
 
             } else {
                 pcmk_node_t *cluster_node = pe__current_node(remote_rsc);
 
                 if ((task == pcmk_action_monitor) && (state == remote_state_failed)) {
                     /* We would only be here if we do not know the state of the
                      * resource on the remote node. Since we have no way to find
                      * out, it is necessary to fence the node.
                      */
                     pe_fence_node(remote_rsc->cluster, action->node,
                                   "resources are in unknown state "
                                   "and connection is unrecoverable", FALSE);
                 }
 
                 if ((cluster_node != NULL) && (state == remote_state_stopped)) {
                     /* The connection is currently up, but is going down
                      * permanently. Make sure we check services are actually
                      * stopped _before_ we let the connection get closed.
                      */
                     order_action_then_stop(action, remote_rsc,
                                            pcmk__ar_unrunnable_first_blocks);
 
                 } else {
                     order_start_then_action(remote_rsc, action, pcmk__ar_none);
                 }
             }
             break;
     }
 }
 
 static void
 apply_container_ordering(pcmk_action_t *action)
 {
     /* VMs are also classified as containers for these purposes... in
      * that they both involve a 'thing' running on a real or remote
      * cluster node.
      *
      * This allows us to be smarter about the type and extent of
      * recovery actions required in various scenarios.
      */
     pcmk_resource_t *remote_rsc = NULL;
     pcmk_resource_t *container = NULL;
     enum action_tasks task = text2task(action->task);
 
     CRM_ASSERT(action->rsc != NULL);
     CRM_ASSERT(action->node != NULL);
     CRM_ASSERT(pe__is_guest_or_remote_node(action->node));
 
     remote_rsc = action->node->details->remote_rsc;
     CRM_ASSERT(remote_rsc != NULL);
 
     container = remote_rsc->container;
     CRM_ASSERT(container != NULL);
 
     if (pcmk_is_set(container->flags, pcmk_rsc_failed)) {
         pe_fence_node(action->rsc->cluster, action->node, "container failed",
                       FALSE);
     }
 
     crm_trace("Order %s action %s relative to %s%s for %s%s",
               action->task, action->uuid,
               pcmk_is_set(remote_rsc->flags, pcmk_rsc_failed)? "failed " : "",
               remote_rsc->id,
               pcmk_is_set(container->flags, pcmk_rsc_failed)? "failed " : "",
               container->id);
 
     if (pcmk__strcase_any_of(action->task, PCMK_ACTION_MIGRATE_TO,
                              PCMK_ACTION_MIGRATE_FROM, NULL)) {
         /* Migration ops map to pcmk_action_unspecified, but we need to apply
          * the same ordering as for stop or demote (see get_router_node()).
          */
         task = pcmk_action_stop;
     }
 
     switch (task) {
         case pcmk_action_start:
         case pcmk_action_promote:
             // Force resource recovery if the container is recovered
             order_start_then_action(container, action,
                                     pcmk__ar_first_implies_then);
 
             // Wait for the connection resource to be up, too
             order_start_then_action(remote_rsc, action, pcmk__ar_none);
             break;
 
         case pcmk_action_stop:
         case pcmk_action_demote:
             if (pcmk_is_set(container->flags, pcmk_rsc_failed)) {
                 /* When the container representing a guest node fails, any stop
                  * or demote actions for resources running on the guest node
                  * are implied by the container stopping. This is similar to
                  * how fencing operations work for cluster nodes and remote
                  * nodes.
                  */
             } else {
                 /* Ensure the operation happens before the connection is brought
                  * down.
                  *
                  * If we really wanted to, we could order these after the
                  * connection start, IFF the container's current role was
                  * stopped (otherwise we re-introduce an ordering loop when the
                  * connection is restarting).
                  */
                 order_action_then_stop(action, remote_rsc, pcmk__ar_none);
             }
             break;
 
         default:
             /* Wait for the connection resource to be up */
             if (pcmk__action_is_recurring(action)) {
                 /* In case we ever get the recovery logic wrong, force
                  * recurring monitors to be restarted, even if just
                  * the connection was re-established
                  */
                 if (task != pcmk_action_unspecified) {
                     order_start_then_action(remote_rsc, action,
                                             pcmk__ar_first_implies_then);
                 }
             } else {
                 order_start_then_action(remote_rsc, action, pcmk__ar_none);
             }
             break;
     }
 }
 
 /*!
  * \internal
  * \brief Order all relevant actions relative to remote connection actions
  *
  * \param[in,out] scheduler  Scheduler data
  */
 void
 pcmk__order_remote_connection_actions(pcmk_scheduler_t *scheduler)
 {
     if (!pcmk_is_set(scheduler->flags, pcmk_sched_have_remote_nodes)) {
         return;
     }
 
     crm_trace("Creating remote connection orderings");
 
     for (GList *iter = scheduler->actions; iter != NULL; iter = iter->next) {
         pcmk_action_t *action = iter->data;
         pcmk_resource_t *remote = NULL;
 
         // We are only interested in resource actions
         if (action->rsc == NULL) {
             continue;
         }
 
         /* Special case: If we are clearing the failcount of an actual
          * remote connection resource, then make sure this happens before
          * any start of the resource in this transition.
          */
         if (action->rsc->is_remote_node &&
             pcmk__str_eq(action->task, PCMK_ACTION_CLEAR_FAILCOUNT,
                          pcmk__str_none)) {
 
             pcmk__new_ordering(action->rsc, NULL, action, action->rsc,
                                pcmk__op_key(action->rsc->id, PCMK_ACTION_START,
                                             0),
                                NULL, pcmk__ar_ordered, scheduler);
 
             continue;
         }
 
         // We are only interested in actions assigned to a node
         if (action->node == NULL) {
             continue;
         }
 
         if (!pe__is_guest_or_remote_node(action->node)) {
             continue;
         }
 
         /* We are only interested in real actions.
          *
          * @TODO This is probably wrong; pseudo-actions might be converted to
          * real actions and vice versa later in update_actions() at the end of
          * pcmk__apply_orderings().
          */
         if (pcmk_is_set(action->flags, pcmk_action_pseudo)) {
             continue;
         }
 
         remote = action->node->details->remote_rsc;
         if (remote == NULL) {
             // Orphaned
             continue;
         }
 
         /* Another special case: if a resource is moving to a Pacemaker Remote
          * node, order the stop on the original node after any start of the
          * remote connection. This ensures that if the connection fails to
          * start, we leave the resource running on the original node.
          */
         if (pcmk__str_eq(action->task, PCMK_ACTION_START, pcmk__str_none)) {
             for (GList *item = action->rsc->actions; item != NULL;
                  item = item->next) {
                 pcmk_action_t *rsc_action = item->data;
 
                 if (!pe__same_node(rsc_action->node, action->node)
                     && pcmk__str_eq(rsc_action->task, PCMK_ACTION_STOP,
                                     pcmk__str_none)) {
                     pcmk__new_ordering(remote, start_key(remote), NULL,
                                        action->rsc, NULL, rsc_action,
                                        pcmk__ar_ordered, scheduler);
                 }
             }
         }
 
         /* The action occurs across a remote connection, so create
          * ordering constraints that guarantee the action occurs while the node
          * is active (after start, before stop ... things like that).
          *
          * This is somewhat brittle in that we need to make sure the results of
          * this ordering are compatible with the result of get_router_node().
          * It would probably be better to add XML_LRM_ATTR_ROUTER_NODE as part
          * of this logic rather than in create_graph_action().
          */
         if (remote->container) {
             crm_trace("Container ordering for %s", action->uuid);
             apply_container_ordering(action);
 
         } else {
             crm_trace("Remote ordering for %s", action->uuid);
             apply_remote_ordering(action);
         }
     }
 }
 
 /*!
  * \internal
  * \brief Check whether a node is a failed remote node
  *
  * \param[in] node  Node to check
  *
  * \return true if \p node is a failed remote node, false otherwise
  */
 bool
 pcmk__is_failed_remote_node(const pcmk_node_t *node)
 {
     return pe__is_remote_node(node) && (node->details->remote_rsc != NULL)
            && (get_remote_node_state(node) == remote_state_failed);
 }
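 
 /* Hedged usage sketch (not built): combine the helper above with
  * pe_fence_node(), which is used elsewhere in this file, to schedule fencing
  * of every failed remote node. Iterating scheduler->nodes as a GList of
  * pcmk_node_t * is an assumption made for illustration only.
  */
 #if 0
 static void
 example_fence_failed_remotes(pcmk_scheduler_t *scheduler)
 {
     for (GList *iter = scheduler->nodes; iter != NULL; iter = iter->next) {
         pcmk_node_t *node = iter->data;
 
         if (pcmk__is_failed_remote_node(node)) {
             pe_fence_node(scheduler, node,
                           "remote connection is unrecoverable", FALSE);
         }
     }
 }
 #endif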
 
 /*!
  * \internal
  * \brief Check whether a given resource corresponds to a given node as a guest
  *
  * \param[in] rsc   Resource to check
  * \param[in] node  Node to check
  *
  * \return true if \p node is a guest node and \p rsc is its containing
  *         resource, otherwise false
  */
 bool
 pcmk__rsc_corresponds_to_guest(const pcmk_resource_t *rsc,
                                const pcmk_node_t *node)
 {
     return (rsc != NULL) && (rsc->fillers != NULL) && (node != NULL)
             && (node->details->remote_rsc != NULL)
             && (node->details->remote_rsc->container == rsc);
 }
 
 /*!
  * \internal
  * \brief Get proper connection host that a remote action must be routed through
  *
  * A remote connection resource might be starting, stopping, or migrating in
  * the same transition in which an action needs to be executed on its Pacemaker
  * Remote node. Determine the proper node that the remote action should be
  * routed through.
  *
  * \param[in] action  (Potentially remote) action to route
  *
  * \return Connection host that action should be routed through if remote,
  *         otherwise NULL
  */
 pcmk_node_t *
 pcmk__connection_host_for_action(const pcmk_action_t *action)
 {
     pcmk_node_t *began_on = NULL;
     pcmk_node_t *ended_on = NULL;
     bool partial_migration = false;
     const char *task = action->task;
 
     if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_none)
         || !pe__is_guest_or_remote_node(action->node)) {
         return NULL;
     }
 
     CRM_ASSERT(action->node->details->remote_rsc != NULL);
 
     began_on = pe__current_node(action->node->details->remote_rsc);
     ended_on = action->node->details->remote_rsc->allocated_to;
     if (action->node->details->remote_rsc
         && (action->node->details->remote_rsc->container == NULL)
         && action->node->details->remote_rsc->partial_migration_target) {
         partial_migration = true;
     }
 
     if (began_on == NULL) {
         crm_trace("Routing %s for %s through remote connection's "
                   "next node %s (starting)%s",
                   action->task, (action->rsc? action->rsc->id : "no resource"),
                   (ended_on? ended_on->details->uname : "none"),
                   partial_migration? " (partial migration)" : "");
         return ended_on;
     }
 
     if (ended_on == NULL) {
         crm_trace("Routing %s for %s through remote connection's "
                   "current node %s (stopping)%s",
                   action->task, (action->rsc? action->rsc->id : "no resource"),
                   (began_on? began_on->details->uname : "none"),
                   partial_migration? " (partial migration)" : "");
         return began_on;
     }
 
     if (pe__same_node(began_on, ended_on)) {
         crm_trace("Routing %s for %s through remote connection's "
                   "current node %s (not moving)%s",
                   action->task, (action->rsc? action->rsc->id : "no resource"),
                   (began_on? began_on->details->uname : "none"),
                   partial_migration? " (partial migration)" : "");
         return began_on;
     }
 
     /* If we get here, the remote connection is moving during this transition.
      * This means some actions for resources behind the connection will get
      * routed through the cluster node the connection resource is currently on,
      * and others will get routed through the cluster node the connection will
      * end up on.
      */
 
     if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_none)) {
         task = g_hash_table_lookup(action->meta, "notify_operation");
     }
 
     /*
      * Stop, demote, and migration actions must occur before the connection can
      * move (these actions are required before the remote resource can stop). In
      * this case, we know these actions have to be routed through the initial
      * cluster node the connection resource lived on before the move takes
      * place.
      *
      * The exception is a partial migration of a (non-guest) remote connection
      * resource; in that case, all actions (even these) will be ordered after
      * the connection's pseudo-start on the migration target, so the target is
      * the router node.
      */
     if (pcmk__strcase_any_of(task, PCMK_ACTION_CANCEL, PCMK_ACTION_STOP,
                              PCMK_ACTION_DEMOTE, PCMK_ACTION_MIGRATE_FROM,
                              PCMK_ACTION_MIGRATE_TO, NULL)
         && !partial_migration) {
         crm_trace("Routing %s for %s through remote connection's "
                   "current node %s (moving)%s",
                   action->task, (action->rsc? action->rsc->id : "no resource"),
                   (began_on? began_on->details->uname : "none"),
                   partial_migration? " (partial migration)" : "");
         return began_on;
     }
 
     /* Everything else (start, promote, monitor, probe, refresh,
      * clear failcount, delete, ...) must occur after the connection starts on
      * the node it is moving to.
      */
     crm_trace("Routing %s for %s through remote connection's "
               "next node %s (moving)%s",
               action->task, (action->rsc? action->rsc->id : "no resource"),
               (ended_on? ended_on->details->uname : "none"),
               partial_migration? " (partial migration)" : "");
     return ended_on;
 }
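 
 /* Sketch only: how a graph-building caller might consume the router node
  * returned above. XML_LRM_ATTR_ROUTER_NODE is referenced in a comment earlier
  * in this file; treating it as the XML attribute to set here is an assumption
  * for illustration.
  */
 #if 0
 static void
 example_set_router_node(xmlNode *action_xml, const pcmk_action_t *action)
 {
     const pcmk_node_t *router = pcmk__connection_host_for_action(action);
 
     if (router != NULL) {
         crm_xml_add(action_xml, XML_LRM_ATTR_ROUTER_NODE,
                     router->details->uname);
     }
 }
 #endif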
 
 /*!
  * \internal
  * \brief Replace remote connection's addr="#uname" with actual address
  *
  * REMOTE_CONTAINER_HACK: If a given resource is a remote connection resource
  * with its "addr" parameter set to "#uname", pull the actual value from the
  * parameters evaluated without a node (which was put there earlier in
  * pcmk__create_graph() when the bundle's expand() method was called).
  *
  * \param[in,out] rsc     Resource to check
  * \param[in,out] params  Resource parameters evaluated per node
  */
 void
 pcmk__substitute_remote_addr(pcmk_resource_t *rsc, GHashTable *params)
 {
     const char *remote_addr = g_hash_table_lookup(params,
                                                   XML_RSC_ATTR_REMOTE_RA_ADDR);
 
     if (pcmk__str_eq(remote_addr, "#uname", pcmk__str_none)) {
         GHashTable *base = pe_rsc_params(rsc, NULL, rsc->cluster);
 
         remote_addr = g_hash_table_lookup(base, XML_RSC_ATTR_REMOTE_RA_ADDR);
         if (remote_addr != NULL) {
             g_hash_table_insert(params, strdup(XML_RSC_ATTR_REMOTE_RA_ADDR),
                                 strdup(remote_addr));
         }
     }
 }
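 
 /* Minimal sketch (not built) of exercising the substitution above on a
  * per-node parameter table. Calling pe_rsc_params() with a specific node is
  * assumed here purely for illustration; the function above only relies on the
  * table contents.
  */
 #if 0
 static void
 example_substitute_addr(pcmk_resource_t *remote_rsc, pcmk_node_t *node)
 {
     GHashTable *params = pe_rsc_params(remote_rsc, node, remote_rsc->cluster);
 
     // Replaces addr="#uname" with the address evaluated without a node
     pcmk__substitute_remote_addr(remote_rsc, params);
 }
 #endif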
 
 /*!
  * \brief Add special bundle meta-attributes to XML
  *
  * If a given action will be executed on a guest node (including a bundle),
  * add the special bundle meta-attribute "container-attribute-target" and
  * environment variable "physical_host" as XML attributes (using meta-attribute
  * naming).
  *
  * \param[in,out] args_xml  XML to add attributes to
  * \param[in]     action    Action to check
  */
 void
 pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pcmk_action_t *action)
 {
     const pcmk_node_t *guest = action->node;
     const pcmk_node_t *host = NULL;
     enum action_tasks task;
 
     if (!pe__is_guest_node(guest)) {
         return;
     }
 
     task = text2task(action->task);
     if ((task == pcmk_action_notify) || (task == pcmk_action_notified)) {
         task = text2task(g_hash_table_lookup(action->meta, "notify_operation"));
     }
 
     switch (task) {
         case pcmk_action_stop:
         case pcmk_action_stopped:
         case pcmk_action_demote:
         case pcmk_action_demoted:
             // "Down" actions take place on guest's current host
             host = pe__current_node(guest->details->remote_rsc->container);
             break;
 
         case pcmk_action_start:
         case pcmk_action_started:
         case pcmk_action_monitor:
         case pcmk_action_promote:
         case pcmk_action_promoted:
             // "Up" actions take place on guest's next host
             host = guest->details->remote_rsc->container->allocated_to;
             break;
 
         default:
             break;
     }
 
     if (host != NULL) {
-        hash2metafield((gpointer) XML_RSC_ATTR_TARGET,
-                       (gpointer) g_hash_table_lookup(action->rsc->meta,
-                                                      XML_RSC_ATTR_TARGET),
+        gpointer target = g_hash_table_lookup(action->rsc->meta,
+                                              PCMK_META_CONTAINER_ATTR_TARGET);
+
+        hash2metafield((gpointer) PCMK_META_CONTAINER_ATTR_TARGET,
+                       target,
                        (gpointer) args_xml);
         hash2metafield((gpointer) PCMK__ENV_PHYSICAL_HOST,
                        (gpointer) host->details->uname,
                        (gpointer) args_xml);
     }
 }
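 
 /* Sketch (not built): reading back the meta-attribute that the hunk above now
  * writes, using the PCMK_META_CONTAINER_ATTR_TARGET constant added to
  * msg_xml.h. Treating rsc->meta as a string-to-string hash table matches its
  * use earlier in this function.
  */
 #if 0
 static const char *
 example_get_attr_target(const pcmk_resource_t *rsc)
 {
     return g_hash_table_lookup(rsc->meta, PCMK_META_CONTAINER_ATTR_TARGET);
 }
 #endif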
diff --git a/lib/pengine/bundle.c b/lib/pengine/bundle.c
index 3a9d99b742..5b7ea504e7 100644
--- a/lib/pengine/bundle.c
+++ b/lib/pengine/bundle.c
@@ -1,2226 +1,2228 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <ctype.h>
 #include <stdint.h>
 
 #include <crm/pengine/rules.h>
 #include <crm/pengine/status.h>
 #include <crm/pengine/internal.h>
 #include <crm/msg_xml.h>
 #include <crm/common/output.h>
 #include <crm/common/xml_internal.h>
 #include <pe_status_private.h>
 
 enum pe__bundle_mount_flags {
     pe__bundle_mount_none       = 0x00,
 
     // mount instance-specific subdirectory rather than source directly
     pe__bundle_mount_subdir     = 0x01
 };
 
 typedef struct {
     char *source;
     char *target;
     char *options;
     uint32_t flags; // bitmask of pe__bundle_mount_flags
 } pe__bundle_mount_t;
 
 typedef struct {
     char *source;
     char *target;
 } pe__bundle_port_t;
 
 enum pe__container_agent {
     PE__CONTAINER_AGENT_UNKNOWN,
     PE__CONTAINER_AGENT_DOCKER,
     PE__CONTAINER_AGENT_RKT,
     PE__CONTAINER_AGENT_PODMAN,
 };
 
 #define PE__CONTAINER_AGENT_UNKNOWN_S "unknown"
 #define PE__CONTAINER_AGENT_DOCKER_S  "docker"
 #define PE__CONTAINER_AGENT_RKT_S     "rkt"
 #define PE__CONTAINER_AGENT_PODMAN_S  "podman"
 
 typedef struct pe__bundle_variant_data_s {
         int promoted_max;
         int nreplicas;
         int nreplicas_per_host;
         char *prefix;
         char *image;
         const char *ip_last;
         char *host_network;
         char *host_netmask;
         char *control_port;
         char *container_network;
         char *ip_range_start;
         gboolean add_host;
         gchar *container_host_options;
         char *container_command;
         char *launcher_options;
         const char *attribute_target;
 
         pcmk_resource_t *child;
 
         GList *replicas;    // pcmk__bundle_replica_t *
         GList *ports;       // pe__bundle_port_t *
         GList *mounts;      // pe__bundle_mount_t *
 
         enum pe__container_agent agent_type;
 } pe__bundle_variant_data_t;
 
 #define get_bundle_variant_data(data, rsc)                      \
     CRM_ASSERT(rsc != NULL);                                    \
     CRM_ASSERT(rsc->variant == pcmk_rsc_variant_bundle);        \
     CRM_ASSERT(rsc->variant_opaque != NULL);                    \
     data = (pe__bundle_variant_data_t *) rsc->variant_opaque;
 
 /*!
  * \internal
  * \brief Get maximum number of bundle replicas allowed to run
  *
  * \param[in] rsc  Bundle or bundled resource to check
  *
  * \return Maximum replicas for bundle corresponding to \p rsc
  */
 int
 pe__bundle_max(const pcmk_resource_t *rsc)
 {
     const pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true));
     return bundle_data->nreplicas;
 }
 
 /*!
  * \internal
  * \brief Get the resource inside a bundle
  *
  * \param[in] rsc  Bundle to check
  *
  * \return Resource inside \p rsc if any, otherwise NULL
  */
 pcmk_resource_t *
 pe__bundled_resource(const pcmk_resource_t *rsc)
 {
     const pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, pe__const_top_resource(rsc, true));
     return bundle_data->child;
 }
 
 /*!
  * \internal
  * \brief Get containerized resource corresponding to a given bundle container
  *
  * \param[in] instance  Collective instance that might be a bundle container
  *
  * \return Bundled resource instance inside \p instance if it is a bundle
  *         container instance, otherwise NULL
  */
 const pcmk_resource_t *
 pe__get_rsc_in_container(const pcmk_resource_t *instance)
 {
     const pe__bundle_variant_data_t *data = NULL;
     const pcmk_resource_t *top = pe__const_top_resource(instance, true);
 
     if ((top == NULL) || (top->variant != pcmk_rsc_variant_bundle)) {
         return NULL;
     }
     get_bundle_variant_data(data, top);
 
     for (const GList *iter = data->replicas; iter != NULL; iter = iter->next) {
         const pcmk__bundle_replica_t *replica = iter->data;
 
         if (instance == replica->container) {
             return replica->child;
         }
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Check whether a given node is created by a bundle
  *
  * \param[in] bundle  Bundle resource to check
  * \param[in] node    Node to check
  *
  * \return true if \p node is an instance of \p bundle, otherwise false
  */
 bool
 pe__node_is_bundle_instance(const pcmk_resource_t *bundle,
                             const pcmk_node_t *node)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, bundle);
     for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
         pcmk__bundle_replica_t *replica = iter->data;
 
         if (pe__same_node(node, replica->node)) {
             return true;
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Get the container of a bundle's first replica
  *
  * \param[in] bundle  Bundle resource to get container for
  *
  * \return Container resource from first replica of \p bundle if any,
  *         otherwise NULL
  */
 pcmk_resource_t *
 pe__first_container(const pcmk_resource_t *bundle)
 {
     const pe__bundle_variant_data_t *bundle_data = NULL;
     const pcmk__bundle_replica_t *replica = NULL;
 
     get_bundle_variant_data(bundle_data, bundle);
     if (bundle_data->replicas == NULL) {
         return NULL;
     }
     replica = bundle_data->replicas->data;
     return replica->container;
 }
 
 /*!
  * \internal
  * \brief Iterate over bundle replicas
  *
  * \param[in,out] bundle     Bundle to iterate over
  * \param[in]     fn         Function to call for each replica (its return value
  *                           indicates whether to continue iterating)
  * \param[in,out] user_data  Pointer to pass to \p fn
  */
 void
 pe__foreach_bundle_replica(pcmk_resource_t *bundle,
                            bool (*fn)(pcmk__bundle_replica_t *, void *),
                            void *user_data)
 {
     const pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, bundle);
     for (GList *iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
         if (!fn((pcmk__bundle_replica_t *) iter->data, user_data)) {
             break;
         }
     }
 }
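 
 /* Usage sketch for the iterator above (the callback and counter names are
  * hypothetical): count how many replicas currently have a container resource.
  * Returning true from the callback keeps the iteration going.
  */
 #if 0
 static bool
 example_count_replica(pcmk__bundle_replica_t *replica, void *user_data)
 {
     int *count = user_data;
 
     if (replica->container != NULL) {
         (*count)++;
     }
     return true;    // continue iterating
 }
 
 static int
 example_count_containers(pcmk_resource_t *bundle)
 {
     int count = 0;
 
     pe__foreach_bundle_replica(bundle, example_count_replica, &count);
     return count;
 }
 #endif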
 
 /*!
  * \internal
  * \brief Iterate over const bundle replicas
  *
  * \param[in]     bundle     Bundle to iterate over
  * \param[in]     fn         Function to call for each replica (its return value
  *                           indicates whether to continue iterating)
  * \param[in,out] user_data  Pointer to pass to \p fn
  */
 void
 pe__foreach_const_bundle_replica(const pcmk_resource_t *bundle,
                                  bool (*fn)(const pcmk__bundle_replica_t *,
                                             void *),
                                  void *user_data)
 {
     const pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, bundle);
     for (const GList *iter = bundle_data->replicas; iter != NULL;
          iter = iter->next) {
 
         if (!fn((const pcmk__bundle_replica_t *) iter->data, user_data)) {
             break;
         }
     }
 }
 
 static char *
 next_ip(const char *last_ip)
 {
     unsigned int oct1 = 0;
     unsigned int oct2 = 0;
     unsigned int oct3 = 0;
     unsigned int oct4 = 0;
     int rc = sscanf(last_ip, "%u.%u.%u.%u", &oct1, &oct2, &oct3, &oct4);
 
     if (rc != 4) {
         /* @TODO check for IPv6 */
         return NULL;
 
     } else if (oct3 > 253) {
         return NULL;
 
     } else if (oct4 > 253) {
         ++oct3;
         oct4 = 1;
 
     } else {
         ++oct4;
     }
 
     return crm_strdup_printf("%u.%u.%u.%u", oct1, oct2, oct3, oct4);
 }
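 
 /* Illustration of the increment behavior above (not built): "10.0.0.5" yields
  * "10.0.0.6", "10.0.0.254" rolls over to "10.0.1.1", and anything that is not
  * a dotted quad (including IPv6 addresses) yields NULL.
  */
 #if 0
 static void
 example_next_ip(void)
 {
     char *next = next_ip("10.0.0.254");     // "10.0.1.1"
 
     free(next);
     next = next_ip("fe80::1");              // NULL (IPv6 not handled yet)
     free(next);
 }
 #endif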
 
 static void
 allocate_ip(pe__bundle_variant_data_t *data, pcmk__bundle_replica_t *replica,
             GString *buffer)
 {
     if(data->ip_range_start == NULL) {
         return;
 
     } else if(data->ip_last) {
         replica->ipaddr = next_ip(data->ip_last);
 
     } else {
         replica->ipaddr = strdup(data->ip_range_start);
     }
 
     data->ip_last = replica->ipaddr;
     switch (data->agent_type) {
         case PE__CONTAINER_AGENT_DOCKER:
         case PE__CONTAINER_AGENT_PODMAN:
             if (data->add_host) {
                 g_string_append_printf(buffer, " --add-host=%s-%d:%s",
                                        data->prefix, replica->offset,
                                        replica->ipaddr);
             } else {
                 g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d",
                                        replica->ipaddr, data->prefix,
                                        replica->offset);
             }
             break;
 
         case PE__CONTAINER_AGENT_RKT:
             g_string_append_printf(buffer, " --hosts-entry=%s=%s-%d",
                                    replica->ipaddr, data->prefix,
                                    replica->offset);
             break;
 
         default: // PE__CONTAINER_AGENT_UNKNOWN
             break;
     }
 }
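 
 /* Illustration: with prefix "httpd-bundle", add_host set, and 192.168.122.131
  * allocated to replica 0, the buffer above gains
  * " --add-host=httpd-bundle-0:192.168.122.131"; with add_host unset it gains
  * " --hosts-entry=192.168.122.131=httpd-bundle-0" instead.
  */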
 
 static xmlNode *
 create_resource(const char *name, const char *provider, const char *kind)
 {
     xmlNode *rsc = create_xml_node(NULL, XML_CIB_TAG_RESOURCE);
 
     crm_xml_add(rsc, PCMK_XA_ID, name);
     crm_xml_add(rsc, XML_AGENT_ATTR_CLASS, PCMK_RESOURCE_CLASS_OCF);
     crm_xml_add(rsc, XML_AGENT_ATTR_PROVIDER, provider);
     crm_xml_add(rsc, XML_ATTR_TYPE, kind);
 
     return rsc;
 }
 
 /*!
  * \internal
  * \brief Check whether cluster can manage resource inside container
  *
  * \param[in,out] data  Container variant data
  *
  * \return TRUE if networking configuration is acceptable, FALSE otherwise
  *
  * \note The resource is manageable if an IP range or control port has been
  *       specified. If a control port is used without an IP range, replicas per
  *       host must be 1.
  */
 static bool
 valid_network(pe__bundle_variant_data_t *data)
 {
     if(data->ip_range_start) {
         return TRUE;
     }
     if(data->control_port) {
         if(data->nreplicas_per_host > 1) {
             pcmk__config_err("Specifying the 'control-port' for %s requires "
                              "'replicas-per-host=1'", data->prefix);
             data->nreplicas_per_host = 1;
             // @TODO to be sure:
             // pe__clear_resource_flags(rsc, pcmk_rsc_unique);
         }
         return TRUE;
     }
     return FALSE;
 }
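 
 /* Configuration example of the rule enforced above (illustrative; attribute
  * names follow the bundle syntax referenced in the error message): a bundle
  * that sets control-port without ip-range-start must keep replicas-per-host
  * at 1, otherwise valid_network() logs a configuration error and forces it
  * back to 1.
  *
  *   <bundle id="httpd-bundle">
  *     <podman image="pcmk:httpd" replicas="3" replicas-per-host="1"/>
  *     <network control-port="3121"/>
  *   </bundle>
  */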
 
 static int
 create_ip_resource(pcmk_resource_t *parent, pe__bundle_variant_data_t *data,
                    pcmk__bundle_replica_t *replica)
 {
     if(data->ip_range_start) {
         char *id = NULL;
         xmlNode *xml_ip = NULL;
         xmlNode *xml_obj = NULL;
 
         id = crm_strdup_printf("%s-ip-%s", data->prefix, replica->ipaddr);
         crm_xml_sanitize_id(id);
         xml_ip = create_resource(id, "heartbeat", "IPaddr2");
         free(id);
 
         xml_obj = create_xml_node(xml_ip, XML_TAG_ATTR_SETS);
         crm_xml_set_id(xml_obj, "%s-attributes-%d",
                        data->prefix, replica->offset);
 
         crm_create_nvpair_xml(xml_obj, NULL, "ip", replica->ipaddr);
         if(data->host_network) {
             crm_create_nvpair_xml(xml_obj, NULL, "nic", data->host_network);
         }
 
         if(data->host_netmask) {
             crm_create_nvpair_xml(xml_obj, NULL,
                                   "cidr_netmask", data->host_netmask);
 
         } else {
             crm_create_nvpair_xml(xml_obj, NULL, "cidr_netmask", "32");
         }
 
         xml_obj = create_xml_node(xml_ip, "operations");
         crm_create_op_xml(xml_obj, ID(xml_ip), PCMK_ACTION_MONITOR, "60s",
                           NULL);
 
         // TODO: Other ops? Timeouts and intervals from underlying resource?
 
         if (pe__unpack_resource(xml_ip, &replica->ip, parent,
                                 parent->cluster) != pcmk_rc_ok) {
             return pcmk_rc_unpack_error;
         }
 
         parent->children = g_list_append(parent->children, replica->ip);
     }
     return pcmk_rc_ok;
 }
 
 static const char*
 container_agent_str(enum pe__container_agent t)
 {
     switch (t) {
         case PE__CONTAINER_AGENT_DOCKER: return PE__CONTAINER_AGENT_DOCKER_S;
         case PE__CONTAINER_AGENT_RKT:    return PE__CONTAINER_AGENT_RKT_S;
         case PE__CONTAINER_AGENT_PODMAN: return PE__CONTAINER_AGENT_PODMAN_S;
         default: // PE__CONTAINER_AGENT_UNKNOWN
             break;
     }
     return PE__CONTAINER_AGENT_UNKNOWN_S;
 }
 
 static int
 create_container_resource(pcmk_resource_t *parent,
                           const pe__bundle_variant_data_t *data,
                           pcmk__bundle_replica_t *replica)
 {
     char *id = NULL;
     xmlNode *xml_container = NULL;
     xmlNode *xml_obj = NULL;
 
     // Agent-specific
     const char *hostname_opt = NULL;
     const char *env_opt = NULL;
     const char *agent_str = NULL;
     int volid = 0;  // rkt-only
 
     GString *buffer = NULL;
     GString *dbuffer = NULL;
 
     // Where syntax differences are drop-in replacements, set them now
     switch (data->agent_type) {
         case PE__CONTAINER_AGENT_DOCKER:
         case PE__CONTAINER_AGENT_PODMAN:
             hostname_opt = "-h ";
             env_opt = "-e ";
             break;
         case PE__CONTAINER_AGENT_RKT:
             hostname_opt = "--hostname=";
             env_opt = "--environment=";
             break;
         default:    // PE__CONTAINER_AGENT_UNKNOWN
             return pcmk_rc_unpack_error;
     }
     agent_str = container_agent_str(data->agent_type);
 
     buffer = g_string_sized_new(4096);
 
     id = crm_strdup_printf("%s-%s-%d", data->prefix, agent_str,
                            replica->offset);
     crm_xml_sanitize_id(id);
     xml_container = create_resource(id, "heartbeat", agent_str);
     free(id);
 
     xml_obj = create_xml_node(xml_container, XML_TAG_ATTR_SETS);
     crm_xml_set_id(xml_obj, "%s-attributes-%d", data->prefix, replica->offset);
 
     crm_create_nvpair_xml(xml_obj, NULL, "image", data->image);
     crm_create_nvpair_xml(xml_obj, NULL, "allow_pull", XML_BOOLEAN_TRUE);
     crm_create_nvpair_xml(xml_obj, NULL, "force_kill", XML_BOOLEAN_FALSE);
     crm_create_nvpair_xml(xml_obj, NULL, "reuse", XML_BOOLEAN_FALSE);
 
     if (data->agent_type == PE__CONTAINER_AGENT_DOCKER) {
         g_string_append(buffer, " --restart=no");
     }
 
     /* Set a container hostname only if we have an IP to map it to. The user can
      * set -h or --uts=host themselves if they want a nicer name for logs, but
      * this keeps applications happy that need their hostname to match the IP
      * they bind to.
      */
     if (data->ip_range_start != NULL) {
         g_string_append_printf(buffer, " %s%s-%d", hostname_opt, data->prefix,
                                replica->offset);
     }
     pcmk__g_strcat(buffer, " ", env_opt, "PCMK_stderr=1", NULL);
 
     if (data->container_network != NULL) {
         pcmk__g_strcat(buffer, " --net=", data->container_network, NULL);
     }
 
     if (data->control_port != NULL) {
         pcmk__g_strcat(buffer, " ", env_opt, "PCMK_" PCMK__ENV_REMOTE_PORT "=",
                        data->control_port, NULL);
     } else {
         g_string_append_printf(buffer, " %sPCMK_" PCMK__ENV_REMOTE_PORT "=%d",
                                env_opt, DEFAULT_REMOTE_PORT);
     }
 
     for (GList *iter = data->mounts; iter != NULL; iter = iter->next) {
         pe__bundle_mount_t *mount = (pe__bundle_mount_t *) iter->data;
         char *source = NULL;
 
         if (pcmk_is_set(mount->flags, pe__bundle_mount_subdir)) {
             source = crm_strdup_printf("%s/%s-%d", mount->source, data->prefix,
                                        replica->offset);
             pcmk__add_separated_word(&dbuffer, 1024, source, ",");
         }
 
         switch (data->agent_type) {
             case PE__CONTAINER_AGENT_DOCKER:
             case PE__CONTAINER_AGENT_PODMAN:
                 pcmk__g_strcat(buffer,
                                " -v ", pcmk__s(source, mount->source),
                                ":", mount->target, NULL);
 
                 if (mount->options != NULL) {
                     pcmk__g_strcat(buffer, ":", mount->options, NULL);
                 }
                 break;
             case PE__CONTAINER_AGENT_RKT:
                 g_string_append_printf(buffer,
                                        " --volume vol%d,kind=host,"
                                        "source=%s%s%s "
                                        "--mount volume=vol%d,target=%s",
                                        volid, pcmk__s(source, mount->source),
                                        (mount->options != NULL)? "," : "",
                                        pcmk__s(mount->options, ""),
                                        volid, mount->target);
                 volid++;
                 break;
             default:
                 break;
         }
         free(source);
     }
 
     for (GList *iter = data->ports; iter != NULL; iter = iter->next) {
         pe__bundle_port_t *port = (pe__bundle_port_t *) iter->data;
 
         switch (data->agent_type) {
             case PE__CONTAINER_AGENT_DOCKER:
             case PE__CONTAINER_AGENT_PODMAN:
                 if (replica->ipaddr != NULL) {
                     pcmk__g_strcat(buffer,
                                    " -p ", replica->ipaddr, ":", port->source,
                                    ":", port->target, NULL);
 
                 } else if (!pcmk__str_eq(data->container_network, "host",
                                          pcmk__str_none)) {
                     // No need to do port mapping if net == host
                     pcmk__g_strcat(buffer,
                                    " -p ", port->source, ":", port->target,
                                    NULL);
                 }
                 break;
             case PE__CONTAINER_AGENT_RKT:
                 if (replica->ipaddr != NULL) {
                     pcmk__g_strcat(buffer,
                                    " --port=", port->target,
                                    ":", replica->ipaddr, ":", port->source,
                                    NULL);
                 } else {
                     pcmk__g_strcat(buffer,
                                    " --port=", port->target, ":", port->source,
                                    NULL);
                 }
                 break;
             default:
                 break;
         }
     }
 
     /* @COMPAT: We should use pcmk__add_word() here, but we can't yet, because
      * it would cause restarts during rolling upgrades.
      *
      * Historically, if data->launcher_options is not NULL, we append
      * (" %s", data->launcher_options) even if data->launcher_options is an
      * empty string, and likewise for data->container_host_options. Using
      *
      *     pcmk__add_word(buffer, 0, data->launcher_options)
      *
      * would remove that extra trailing space, causing a resource definition
      * change.
      */
     if (data->launcher_options != NULL) {
         pcmk__g_strcat(buffer, " ", data->launcher_options, NULL);
     }
 
     if (data->container_host_options != NULL) {
         pcmk__g_strcat(buffer, " ", data->container_host_options, NULL);
     }
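
     /* Illustration only (hypothetical bundle named "httpd-bundle" using
      * podman): after the appends above, run_opts might contain fragments
      * such as:
      *
      *   -h httpd-bundle-0 -e PCMK_stderr=1 -e PCMK_remote_port=3121
      *   -v /etc/pacemaker/authkey:/etc/pacemaker/authkey
      *   -p 192.168.122.131:3121:3121
      *
      * The exact contents depend on the configured <network>, <storage>, and
      * option attributes.
      */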
 
     crm_create_nvpair_xml(xml_obj, NULL, "run_opts",
                           (const char *) buffer->str);
     g_string_free(buffer, TRUE);
 
     crm_create_nvpair_xml(xml_obj, NULL, "mount_points",
                           (dbuffer != NULL)? (const char *) dbuffer->str : "");
     if (dbuffer != NULL) {
         g_string_free(dbuffer, TRUE);
     }
 
     if (replica->child != NULL) {
         if (data->container_command != NULL) {
             crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                   data->container_command);
         } else {
             crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                   SBIN_DIR "/pacemaker-remoted");
         }
 
         /* TODO: Allow users to specify their own monitor command?
          *
          * We just want to know if the container is alive; we'll monitor the
          * child independently.
          */
         crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
 #if 0
         /* @TODO Consider supporting the use case where we can start and stop
          * resources, but not proxy local commands (such as setting node
          * attributes), by running the local executor in stand-alone mode.
          * However, this would probably be better done via ACLs as with other
          * Pacemaker Remote nodes.
          */
     } else if ((child != NULL) && data->untrusted) {
         crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                               CRM_DAEMON_DIR "/pacemaker-execd");
         crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd",
                               CRM_DAEMON_DIR "/pacemaker/cts-exec-helper -c poke");
 #endif
     } else {
         if (data->container_command != NULL) {
             crm_create_nvpair_xml(xml_obj, NULL, "run_cmd",
                                   data->container_command);
         }
 
         /* TODO: Allow users to specify their own monitor command?
          *
          * We don't know what's in the container, so we just want to know if it
          * is alive.
          */
         crm_create_nvpair_xml(xml_obj, NULL, "monitor_cmd", "/bin/true");
     }
 
     xml_obj = create_xml_node(xml_container, "operations");
     crm_create_op_xml(xml_obj, ID(xml_container), PCMK_ACTION_MONITOR, "60s",
                       NULL);
 
     // TODO: Other ops? Timeouts and intervals from underlying resource?
     if (pe__unpack_resource(xml_container, &replica->container, parent,
                             parent->cluster) != pcmk_rc_ok) {
         return pcmk_rc_unpack_error;
     }
     pe__set_resource_flags(replica->container, pcmk_rsc_replica_container);
     parent->children = g_list_append(parent->children, replica->container);
 
     return pcmk_rc_ok;
 }
 
 /*!
  * \brief Ban a node from a resource's (and its children's) allowed nodes list
  *
  * \param[in,out] rsc    Resource to modify
  * \param[in]     uname  Name of node to ban
  */
 static void
 disallow_node(pcmk_resource_t *rsc, const char *uname)
 {
     gpointer match = g_hash_table_lookup(rsc->allowed_nodes, uname);
 
     if (match) {
         ((pcmk_node_t *) match)->weight = -INFINITY;
         ((pcmk_node_t *) match)->rsc_discover_mode = pcmk_probe_never;
     }
     if (rsc->children) {
         g_list_foreach(rsc->children, (GFunc) disallow_node, (gpointer) uname);
     }
 }
 
 static int
 create_remote_resource(pcmk_resource_t *parent, pe__bundle_variant_data_t *data,
                        pcmk__bundle_replica_t *replica)
 {
     if (replica->child && valid_network(data)) {
         GHashTableIter gIter;
         pcmk_node_t *node = NULL;
         xmlNode *xml_remote = NULL;
         char *id = crm_strdup_printf("%s-%d", data->prefix, replica->offset);
         char *port_s = NULL;
         const char *uname = NULL;
         const char *connect_name = NULL;
 
         if (pe_find_resource(parent->cluster->resources, id) != NULL) {
             free(id);
             // The biggest hammer we have
             id = crm_strdup_printf("pcmk-internal-%s-remote-%d",
                                    replica->child->id, replica->offset);
             // @TODO return error instead of asserting?
             CRM_ASSERT(pe_find_resource(parent->cluster->resources,
                                         id) == NULL);
         }
 
         /* REMOTE_CONTAINER_HACK: When the connection does not have its own
          * IP, we use the magic string "#uname" as the server name, in order
          * to support nested remotes (i.e. a bundle running on a remote node).
          */
         connect_name = (replica->ipaddr? replica->ipaddr : "#uname");
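         /* (The literal "#uname" is resolved later, when
          * pe__add_bundle_remote_name() replaces it with the uname of the node
          * actually hosting the container.)
          */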
 
         if (data->control_port == NULL) {
             port_s = pcmk__itoa(DEFAULT_REMOTE_PORT);
         }
 
         /* This sets replica->container as replica->remote's container, which is
          * similar to what happens with guest nodes. This is how the scheduler
          * knows that the bundle node is fenced by recovering the container,
          * and that the remote connection should be ordered relative to the
          * container.
          */
         xml_remote = pe_create_remote_xml(NULL, id, replica->container->id,
                                           NULL, NULL, NULL,
                                           connect_name, (data->control_port?
                                           data->control_port : port_s));
         free(port_s);
 
         /* Abandon our created ID, and pull the copy from the XML, because we
          * need something that will get freed during scheduler data cleanup to
          * use as the node ID and uname.
          */
         free(id);
         id = NULL;
         uname = ID(xml_remote);
 
         /* Ensure a node has been created for the guest (it may have already
          * been, if it has a permanent node attribute), and ensure its weight is
          * -INFINITY so no other resources can run on it.
          */
         node = pe_find_node(parent->cluster->nodes, uname);
         if (node == NULL) {
             node = pe_create_node(uname, uname, "remote", "-INFINITY",
                                   parent->cluster);
         } else {
             node->weight = -INFINITY;
         }
         node->rsc_discover_mode = pcmk_probe_never;
 
         /* unpack_remote_nodes() ensures that each remote node and guest node
          * has a pcmk_node_t entry. Ideally, it would do the same for bundle
          * nodes. Unfortunately, a bundle has to be mostly unpacked before it's
          * obvious what nodes will be needed, so we do it just above.
          *
          * Worse, that means that the node may have been utilized while
          * unpacking other resources, without our weight correction. The most
          * likely place for this to happen is when pe__unpack_resource() calls
          * resource_location() to set a default score in symmetric clusters.
          * This adds a node *copy* to each resource's allowed nodes, and these
          * copies will have the wrong weight.
          *
          * As a hacky workaround, fix those copies here.
          *
          * @TODO Possible alternative: ensure bundles are unpacked before other
          * resources, so the weight is correct before any copies are made.
          */
         g_list_foreach(parent->cluster->resources, (GFunc) disallow_node,
                        (gpointer) uname);
 
         replica->node = pe__copy_node(node);
         replica->node->weight = 500;
         replica->node->rsc_discover_mode = pcmk_probe_exclusive;
 
         /* Ensure the node shows up as allowed and with the correct discovery
          * mode set
          */
         if (replica->child->allowed_nodes != NULL) {
             g_hash_table_destroy(replica->child->allowed_nodes);
         }
         replica->child->allowed_nodes = pcmk__strkey_table(NULL, free);
         g_hash_table_insert(replica->child->allowed_nodes,
                             (gpointer) replica->node->details->id,
                             pe__copy_node(replica->node));
 
         {
             pcmk_node_t *copy = pe__copy_node(replica->node);
             copy->weight = -INFINITY;
             g_hash_table_insert(replica->child->parent->allowed_nodes,
                                 (gpointer) replica->node->details->id, copy);
         }
         if (pe__unpack_resource(xml_remote, &replica->remote, parent,
                                 parent->cluster) != pcmk_rc_ok) {
             return pcmk_rc_unpack_error;
         }
 
         g_hash_table_iter_init(&gIter, replica->remote->allowed_nodes);
         while (g_hash_table_iter_next(&gIter, NULL, (void **)&node)) {
             if (pe__is_guest_or_remote_node(node)) {
                 /* Remote resources can only run on 'normal' cluster nodes */
                 node->weight = -INFINITY;
             }
         }
 
         replica->node->details->remote_rsc = replica->remote;
 
         // Ensure pe__is_guest_node() functions correctly immediately
         replica->remote->container = replica->container;
 
         /* A bundle's #kind is closer to "container" (guest node) than the
          * "remote" set by pe_create_node().
          */
         g_hash_table_insert(replica->node->details->attrs,
                             strdup(CRM_ATTR_KIND), strdup("container"));
 
         /* One effect of this is that setup_container() will add
          * replica->remote to replica->container's fillers, which will make
          * pe__resource_contains_guest_node() true for replica->container.
          *
          * replica->child does NOT get added to replica->container's fillers.
          * The only noticeable effect if it did would be for its fail count to
          * be taken into account when checking replica->container's migration
          * threshold.
          */
         parent->children = g_list_append(parent->children, replica->remote);
     }
     return pcmk_rc_ok;
 }
 
 static int
 create_replica_resources(pcmk_resource_t *parent,
                          pe__bundle_variant_data_t *data,
                          pcmk__bundle_replica_t *replica)
 {
     int rc = pcmk_rc_ok;
 
     rc = create_container_resource(parent, data, replica);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     rc = create_ip_resource(parent, data, replica);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     rc = create_remote_resource(parent, data, replica);
     if (rc != pcmk_rc_ok) {
         return rc;
     }
 
     if ((replica->child != NULL) && (replica->ipaddr != NULL)) {
         add_hash_param(replica->child->meta, "external-ip", replica->ipaddr);
     }
 
     if (replica->remote != NULL) {
         /*
          * Allow the remote connection resource to be allocated to a
          * different node than the one on which the container is active.
          *
          * This makes it possible to have Pacemaker Remote nodes running
          * containers with pacemaker-remoted inside in order to start
          * services inside those containers.
          */
         pe__set_resource_flags(replica->remote,
                                pcmk_rsc_remote_nesting_allowed);
     }
     return rc;
 }
 
 static void
 mount_add(pe__bundle_variant_data_t *bundle_data, const char *source,
           const char *target, const char *options, uint32_t flags)
 {
     pe__bundle_mount_t *mount = calloc(1, sizeof(pe__bundle_mount_t));
 
     CRM_ASSERT(mount != NULL);
     mount->source = strdup(source);
     mount->target = strdup(target);
     pcmk__str_update(&mount->options, options);
     mount->flags = flags;
     bundle_data->mounts = g_list_append(bundle_data->mounts, mount);
 }
 
 static void
 mount_free(pe__bundle_mount_t *mount)
 {
     free(mount->source);
     free(mount->target);
     free(mount->options);
     free(mount);
 }
 
 static void
 port_free(pe__bundle_port_t *port)
 {
     free(port->source);
     free(port->target);
     free(port);
 }
 
 static pcmk__bundle_replica_t *
 replica_for_remote(pcmk_resource_t *remote)
 {
     pcmk_resource_t *top = remote;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     if (top == NULL) {
         return NULL;
     }
 
     while (top->parent != NULL) {
         top = top->parent;
     }
 
     get_bundle_variant_data(bundle_data, top);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pcmk__bundle_replica_t *replica = gIter->data;
 
         if (replica->remote == remote) {
             return replica;
         }
     }
     CRM_LOG_ASSERT(FALSE);
     return NULL;
 }
 
 bool
 pe__bundle_needs_remote_name(pcmk_resource_t *rsc)
 {
     const char *value;
     GHashTable *params = NULL;
 
     if (rsc == NULL) {
         return false;
     }
 
     // Use NULL node since pcmk__bundle_expand() uses that to set value
     params = pe_rsc_params(rsc, NULL, rsc->cluster);
     value = g_hash_table_lookup(params, XML_RSC_ATTR_REMOTE_RA_ADDR);
 
     return pcmk__str_eq(value, "#uname", pcmk__str_casei)
            && xml_contains_remote_node(rsc->xml);
 }
 
 const char *
 pe__add_bundle_remote_name(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler,
                            xmlNode *xml, const char *field)
 {
     /* REMOTE_CONTAINER_HACK: Allow remote nodes that start containers with
      * Pacemaker Remote inside
      */
 
     pcmk_node_t *node = NULL;
     pcmk__bundle_replica_t *replica = NULL;
 
     if (!pe__bundle_needs_remote_name(rsc)) {
         return NULL;
     }
 
     replica = replica_for_remote(rsc);
     if (replica == NULL) {
         return NULL;
     }
 
     node = replica->container->allocated_to;
     if (node == NULL) {
         /* If it won't be running anywhere after the
          * transition, go with where it's running now.
          */
         node = pe__current_node(replica->container);
     }
 
     if(node == NULL) {
         crm_trace("Cannot determine address for bundle connection %s", rsc->id);
         return NULL;
     }
 
     crm_trace("Setting address for bundle connection %s to bundle host %s",
               rsc->id, pe__node_name(node));
     if(xml != NULL && field != NULL) {
         crm_xml_add(xml, field, node->details->uname);
     }
 
     return node->details->uname;
 }
 
 #define pe__set_bundle_mount_flags(mount_xml, flags, flags_to_set) do {     \
         flags = pcmk__set_flags_as(__func__, __LINE__, LOG_TRACE,           \
                                    "Bundle mount", ID(mount_xml), flags,    \
                                    (flags_to_set), #flags_to_set);          \
     } while (0)
 
 gboolean
 pe__unpack_bundle(pcmk_resource_t *rsc, pcmk_scheduler_t *scheduler)
 {
     const char *value = NULL;
     xmlNode *xml_obj = NULL;
     xmlNode *xml_resource = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
     bool need_log_mount = TRUE;
 
     CRM_ASSERT(rsc != NULL);
     pcmk__rsc_trace(rsc, "Processing resource %s...", rsc->id);
 
     bundle_data = calloc(1, sizeof(pe__bundle_variant_data_t));
     rsc->variant_opaque = bundle_data;
     bundle_data->prefix = strdup(rsc->id);
 
     xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_DOCKER_S);
     if (xml_obj != NULL) {
         bundle_data->agent_type = PE__CONTAINER_AGENT_DOCKER;
     } else {
         xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_RKT_S);
         if (xml_obj != NULL) {
             bundle_data->agent_type = PE__CONTAINER_AGENT_RKT;
         } else {
             xml_obj = first_named_child(rsc->xml, PE__CONTAINER_AGENT_PODMAN_S);
             if (xml_obj != NULL) {
                 bundle_data->agent_type = PE__CONTAINER_AGENT_PODMAN;
             } else {
                 return FALSE;
             }
         }
     }
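     /* Illustration only (hypothetical configuration): the remainder of this
      * function unpacks bundle XML shaped roughly like:
      *
      *   <bundle id="httpd-bundle">
      *     <podman image="pcmk:httpd" replicas="3" replicas-per-host="1"/>
      *     <network ip-range-start="192.168.122.131" host-interface="eth0">
      *       <port-mapping id="httpd-port" port="80"/>
      *     </network>
      *     <storage>
      *       <storage-mapping id="httpd-root" source-dir="/srv/html"
      *                        target-dir="/var/www/html" options="rw"/>
      *     </storage>
      *     <primitive class="ocf" id="httpd" provider="heartbeat"
      *                type="apache"/>
      *   </bundle>
      */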
 
     // Use 0 for default, minimum, and invalid promoted-max
     value = crm_element_value(xml_obj, PCMK_META_PROMOTED_MAX);
     if (value == NULL) {
         // @COMPAT deprecated since 2.0.0
         value = crm_element_value(xml_obj, "masters");
     }
     pcmk__scan_min_int(value, &bundle_data->promoted_max, 0);
 
     // Default replicas to promoted-max if specified, and to 1 otherwise
     value = crm_element_value(xml_obj, "replicas");
     if ((value == NULL) && (bundle_data->promoted_max > 0)) {
         bundle_data->nreplicas = bundle_data->promoted_max;
     } else {
         pcmk__scan_min_int(value, &bundle_data->nreplicas, 1);
     }
 
     /*
      * Communication between containers on the same host via the
      * floating IPs only works if the container is started with:
      *   --userland-proxy=false --ip-masq=false
      */
     value = crm_element_value(xml_obj, "replicas-per-host");
     pcmk__scan_min_int(value, &bundle_data->nreplicas_per_host, 1);
     if (bundle_data->nreplicas_per_host == 1) {
         pe__clear_resource_flags(rsc, pcmk_rsc_unique);
     }
 
     bundle_data->container_command = crm_element_value_copy(xml_obj, "run-command");
     bundle_data->launcher_options = crm_element_value_copy(xml_obj, "options");
     bundle_data->image = crm_element_value_copy(xml_obj, "image");
     bundle_data->container_network = crm_element_value_copy(xml_obj, "network");
 
     xml_obj = first_named_child(rsc->xml, "network");
     if(xml_obj) {
 
         bundle_data->ip_range_start = crm_element_value_copy(xml_obj, "ip-range-start");
         bundle_data->host_netmask = crm_element_value_copy(xml_obj, "host-netmask");
         bundle_data->host_network = crm_element_value_copy(xml_obj, "host-interface");
         bundle_data->control_port = crm_element_value_copy(xml_obj, "control-port");
         value = crm_element_value(xml_obj, "add-host");
         if (crm_str_to_boolean(value, &bundle_data->add_host) != 1) {
             bundle_data->add_host = TRUE;
         }
 
         for (xmlNode *xml_child = pcmk__xe_first_child(xml_obj); xml_child != NULL;
              xml_child = pcmk__xe_next(xml_child)) {
 
             pe__bundle_port_t *port = calloc(1, sizeof(pe__bundle_port_t));
             port->source = crm_element_value_copy(xml_child, "port");
 
             if(port->source == NULL) {
                 port->source = crm_element_value_copy(xml_child, "range");
             } else {
                 port->target = crm_element_value_copy(xml_child, "internal-port");
             }
 
             if(port->source != NULL && strlen(port->source) > 0) {
                 if(port->target == NULL) {
                     port->target = strdup(port->source);
                 }
                 bundle_data->ports = g_list_append(bundle_data->ports, port);
 
             } else {
                 pcmk__config_err("Invalid port directive %s", ID(xml_child));
                 port_free(port);
             }
         }
     }
 
     xml_obj = first_named_child(rsc->xml, "storage");
     for (xmlNode *xml_child = pcmk__xe_first_child(xml_obj); xml_child != NULL;
          xml_child = pcmk__xe_next(xml_child)) {
 
         const char *source = crm_element_value(xml_child, "source-dir");
         const char *target = crm_element_value(xml_child, "target-dir");
         const char *options = crm_element_value(xml_child, "options");
         int flags = pe__bundle_mount_none;
 
         if (source == NULL) {
             source = crm_element_value(xml_child, "source-dir-root");
             pe__set_bundle_mount_flags(xml_child, flags,
                                        pe__bundle_mount_subdir);
         }
 
         if (source && target) {
             mount_add(bundle_data, source, target, options, flags);
             if (strcmp(target, "/var/log") == 0) {
                 need_log_mount = FALSE;
             }
         } else {
             pcmk__config_err("Invalid mount directive %s", ID(xml_child));
         }
     }
 
     xml_obj = first_named_child(rsc->xml, "primitive");
     if (xml_obj && valid_network(bundle_data)) {
         char *value = NULL;
         xmlNode *xml_set = NULL;
 
         xml_resource = create_xml_node(NULL, XML_CIB_TAG_INCARNATION);
 
         /* @COMPAT We no longer use the <master> tag, but we need to keep it as
          * part of the resource name, so that bundles don't restart in a rolling
          * upgrade. (It also avoids needing to change regression tests.)
          */
         crm_xml_set_id(xml_resource, "%s-%s", bundle_data->prefix,
                       (bundle_data->promoted_max? "master"
                       : (const char *)xml_resource->name));
 
         xml_set = create_xml_node(xml_resource, XML_TAG_META_SETS);
         crm_xml_set_id(xml_set, "%s-%s-meta", bundle_data->prefix, xml_resource->name);
 
         crm_create_nvpair_xml(xml_set, NULL,
                               XML_RSC_ATTR_ORDERED, XML_BOOLEAN_TRUE);
 
         value = pcmk__itoa(bundle_data->nreplicas);
         crm_create_nvpair_xml(xml_set, NULL, PCMK_META_CLONE_MAX, value);
         free(value);
 
         value = pcmk__itoa(bundle_data->nreplicas_per_host);
         crm_create_nvpair_xml(xml_set, NULL, PCMK_META_CLONE_NODE_MAX, value);
         free(value);
 
         crm_create_nvpair_xml(xml_set, NULL, XML_RSC_ATTR_UNIQUE,
                               pcmk__btoa(bundle_data->nreplicas_per_host > 1));
 
         if (bundle_data->promoted_max) {
             crm_create_nvpair_xml(xml_set, NULL,
                                   XML_RSC_ATTR_PROMOTABLE, XML_BOOLEAN_TRUE);
 
             value = pcmk__itoa(bundle_data->promoted_max);
             crm_create_nvpair_xml(xml_set, NULL, PCMK_META_PROMOTED_MAX, value);
             free(value);
         }
 
         //crm_xml_add(xml_obj, PCMK_XA_ID, bundle_data->prefix);
         add_node_copy(xml_resource, xml_obj);
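
         /* Illustration only (hypothetical prefix "httpd-bundle", replicas=3,
          * promoted-max unset): the implicit resource built above is roughly:
          *
          *   <clone id="httpd-bundle-clone">
          *     <meta_attributes id="httpd-bundle-clone-meta">
          *       <nvpair name="ordered" value="true"/>
          *       <nvpair name="clone-max" value="3"/>
          *       <nvpair name="clone-node-max" value="1"/>
          *       <nvpair name="globally-unique" value="false"/>
          *     </meta_attributes>
          *     (copy of the configured <primitive>)
          *   </clone>
          */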
 
     } else if(xml_obj) {
         pcmk__config_err("Cannot control %s inside %s without either "
                          "ip-range-start or control-port",
                          rsc->id, ID(xml_obj));
         return FALSE;
     }
 
     if(xml_resource) {
         int lpc = 0;
         GList *childIter = NULL;
         pe__bundle_port_t *port = NULL;
         GString *buffer = NULL;
 
         if (pe__unpack_resource(xml_resource, &(bundle_data->child), rsc,
                                 scheduler) != pcmk_rc_ok) {
             return FALSE;
         }
 
         /* Currently, we always map the default authentication key location
          * into the same location inside the container.
          *
          * Ideally, we would respect the host's PCMK_authkey_location, but:
          * - it may be different on different nodes;
          * - the actual connection will do extra checking to make sure the key
          *   file exists and is readable, which we can't do here on the DC;
          * - tools such as crm_resource and crm_simulate may not have the same
          *   environment variables as the cluster, causing operation digests to
          *   differ.
          *
          * Always using the default location inside the container is fine,
          * because we control the pacemaker_remote environment, and it avoids
          * having to pass another environment variable to the container.
          *
          * @TODO A better solution may be to have only pacemaker_remote use the
          * environment variable, and have the cluster nodes use a new
          * cluster option for key location. This would introduce the limitation
          * of the location being the same on all cluster nodes, but that's
          * reasonable.
          */
         mount_add(bundle_data, DEFAULT_REMOTE_KEY_LOCATION,
                   DEFAULT_REMOTE_KEY_LOCATION, NULL, pe__bundle_mount_none);
 
         if (need_log_mount) {
             mount_add(bundle_data, CRM_BUNDLE_DIR, "/var/log", NULL,
                       pe__bundle_mount_subdir);
         }
 
         port = calloc(1, sizeof(pe__bundle_port_t));
         if(bundle_data->control_port) {
             port->source = strdup(bundle_data->control_port);
         } else {
             /* If we wanted to respect PCMK_remote_port, we could use
              * crm_default_remote_port() here and elsewhere in this file instead
              * of DEFAULT_REMOTE_PORT.
              *
              * However, it gains nothing, since we control both the container
              * environment and the connection resource parameters, and the user
              * can use a different port if desired by setting control-port.
              */
             port->source = pcmk__itoa(DEFAULT_REMOTE_PORT);
         }
         port->target = strdup(port->source);
         bundle_data->ports = g_list_append(bundle_data->ports, port);
 
         buffer = g_string_sized_new(1024);
         for (childIter = bundle_data->child->children; childIter != NULL;
              childIter = childIter->next) {
 
             pcmk__bundle_replica_t *replica = NULL;
 
             replica = calloc(1, sizeof(pcmk__bundle_replica_t));
             replica->child = childIter->data;
             replica->child->exclusive_discover = TRUE;
             replica->offset = lpc++;
 
             /* Ensure notify gets set on the implicit clone based on the
              * underlying primitive's value
              */
             if (pcmk_is_set(replica->child->flags, pcmk_rsc_notify)) {
                 pe__set_resource_flags(bundle_data->child, pcmk_rsc_notify);
             }
 
             allocate_ip(bundle_data, replica, buffer);
             bundle_data->replicas = g_list_append(bundle_data->replicas,
                                                   replica);
-            bundle_data->attribute_target = g_hash_table_lookup(replica->child->meta,
-                                                                XML_RSC_ATTR_TARGET);
+            bundle_data->attribute_target =
+                g_hash_table_lookup(replica->child->meta,
+                                    PCMK_META_CONTAINER_ATTR_TARGET);
         }
         bundle_data->container_host_options = g_string_free(buffer, FALSE);
 
         if (bundle_data->attribute_target) {
-            g_hash_table_replace(rsc->meta, strdup(XML_RSC_ATTR_TARGET),
+            g_hash_table_replace(rsc->meta,
+                                 strdup(PCMK_META_CONTAINER_ATTR_TARGET),
                                  strdup(bundle_data->attribute_target));
             g_hash_table_replace(bundle_data->child->meta,
-                                 strdup(XML_RSC_ATTR_TARGET),
+                                 strdup(PCMK_META_CONTAINER_ATTR_TARGET),
                                  strdup(bundle_data->attribute_target));
         }
 
     } else {
         // Just a naked container, no pacemaker-remote
         GString *buffer = g_string_sized_new(1024);
 
         for (int lpc = 0; lpc < bundle_data->nreplicas; lpc++) {
             pcmk__bundle_replica_t *replica = NULL;
 
             replica = calloc(1, sizeof(pcmk__bundle_replica_t));
             replica->offset = lpc;
             allocate_ip(bundle_data, replica, buffer);
             bundle_data->replicas = g_list_append(bundle_data->replicas,
                                                   replica);
         }
         bundle_data->container_host_options = g_string_free(buffer, FALSE);
     }
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pcmk__bundle_replica_t *replica = gIter->data;
 
         if (create_replica_resources(rsc, bundle_data, replica) != pcmk_rc_ok) {
             pcmk__config_err("Failed unpacking resource %s", rsc->id);
             rsc->fns->free(rsc);
             return FALSE;
         }
 
         /* Utilization needs special handling for bundles. It makes no sense for
          * the inner primitive to have utilization, because it is tied
          * one-to-one to the guest node created by the container resource -- and
          * there's no way to set capacities for that guest node anyway.
          *
          * What the user really wants is to configure utilization for the
          * container. However, the schema only allows utilization for
          * primitives, and the container resource is implicit anyway, so the
          * user can *only* configure utilization for the inner primitive. If
          * they do, move the primitive's utilization values to the container.
          *
          * @TODO This means that bundles without an inner primitive can't have
          * utilization. An alternative might be to allow utilization values in
          * the top-level bundle XML in the schema, and copy those to each
          * container.
          */
         if (replica->child != NULL) {
             GHashTable *empty = replica->container->utilization;
 
             replica->container->utilization = replica->child->utilization;
             replica->child->utilization = empty;
         }
     }
 
     if (bundle_data->child) {
         rsc->children = g_list_append(rsc->children, bundle_data->child);
     }
     return TRUE;
 }
 
 static int
 replica_resource_active(pcmk_resource_t *rsc, gboolean all)
 {
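     /* Tri-state helper: returns TRUE if rsc is active and any active member
      * suffices (all == FALSE), FALSE if rsc is inactive and all members must
      * be active (all == TRUE), and -1 if this resource alone does not decide
      * the result for the caller.
      */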
     if (rsc) {
         gboolean child_active = rsc->fns->active(rsc, all);
 
         if (child_active && !all) {
             return TRUE;
         } else if (!child_active && all) {
             return FALSE;
         }
     }
     return -1;
 }
 
 gboolean
 pe__bundle_active(pcmk_resource_t *rsc, gboolean all)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     GList *iter = NULL;
 
     get_bundle_variant_data(bundle_data, rsc);
     for (iter = bundle_data->replicas; iter != NULL; iter = iter->next) {
         pcmk__bundle_replica_t *replica = iter->data;
         int rsc_active;
 
         rsc_active = replica_resource_active(replica->ip, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->child, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->container, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
 
         rsc_active = replica_resource_active(replica->remote, all);
         if (rsc_active >= 0) {
             return (gboolean) rsc_active;
         }
     }
 
     /* If "all" is TRUE, we've already checked that no resources were inactive,
      * so return TRUE; if "all" is FALSE, we didn't find any active resources,
      * so return FALSE.
      */
     return all;
 }
 
 /*!
  * \internal
  * \brief Find the bundle replica corresponding to a given node
  *
  * \param[in] bundle  Top-level bundle resource
  * \param[in] node    Node to search for
  *
  * \return Bundle replica if found, NULL otherwise
  */
 pcmk_resource_t *
 pe__find_bundle_replica(const pcmk_resource_t *bundle, const pcmk_node_t *node)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     CRM_ASSERT(bundle && node);
 
     get_bundle_variant_data(bundle_data, bundle);
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pcmk__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica && replica->node);
         if (replica->node->details == node->details) {
             return replica->child;
         }
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 static void
 print_rsc_in_list(pcmk_resource_t *rsc, const char *pre_text, long options,
                   void *print_data)
 {
     if (rsc != NULL) {
         if (options & pe_print_html) {
             status_print("<li>");
         }
         rsc->fns->print(rsc, pre_text, options, print_data);
         if (options & pe_print_html) {
             status_print("</li>\n");
         }
     }
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 static void
 bundle_print_xml(pcmk_resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     char *child_text = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     if (pre_text == NULL) {
         pre_text = "";
     }
     child_text = crm_strdup_printf("%s        ", pre_text);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     status_print("%s<bundle ", pre_text);
     status_print(PCMK_XA_ID "=\"%s\" ", rsc->id);
     status_print("type=\"%s\" ", container_agent_str(bundle_data->agent_type));
     status_print("image=\"%s\" ", bundle_data->image);
     status_print("unique=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_unique));
     status_print("managed=\"%s\" ",
                  pe__rsc_bool_str(rsc, pcmk_rsc_managed));
     status_print("failed=\"%s\" ", pe__rsc_bool_str(rsc, pcmk_rsc_failed));
     status_print(">\n");
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pcmk__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         status_print("%s    <replica " PCMK_XA_ID "=\"%d\">\n",
                      pre_text, replica->offset);
         print_rsc_in_list(replica->ip, child_text, options, print_data);
         print_rsc_in_list(replica->child, child_text, options, print_data);
         print_rsc_in_list(replica->container, child_text, options, print_data);
         print_rsc_in_list(replica->remote, child_text, options, print_data);
         status_print("%s    </replica>\n", pre_text);
     }
     status_print("%s</bundle>\n", pre_text);
     free(child_text);
 }
 
 PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *",
                   "GList *")
 int
 pe__bundle_xml(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     pe__bundle_variant_data_t *bundle_data = NULL;
     int rc = pcmk_rc_no_output;
     gboolean printed_header = FALSE;
     gboolean print_everything = TRUE;
 
     const char *desc = NULL;
 
     CRM_ASSERT(rsc != NULL);

     get_bundle_variant_data(bundle_data, rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pcmk__bundle_replica_t *replica = gIter->data;
         char *id = NULL;
         gboolean print_ip, print_child, print_ctnr, print_remote;
 
         CRM_ASSERT(replica);
 
         if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
             continue;
         }
 
         print_ip = replica->ip != NULL &&
                    !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
         print_child = replica->child != NULL &&
                       !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
         print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
         print_remote = replica->remote != NULL &&
                        !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
 
         if (!print_everything && !print_ip && !print_child && !print_ctnr && !print_remote) {
             continue;
         }
 
         if (!printed_header) {
             printed_header = TRUE;
 
             desc = pe__resource_description(rsc, show_opts);
 
             rc = pe__name_and_nvpairs_xml(out, true, "bundle", 8,
                      PCMK_XA_ID, rsc->id,
                      "type", container_agent_str(bundle_data->agent_type),
                      "image", bundle_data->image,
                      "unique", pe__rsc_bool_str(rsc, pcmk_rsc_unique),
                      "maintenance",
                      pe__rsc_bool_str(rsc, pcmk_rsc_maintenance),
                      "managed", pe__rsc_bool_str(rsc, pcmk_rsc_managed),
                      "failed", pe__rsc_bool_str(rsc, pcmk_rsc_failed),
                      PCMK_XA_DESCRIPTION, desc);
             CRM_ASSERT(rc == pcmk_rc_ok);
         }
 
         id = pcmk__itoa(replica->offset);
         rc = pe__name_and_nvpairs_xml(out, true, "replica", 1, PCMK_XA_ID, id);
         free(id);
         CRM_ASSERT(rc == pcmk_rc_ok);
 
         if (print_ip) {
             out->message(out, crm_map_element_name(replica->ip->xml), show_opts,
                          replica->ip, only_node, only_rsc);
         }
 
         if (print_child) {
             out->message(out, crm_map_element_name(replica->child->xml), show_opts,
                          replica->child, only_node, only_rsc);
         }
 
         if (print_ctnr) {
             out->message(out, crm_map_element_name(replica->container->xml), show_opts,
                          replica->container, only_node, only_rsc);
         }
 
         if (print_remote) {
             out->message(out, crm_map_element_name(replica->remote->xml), show_opts,
                          replica->remote, only_node, only_rsc);
         }
 
         pcmk__output_xml_pop_parent(out); // replica
     }
 
     if (printed_header) {
         pcmk__output_xml_pop_parent(out); // bundle
     }
 
     return rc;
 }
 
 static void
 pe__bundle_replica_output_html(pcmk__output_t *out,
                                pcmk__bundle_replica_t *replica,
                                pcmk_node_t *node, uint32_t show_opts)
 {
     pcmk_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if(rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     pe__common_output_html(out, rsc, buffer, node, show_opts);
 }
 
 /*!
  * \internal
  * \brief Get a string describing a resource's unmanaged state or lack thereof
  *
  * \param[in] rsc  Resource to describe
  *
  * \return A string indicating that the resource is in maintenance mode or
  *         otherwise unmanaged, or an empty string if it is managed
  */
 static const char *
 get_unmanaged_str(const pcmk_resource_t *rsc)
 {
     if (pcmk_is_set(rsc->flags, pcmk_rsc_maintenance)) {
         return " (maintenance)";
     }
     if (!pcmk_is_set(rsc->flags, pcmk_rsc_managed)) {
         return " (unmanaged)";
     }
     return "";
 }
 
 PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *",
                   "GList *")
 int
 pe__bundle_html(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     const char *desc = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
     CRM_ASSERT(rsc != NULL);
 
     get_bundle_variant_data(bundle_data, rsc);
 
     desc = pe__resource_description(rsc, show_opts);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pcmk__bundle_replica_t *replica = gIter->data;
         gboolean print_ip, print_child, print_ctnr, print_remote;
 
         CRM_ASSERT(replica);
 
         if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
             continue;
         }
 
         print_ip = replica->ip != NULL &&
                    !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
         print_child = replica->child != NULL &&
                       !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
         print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
         print_remote = replica->remote != NULL &&
                        !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
 
         if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) ||
             (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) {
             /* The output messages used below require pcmk_show_implicit_rscs
              * to be set to do anything.
              */
             uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs;
 
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
                                      desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
                                      get_unmanaged_str(rsc));
 
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 out->begin_list(out, NULL, NULL, "Replica[%d]", replica->offset);
             }
 
             if (print_ip) {
                 out->message(out, crm_map_element_name(replica->ip->xml),
                              new_show_opts, replica->ip, only_node, only_rsc);
             }
 
             if (print_child) {
                 out->message(out, crm_map_element_name(replica->child->xml),
                              new_show_opts, replica->child, only_node, only_rsc);
             }
 
             if (print_ctnr) {
                 out->message(out, crm_map_element_name(replica->container->xml),
                              new_show_opts, replica->container, only_node, only_rsc);
             }
 
             if (print_remote) {
                 out->message(out, crm_map_element_name(replica->remote->xml),
                              new_show_opts, replica->remote, only_node, only_rsc);
             }
 
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 out->end_list(out);
             }
         } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) {
             continue;
         } else {
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
                                      desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
                                      get_unmanaged_str(rsc));
 
             pe__bundle_replica_output_html(out, replica, pe__current_node(replica->container),
                                            show_opts);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 static void
 pe__bundle_replica_output_text(pcmk__output_t *out,
                                pcmk__bundle_replica_t *replica,
                                pcmk_node_t *node, uint32_t show_opts)
 {
     const pcmk_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if(rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     pe__common_output_text(out, rsc, buffer, node, show_opts);
 }
 
 PCMK__OUTPUT_ARGS("bundle", "uint32_t", "pcmk_resource_t *", "GList *",
                   "GList *")
 int
 pe__bundle_text(pcmk__output_t *out, va_list args)
 {
     uint32_t show_opts = va_arg(args, uint32_t);
     pcmk_resource_t *rsc = va_arg(args, pcmk_resource_t *);
     GList *only_node = va_arg(args, GList *);
     GList *only_rsc = va_arg(args, GList *);
 
     const char *desc = NULL;
     pe__bundle_variant_data_t *bundle_data = NULL;
     int rc = pcmk_rc_no_output;
     gboolean print_everything = TRUE;
 
     CRM_ASSERT(rsc != NULL);

     desc = pe__resource_description(rsc, show_opts);

     get_bundle_variant_data(bundle_data, rsc);
 
     if (rsc->fns->is_filtered(rsc, only_rsc, TRUE)) {
         return rc;
     }
 
     print_everything = pcmk__str_in_list(rsc->id, only_rsc, pcmk__str_star_matches);
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pcmk__bundle_replica_t *replica = gIter->data;
         gboolean print_ip, print_child, print_ctnr, print_remote;
 
         CRM_ASSERT(replica);
 
         if (pcmk__rsc_filtered_by_node(replica->container, only_node)) {
             continue;
         }
 
         print_ip = replica->ip != NULL &&
                    !replica->ip->fns->is_filtered(replica->ip, only_rsc, print_everything);
         print_child = replica->child != NULL &&
                       !replica->child->fns->is_filtered(replica->child, only_rsc, print_everything);
         print_ctnr = !replica->container->fns->is_filtered(replica->container, only_rsc, print_everything);
         print_remote = replica->remote != NULL &&
                        !replica->remote->fns->is_filtered(replica->remote, only_rsc, print_everything);
 
         if (pcmk_is_set(show_opts, pcmk_show_implicit_rscs) ||
             (print_everything == FALSE && (print_ip || print_child || print_ctnr || print_remote))) {
             /* The output messages used below require pcmk_show_implicit_rscs
              * to be set to do anything.
              */
             uint32_t new_show_opts = show_opts | pcmk_show_implicit_rscs;
 
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
                                      desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
                                      get_unmanaged_str(rsc));
 
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 out->list_item(out, NULL, "Replica[%d]", replica->offset);
             }
 
             out->begin_list(out, NULL, NULL, NULL);
 
             if (print_ip) {
                 out->message(out, crm_map_element_name(replica->ip->xml),
                              new_show_opts, replica->ip, only_node, only_rsc);
             }
 
             if (print_child) {
                 out->message(out, crm_map_element_name(replica->child->xml),
                              new_show_opts, replica->child, only_node, only_rsc);
             }
 
             if (print_ctnr) {
                 out->message(out, crm_map_element_name(replica->container->xml),
                              new_show_opts, replica->container, only_node, only_rsc);
             }
 
             if (print_remote) {
                 out->message(out, crm_map_element_name(replica->remote->xml),
                              new_show_opts, replica->remote, only_node, only_rsc);
             }
 
             out->end_list(out);
         } else if (print_everything == FALSE && !(print_ip || print_child || print_ctnr || print_remote)) {
             continue;
         } else {
             PCMK__OUTPUT_LIST_HEADER(out, FALSE, rc, "Container bundle%s: %s [%s]%s%s%s%s%s",
                                      (bundle_data->nreplicas > 1)? " set" : "",
                                      rsc->id, bundle_data->image,
                                      pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
                                      desc ? " (" : "", desc ? desc : "", desc ? ")" : "",
                                      get_unmanaged_str(rsc));
 
             pe__bundle_replica_output_text(out, replica, pe__current_node(replica->container),
                                            show_opts);
         }
     }
 
     PCMK__OUTPUT_LIST_FOOTER(out, rc);
     return rc;
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 static void
 print_bundle_replica(pcmk__bundle_replica_t *replica, const char *pre_text,
                      long options, void *print_data)
 {
     pcmk_node_t *node = NULL;
     pcmk_resource_t *rsc = replica->child;
 
     int offset = 0;
     char buffer[LINE_MAX];
 
     if(rsc == NULL) {
         rsc = replica->container;
     }
 
     if (replica->remote) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->remote));
     } else {
         offset += snprintf(buffer + offset, LINE_MAX - offset, "%s",
                            rsc_printable_id(replica->container));
     }
     if (replica->ipaddr) {
         offset += snprintf(buffer + offset, LINE_MAX - offset, " (%s)",
                            replica->ipaddr);
     }
 
     node = pe__current_node(replica->container);
     common_print(rsc, pre_text, buffer, node, options, print_data);
 }
 
 /*!
  * \internal
  * \deprecated This function will be removed in a future release
  */
 void
 pe__print_bundle(pcmk_resource_t *rsc, const char *pre_text, long options,
                  void *print_data)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     char *child_text = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     if (options & pe_print_xml) {
         bundle_print_xml(rsc, pre_text, options, print_data);
         return;
     }
 
     get_bundle_variant_data(bundle_data, rsc);
 
     if (pre_text == NULL) {
         pre_text = " ";
     }
 
     status_print("%sContainer bundle%s: %s [%s]%s%s\n",
                  pre_text, ((bundle_data->nreplicas > 1)? " set" : ""),
                  rsc->id, bundle_data->image,
                  pcmk_is_set(rsc->flags, pcmk_rsc_unique)? " (unique)" : "",
                  pcmk_is_set(rsc->flags, pcmk_rsc_managed)? "" : " (unmanaged)");
     if (options & pe_print_html) {
         status_print("<br />\n<ul>\n");
     }
 
 
     for (GList *gIter = bundle_data->replicas; gIter != NULL;
          gIter = gIter->next) {
         pcmk__bundle_replica_t *replica = gIter->data;
 
         CRM_ASSERT(replica);
         if (options & pe_print_html) {
             status_print("<li>");
         }
 
         if (pcmk_is_set(options, pe_print_implicit)) {
             child_text = crm_strdup_printf("     %s", pre_text);
             if (pcmk__list_of_multiple(bundle_data->replicas)) {
                 status_print("  %sReplica[%d]\n", pre_text, replica->offset);
             }
             if (options & pe_print_html) {
                 status_print("<br />\n<ul>\n");
             }
             print_rsc_in_list(replica->ip, child_text, options, print_data);
             print_rsc_in_list(replica->container, child_text, options, print_data);
             print_rsc_in_list(replica->remote, child_text, options, print_data);
             print_rsc_in_list(replica->child, child_text, options, print_data);
             if (options & pe_print_html) {
                 status_print("</ul>\n");
             }
         } else {
             child_text = crm_strdup_printf("%s  ", pre_text);
             print_bundle_replica(replica, child_text, options, print_data);
         }
         free(child_text);
 
         if (options & pe_print_html) {
             status_print("</li>\n");
         }
     }
     if (options & pe_print_html) {
         status_print("</ul>\n");
     }
 }
 
 static void
 free_bundle_replica(pcmk__bundle_replica_t *replica)
 {
     if (replica == NULL) {
         return;
     }
 
     if (replica->node) {
         free(replica->node);
         replica->node = NULL;
     }
 
     if (replica->ip) {
         free_xml(replica->ip->xml);
         replica->ip->xml = NULL;
         replica->ip->fns->free(replica->ip);
         replica->ip = NULL;
     }
     if (replica->container) {
         free_xml(replica->container->xml);
         replica->container->xml = NULL;
         replica->container->fns->free(replica->container);
         replica->container = NULL;
     }
     if (replica->remote) {
         free_xml(replica->remote->xml);
         replica->remote->xml = NULL;
         replica->remote->fns->free(replica->remote);
         replica->remote = NULL;
     }
     free(replica->ipaddr);
     free(replica);
 }
 
 void
 pe__free_bundle(pcmk_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
     CRM_CHECK(rsc != NULL, return);
 
     get_bundle_variant_data(bundle_data, rsc);
     pcmk__rsc_trace(rsc, "Freeing %s", rsc->id);
 
     free(bundle_data->prefix);
     free(bundle_data->image);
     free(bundle_data->control_port);
     free(bundle_data->host_network);
     free(bundle_data->host_netmask);
     free(bundle_data->ip_range_start);
     free(bundle_data->container_network);
     free(bundle_data->launcher_options);
     free(bundle_data->container_command);
     g_free(bundle_data->container_host_options);
 
     g_list_free_full(bundle_data->replicas,
                      (GDestroyNotify) free_bundle_replica);
     g_list_free_full(bundle_data->mounts, (GDestroyNotify)mount_free);
     g_list_free_full(bundle_data->ports, (GDestroyNotify)port_free);
     g_list_free(rsc->children);
 
     if(bundle_data->child) {
         free_xml(bundle_data->child->xml);
         bundle_data->child->xml = NULL;
         bundle_data->child->fns->free(bundle_data->child);
     }
     common_free(rsc);
 }
 
 enum rsc_role_e
 pe__bundle_resource_state(const pcmk_resource_t *rsc, gboolean current)
 {
     enum rsc_role_e container_role = pcmk_role_unknown;
     return container_role;
 }
 
 /*!
  * \brief Get the number of configured replicas in a bundle
  *
  * \param[in] rsc  Bundle resource
  *
  * \return Number of configured replicas, or 0 on error
  */
 int
 pe_bundle_replicas(const pcmk_resource_t *rsc)
 {
     if ((rsc == NULL) || (rsc->variant != pcmk_rsc_variant_bundle)) {
         return 0;
     } else {
         pe__bundle_variant_data_t *bundle_data = NULL;
 
         get_bundle_variant_data(bundle_data, rsc);
         return bundle_data->nreplicas;
     }
 }
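
 /* Example usage (hypothetical caller):
  *
  *     if (rsc->variant == pcmk_rsc_variant_bundle) {
  *         crm_trace("%s is configured with %d replicas",
  *                   rsc->id, pe_bundle_replicas(rsc));
  *     }
  */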
 
 void
 pe__count_bundle(pcmk_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, rsc);
     for (GList *item = bundle_data->replicas; item != NULL; item = item->next) {
         pcmk__bundle_replica_t *replica = item->data;
 
         if (replica->ip) {
             replica->ip->fns->count(replica->ip);
         }
         if (replica->child) {
             replica->child->fns->count(replica->child);
         }
         if (replica->container) {
             replica->container->fns->count(replica->container);
         }
         if (replica->remote) {
             replica->remote->fns->count(replica->remote);
         }
     }
 }
 
 gboolean
 pe__bundle_is_filtered(const pcmk_resource_t *rsc, GList *only_rsc,
                        gboolean check_parent)
 {
     gboolean passes = FALSE;
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     if (pcmk__str_in_list(rsc_printable_id(rsc), only_rsc, pcmk__str_star_matches)) {
         passes = TRUE;
     } else {
         get_bundle_variant_data(bundle_data, rsc);
 
         for (GList *gIter = bundle_data->replicas; gIter != NULL; gIter = gIter->next) {
             pcmk__bundle_replica_t *replica = gIter->data;
 
             if (replica->ip != NULL && !replica->ip->fns->is_filtered(replica->ip, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             } else if (replica->child != NULL && !replica->child->fns->is_filtered(replica->child, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             } else if (!replica->container->fns->is_filtered(replica->container, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             } else if (replica->remote != NULL && !replica->remote->fns->is_filtered(replica->remote, only_rsc, FALSE)) {
                 passes = TRUE;
                 break;
             }
         }
     }
 
     return !passes;
 }
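 
 /* Usage sketch for pe__bundle_is_filtered(): the return value is inverted
  * relative to "matches" -- TRUE means the bundle should be hidden from
  * output. Assuming "only_rsc" is a caller-supplied GList of resource name
  * patterns:
  *
  *     if (!pe__bundle_is_filtered(rsc, only_rsc, FALSE)) {
  *         // The bundle itself or at least one replica member matched,
  *         // so include the bundle in the output
  *     }
  */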
 
 /*!
  * \internal
  * \brief Get a list of a bundle's containers
  *
  * \param[in] bundle  Bundle resource
  *
  * \return Newly created list of \p bundle's containers
  * \note It is the caller's responsibility to free the result with
  *       g_list_free().
  */
 GList *
 pe__bundle_containers(const pcmk_resource_t *bundle)
 {
     GList *containers = NULL;
     const pe__bundle_variant_data_t *data = NULL;
 
     get_bundle_variant_data(data, bundle);
     for (GList *iter = data->replicas; iter != NULL; iter = iter->next) {
         pcmk__bundle_replica_t *replica = iter->data;
 
         containers = g_list_append(containers, replica->container);
     }
     return containers;
 }
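 
 /* Usage sketch for pe__bundle_containers(): the returned list owns only the
  * list cells, not the container resources themselves, so it is freed with
  * g_list_free() as the \note above says. Assuming "bundle" is a bundle
  * resource:
  *
  *     GList *containers = pe__bundle_containers(bundle);
  *
  *     for (GList *iter = containers; iter != NULL; iter = iter->next) {
  *         pcmk_resource_t *container = iter->data;
  *
  *         crm_trace("Bundle %s has container %s", bundle->id, container->id);
  *     }
  *     g_list_free(containers);
  */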
 
 // Bundle implementation of pcmk_rsc_methods_t:active_node()
 pcmk_node_t *
 pe__bundle_active_node(const pcmk_resource_t *rsc, unsigned int *count_all,
                        unsigned int *count_clean)
 {
     pcmk_node_t *active = NULL;
     pcmk_node_t *node = NULL;
     pcmk_resource_t *container = NULL;
     GList *containers = NULL;
     GList *iter = NULL;
     GHashTable *nodes = NULL;
     const pe__bundle_variant_data_t *data = NULL;
 
     if (count_all != NULL) {
         *count_all = 0;
     }
     if (count_clean != NULL) {
         *count_clean = 0;
     }
     if (rsc == NULL) {
         return NULL;
     }
 
     /* For the purposes of this method, we only care about where the bundle's
      * containers are active, so build a list of active containers.
      */
     get_bundle_variant_data(data, rsc);
     for (iter = data->replicas; iter != NULL; iter = iter->next) {
         pcmk__bundle_replica_t *replica = iter->data;
 
         if (replica->container->running_on != NULL) {
             containers = g_list_append(containers, replica->container);
         }
     }
     if (containers == NULL) {
         return NULL;
     }
 
     /* If the bundle has only a single active container, just use that
      * container's method. If live migration is ever supported for bundle
      * containers, this will allow us to prefer the migration source when there
      * is only one container and it is migrating. For now, this just lets us
      * avoid creating the nodes table.
      */
     if (pcmk__list_of_1(containers)) {
         container = containers->data;
         node = container->fns->active_node(container, count_all, count_clean);
         g_list_free(containers);
         return node;
     }
 
     // Add all containers' active nodes to a hash table (for uniqueness)
     nodes = g_hash_table_new(NULL, NULL);
     for (iter = containers; iter != NULL; iter = iter->next) {
         container = iter->data;
 
         for (GList *node_iter = container->running_on; node_iter != NULL;
              node_iter = node_iter->next) {
             node = node_iter->data;
 
             // If insert returns true, we haven't counted this node yet
             if (g_hash_table_insert(nodes, (gpointer) node->details,
                                     (gpointer) node)
                 && !pe__count_active_node(rsc, node, &active, count_all,
                                           count_clean)) {
                 goto done;
             }
         }
     }
 
 done:
     g_list_free(containers);
     g_hash_table_destroy(nodes);
     return active;
 }
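 
 /* Usage sketch for pe__bundle_active_node(): both counters are optional and
  * may be passed as NULL. Assuming "rsc" is a bundle resource:
  *
  *     unsigned int count_all = 0;
  *     unsigned int count_clean = 0;
  *     pcmk_node_t *node = pe__bundle_active_node(rsc, &count_all,
  *                                                &count_clean);
  *
  *     if (node != NULL) {
  *         crm_trace("%s is active on %u node(s) (%u clean), e.g. %s",
  *                   rsc->id, count_all, count_clean, pe__node_name(node));
  *     }
  */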
 
 /*!
  * \internal
  * \brief Get maximum bundle resource instances per node
  *
  * \param[in] rsc  Bundle resource to check
  *
  * \return Maximum number of \p rsc instances that can be active on one node
  */
 unsigned int
 pe__bundle_max_per_node(const pcmk_resource_t *rsc)
 {
     pe__bundle_variant_data_t *bundle_data = NULL;
 
     get_bundle_variant_data(bundle_data, rsc);
     CRM_ASSERT(bundle_data->nreplicas_per_host >= 0);
     return (unsigned int) bundle_data->nreplicas_per_host;
 }
diff --git a/lib/pengine/common.c b/lib/pengine/common.c
index 9c938783af..69377d7fad 100644
--- a/lib/pengine/common.c
+++ b/lib/pengine/common.c
@@ -1,624 +1,624 @@
 /*
  * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/crm.h>
 #include <crm/msg_xml.h>
 #include <crm/common/xml.h>
 #include <crm/common/util.h>
 
 #include <glib.h>
 
 #include <crm/common/scheduler_internal.h>
 #include <crm/pengine/internal.h>
 
 static bool
 check_placement_strategy(const char *value)
 {
     return pcmk__strcase_any_of(value, "default", "utilization", "minimal",
                            "balanced", NULL);
 }
 
 static pcmk__cluster_option_t pe_opts[] = {
     /* name, old name, type, allowed values,
      * default value, validator,
      * short description,
      * long description
      */
     {
         PCMK_OPT_NO_QUORUM_POLICY, NULL, "select",
             "stop, freeze, ignore, demote, suicide",
         "stop", pcmk__valid_no_quorum_policy,
         N_("What to do when the cluster does not have quorum"),
         NULL
     },
     {
         PCMK_OPT_SYMMETRIC_CLUSTER, NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("Whether resources can run on any node by default"),
         NULL
     },
     {
         PCMK_OPT_MAINTENANCE_MODE, NULL, "boolean", NULL,
         "false", pcmk__valid_boolean,
         N_("Whether the cluster should refrain from monitoring, starting, "
             "and stopping resources"),
         NULL
     },
     {
         PCMK_OPT_START_FAILURE_IS_FATAL, NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("Whether a start failure should prevent a resource from being "
             "recovered on the same node"),
         N_("When true, the cluster will immediately ban a resource from a node "
             "if it fails to start there. When false, the cluster will instead "
             "check the resource's fail count against its migration-threshold.")
     },
     {
         PCMK_OPT_ENABLE_STARTUP_PROBES, NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("Whether the cluster should check for active resources during start-up"),
         NULL
     },
     {
         PCMK_OPT_SHUTDOWN_LOCK, NULL, "boolean", NULL,
         "false", pcmk__valid_boolean,
         N_("Whether to lock resources to a cleanly shut down node"),
         N_("When true, resources active on a node when it is cleanly shut down "
             "are kept \"locked\" to that node (not allowed to run elsewhere) "
             "until they start again on that node after it rejoins (or for at "
             "most shutdown-lock-limit, if set). Stonith resources and "
             "Pacemaker Remote connections are never locked. Clone and bundle "
             "instances and the promoted role of promotable clones are "
             "currently never locked, though support could be added in a future "
             "release.")
     },
     {
         PCMK_OPT_SHUTDOWN_LOCK_LIMIT, NULL, "time", NULL,
         "0", pcmk__valid_interval_spec,
         N_("Do not lock resources to a cleanly shut down node longer than "
            "this"),
         N_("If shutdown-lock is true and this is set to a nonzero time "
             "duration, shutdown locks will expire after this much time has "
             "passed since the shutdown was initiated, even if the node has not "
             "rejoined.")
     },
 
     // Fencing-related options
     {
         PCMK_OPT_STONITH_ENABLED, NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("*** Advanced Use Only *** "
             "Whether nodes may be fenced as part of recovery"),
         N_("If false, unresponsive nodes are immediately assumed to be harmless, "
             "and resources that were active on them may be recovered "
             "elsewhere. This can result in a \"split-brain\" situation, "
             "potentially leading to data loss and/or service unavailability.")
     },
     {
         PCMK_OPT_STONITH_ACTION, NULL, "select", "reboot, off, poweroff",
         PCMK_ACTION_REBOOT, pcmk__is_fencing_action,
         N_("Action to send to fence device when a node needs to be fenced "
             "(\"poweroff\" is a deprecated alias for \"off\")"),
         NULL
     },
     {
         PCMK_OPT_STONITH_TIMEOUT, NULL, "time", NULL,
         "60s", pcmk__valid_interval_spec,
         N_("How long to wait for on, off, and reboot fence actions to complete "
             "by default"),
         NULL
     },
     {
         PCMK_OPT_HAVE_WATCHDOG, NULL, "boolean", NULL,
         "false", pcmk__valid_boolean,
         N_("Whether watchdog integration is enabled"),
         N_("This is set automatically by the cluster according to whether SBD "
             "is detected to be in use. User-configured values are ignored. "
             "The value `true` is meaningful if diskless SBD is used and "
             "`stonith-watchdog-timeout` is nonzero. In that case, if fencing "
             "is required, watchdog-based self-fencing will be performed via "
             "SBD without requiring a fencing resource explicitly configured.")
     },
     {
         PCMK_OPT_CONCURRENT_FENCING, NULL, "boolean", NULL,
         PCMK__CONCURRENT_FENCING_DEFAULT, pcmk__valid_boolean,
         N_("Allow performing fencing operations in parallel"),
         NULL
     },
     {
         PCMK_OPT_STARTUP_FENCING, NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("*** Advanced Use Only *** Whether to fence unseen nodes at start-up"),
         N_("Setting this to false may lead to a \"split-brain\" situation, "
             "potentially leading to data loss and/or service unavailability.")
     },
     {
         PCMK_OPT_PRIORITY_FENCING_DELAY, NULL, "time", NULL,
         "0", pcmk__valid_interval_spec,
         N_("Apply fencing delay targeting the lost nodes with the highest total resource priority"),
         N_("Apply specified delay for the fencings that are targeting the lost "
             "nodes with the highest total resource priority in case we don't "
             "have the majority of the nodes in our cluster partition, so that "
             "the more significant nodes potentially win any fencing match, "
             "which is especially meaningful under split-brain of 2-node "
             "cluster. A promoted resource instance takes the base priority + 1 "
             "on calculation if the base priority is not 0. Any static/random "
             "delays that are introduced by `pcmk_delay_base/max` configured "
             "for the corresponding fencing resources will be added to this "
             "delay. This delay should be significantly greater than, safely "
             "twice, the maximum `pcmk_delay_base/max`. By default, priority "
             "fencing delay is disabled.")
     },
     {
         PCMK_OPT_NODE_PENDING_TIMEOUT, NULL, "time", NULL,
         "0", pcmk__valid_interval_spec,
         N_("How long to wait for a node that has joined the cluster to join "
            "the controller process group"),
         N_("Fence nodes that do not join the controller process group within "
            "this much time after joining the cluster, to allow the cluster "
            "to continue managing resources. A value of 0 means never fence "
            "pending nodes. Setting the value to 2h means fence nodes after "
            "2 hours.")
     },
     {
         PCMK_OPT_CLUSTER_DELAY, NULL, "time", NULL,
         "60s", pcmk__valid_interval_spec,
         N_("Maximum time for node-to-node communication"),
         N_("The node elected Designated Controller (DC) will consider an action "
             "failed if it does not get a response from the node executing the "
             "action within this time (after considering the action's own "
             "timeout). The \"correct\" value will depend on the speed and "
             "load of your network and cluster nodes.")
     },
     {
         PCMK_OPT_BATCH_LIMIT, NULL, "integer", NULL,
         "0", pcmk__valid_int,
         N_("Maximum number of jobs that the cluster may execute in parallel "
             "across all nodes"),
         N_("The \"correct\" value will depend on the speed and load of your "
             "network and cluster nodes. If set to 0, the cluster will "
             "impose a dynamically calculated limit when any node has a "
             "high load.")
     },
     {
         PCMK_OPT_MIGRATION_LIMIT, NULL, "integer", NULL,
         "-1", pcmk__valid_int,
         N_("The number of live migration actions that the cluster is allowed "
             "to execute in parallel on a node (-1 means no limit)")
     },
 
     /* Orphans and stopping */
     {
         PCMK_OPT_STOP_ALL_RESOURCES, NULL, "boolean", NULL,
         "false", pcmk__valid_boolean,
         N_("Whether the cluster should stop all active resources"),
         NULL
     },
     {
         PCMK_OPT_STOP_ORPHAN_RESOURCES, NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("Whether to stop resources that were removed from the configuration"),
         NULL
     },
     {
         PCMK_OPT_STOP_ORPHAN_ACTIONS, NULL, "boolean", NULL,
         "true", pcmk__valid_boolean,
         N_("Whether to cancel recurring actions removed from the configuration"),
         NULL
     },
     {
         PCMK__OPT_REMOVE_AFTER_STOP, NULL, "boolean", NULL,
         "false", pcmk__valid_boolean,
         N_("*** Deprecated *** Whether to remove stopped resources from "
             "the executor"),
         N_("Values other than default are poorly tested and potentially dangerous."
             " This option will be removed in a future release.")
     },
 
     /* Storing inputs */
     {
         PCMK_OPT_PE_ERROR_SERIES_MAX, NULL, "integer", NULL,
         "-1", pcmk__valid_int,
         N_("The number of scheduler inputs resulting in errors to save"),
         N_("Zero to disable, -1 to store unlimited.")
     },
     {
         PCMK_OPT_PE_WARN_SERIES_MAX, NULL, "integer", NULL,
         "5000", pcmk__valid_int,
         N_("The number of scheduler inputs resulting in warnings to save"),
         N_("Zero to disable, -1 to store unlimited.")
     },
     {
         PCMK_OPT_PE_INPUT_SERIES_MAX, NULL, "integer", NULL,
         "4000", pcmk__valid_int,
         N_("The number of scheduler inputs without errors or warnings to save"),
         N_("Zero to disable, -1 to store unlimited.")
     },
 
     /* Node health */
     {
         PCMK_OPT_NODE_HEALTH_STRATEGY, NULL, "select",
         PCMK__VALUE_NONE ", " PCMK__VALUE_MIGRATE_ON_RED ", "
             PCMK__VALUE_ONLY_GREEN ", " PCMK__VALUE_PROGRESSIVE ", "
             PCMK__VALUE_CUSTOM,
         PCMK__VALUE_NONE, pcmk__validate_health_strategy,
         N_("How cluster should react to node health attributes"),
         N_("Requires external entities to create node attributes (named with "
             "the prefix \"#health\") with values \"red\", "
             "\"yellow\", or \"green\".")
     },
     {
         PCMK_OPT_NODE_HEALTH_BASE, NULL, "integer", NULL,
         "0", pcmk__valid_int,
         N_("Base health score assigned to a node"),
         N_("Only used when \"node-health-strategy\" is set to \"progressive\".")
     },
     {
         PCMK_OPT_NODE_HEALTH_GREEN, NULL, "integer", NULL,
         "0", pcmk__valid_int,
         N_("The score to use for a node health attribute whose value is \"green\""),
         N_("Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive\".")
     },
     {
         PCMK_OPT_NODE_HEALTH_YELLOW, NULL, "integer", NULL,
         "0", pcmk__valid_int,
         N_("The score to use for a node health attribute whose value is \"yellow\""),
         N_("Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive\".")
     },
     {
         PCMK_OPT_NODE_HEALTH_RED, NULL, "integer", NULL,
         "-INFINITY", pcmk__valid_int,
         N_("The score to use for a node health attribute whose value is \"red\""),
         N_("Only used when \"node-health-strategy\" is set to \"custom\" or \"progressive\".")
     },
 
     /* Placement strategy */
     {
         PCMK_OPT_PLACEMENT_STRATEGY, NULL, "select",
             "default, utilization, minimal, balanced",
         "default", check_placement_strategy,
         N_("How the cluster should allocate resources to nodes"),
         NULL
     },
 };
 
 void
 pe_metadata(pcmk__output_t *out)
 {
     const char *desc_short = "Pacemaker scheduler options";
     const char *desc_long = "Cluster options used by Pacemaker's scheduler";
 
     gchar *s = pcmk__format_option_metadata("pacemaker-schedulerd", desc_short,
                                             desc_long, pe_opts,
                                             PCMK__NELEM(pe_opts));
     out->output_xml(out, "metadata", s);
     g_free(s);
 }
 
 void
 verify_pe_options(GHashTable * options)
 {
     pcmk__validate_cluster_options(options, pe_opts, PCMK__NELEM(pe_opts));
 }
 
 const char *
 pe_pref(GHashTable * options, const char *name)
 {
     return pcmk__cluster_option(options, pe_opts, PCMK__NELEM(pe_opts), name);
 }
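 
 /* Usage sketch for pe_pref(): assuming "options" is the cluster options table
  * (for example, the scheduler's config_hash), the configured or default value
  * of an option can be fetched by name:
  *
  *     const char *policy = pe_pref(options, PCMK_OPT_NO_QUORUM_POLICY);
  *
  * If the option is not set in the table, the default from pe_opts[] ("stop"
  * in this case) is returned.
  */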
 
 const char *
 fail2text(enum action_fail_response fail)
 {
     const char *result = "<unknown>";
 
     switch (fail) {
         case pcmk_on_fail_ignore:
             result = "ignore";
             break;
         case pcmk_on_fail_demote:
             result = "demote";
             break;
         case pcmk_on_fail_block:
             result = "block";
             break;
         case pcmk_on_fail_restart:
             result = "recover";
             break;
         case pcmk_on_fail_ban:
             result = "migrate";
             break;
         case pcmk_on_fail_stop:
             result = "stop";
             break;
         case pcmk_on_fail_fence_node:
             result = "fence";
             break;
         case pcmk_on_fail_standby_node:
             result = "standby";
             break;
         case pcmk_on_fail_restart_container:
             result = "restart-container";
             break;
         case pcmk_on_fail_reset_remote:
             result = "reset-remote";
             break;
     }
     return result;
 }
 
 enum action_tasks
 text2task(const char *task)
 {
     if (pcmk__str_eq(task, PCMK_ACTION_STOP, pcmk__str_casei)) {
         return pcmk_action_stop;
 
     } else if (pcmk__str_eq(task, PCMK_ACTION_STOPPED, pcmk__str_casei)) {
         return pcmk_action_stopped;
 
     } else if (pcmk__str_eq(task, PCMK_ACTION_START, pcmk__str_casei)) {
         return pcmk_action_start;
 
     } else if (pcmk__str_eq(task, PCMK_ACTION_RUNNING, pcmk__str_casei)) {
         return pcmk_action_started;
 
     } else if (pcmk__str_eq(task, PCMK_ACTION_DO_SHUTDOWN, pcmk__str_casei)) {
         return pcmk_action_shutdown;
 
     } else if (pcmk__str_eq(task, PCMK_ACTION_STONITH, pcmk__str_casei)) {
         return pcmk_action_fence;
 
     } else if (pcmk__str_eq(task, PCMK_ACTION_MONITOR, pcmk__str_casei)) {
         return pcmk_action_monitor;
 
     } else if (pcmk__str_eq(task, PCMK_ACTION_NOTIFY, pcmk__str_casei)) {
         return pcmk_action_notify;
 
     } else if (pcmk__str_eq(task, PCMK_ACTION_NOTIFIED, pcmk__str_casei)) {
         return pcmk_action_notified;
 
     } else if (pcmk__str_eq(task, PCMK_ACTION_PROMOTE, pcmk__str_casei)) {
         return pcmk_action_promote;
 
     } else if (pcmk__str_eq(task, PCMK_ACTION_DEMOTE, pcmk__str_casei)) {
         return pcmk_action_demote;
 
     } else if (pcmk__str_eq(task, PCMK_ACTION_PROMOTED, pcmk__str_casei)) {
         return pcmk_action_promoted;
 
     } else if (pcmk__str_eq(task, PCMK_ACTION_DEMOTED, pcmk__str_casei)) {
         return pcmk_action_demoted;
     }
     return pcmk_action_unspecified;
 }
 
 const char *
 task2text(enum action_tasks task)
 {
     const char *result = "<unknown>";
 
     switch (task) {
         case pcmk_action_unspecified:
             result = "no_action";
             break;
         case pcmk_action_stop:
             result = PCMK_ACTION_STOP;
             break;
         case pcmk_action_stopped:
             result = PCMK_ACTION_STOPPED;
             break;
         case pcmk_action_start:
             result = PCMK_ACTION_START;
             break;
         case pcmk_action_started:
             result = PCMK_ACTION_RUNNING;
             break;
         case pcmk_action_shutdown:
             result = PCMK_ACTION_DO_SHUTDOWN;
             break;
         case pcmk_action_fence:
             result = PCMK_ACTION_STONITH;
             break;
         case pcmk_action_monitor:
             result = PCMK_ACTION_MONITOR;
             break;
         case pcmk_action_notify:
             result = PCMK_ACTION_NOTIFY;
             break;
         case pcmk_action_notified:
             result = PCMK_ACTION_NOTIFIED;
             break;
         case pcmk_action_promote:
             result = PCMK_ACTION_PROMOTE;
             break;
         case pcmk_action_promoted:
             result = PCMK_ACTION_PROMOTED;
             break;
         case pcmk_action_demote:
             result = PCMK_ACTION_DEMOTE;
             break;
         case pcmk_action_demoted:
             result = PCMK_ACTION_DEMOTED;
             break;
     }
 
     return result;
 }
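 
 /* text2task() and task2text() are inverses for the supported action names, so
  * (for example) this round trip holds, PCMK_ACTION_START being the "start"
  * task string:
  *
  *     CRM_ASSERT(text2task(PCMK_ACTION_START) == pcmk_action_start);
  *     CRM_ASSERT(strcmp(task2text(pcmk_action_start), PCMK_ACTION_START) == 0);
  *
  * Unrecognized task strings map to pcmk_action_unspecified, which maps back
  * to the placeholder "no_action".
  */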
 
 const char *
 role2text(enum rsc_role_e role)
 {
     switch (role) {
         case pcmk_role_stopped:
             return PCMK__ROLE_STOPPED;
 
         case pcmk_role_started:
             return PCMK__ROLE_STARTED;
 
         case pcmk_role_unpromoted:
 #ifdef PCMK__COMPAT_2_0
             return PCMK__ROLE_UNPROMOTED_LEGACY;
 #else
             return PCMK__ROLE_UNPROMOTED;
 #endif
 
         case pcmk_role_promoted:
 #ifdef PCMK__COMPAT_2_0
             return PCMK__ROLE_PROMOTED_LEGACY;
 #else
             return PCMK__ROLE_PROMOTED;
 #endif
 
         default: // pcmk_role_unknown
             return PCMK__ROLE_UNKNOWN;
     }
 }
 
 enum rsc_role_e
 text2role(const char *role)
 {
     if (pcmk__str_eq(role, PCMK__ROLE_UNKNOWN,
                      pcmk__str_casei|pcmk__str_null_matches)) {
         return pcmk_role_unknown;
     } else if (pcmk__str_eq(role, PCMK__ROLE_STOPPED, pcmk__str_casei)) {
         return pcmk_role_stopped;
     } else if (pcmk__str_eq(role, PCMK__ROLE_STARTED, pcmk__str_casei)) {
         return pcmk_role_started;
     } else if (pcmk__strcase_any_of(role, PCMK__ROLE_UNPROMOTED,
                                     PCMK__ROLE_UNPROMOTED_LEGACY, NULL)) {
         return pcmk_role_unpromoted;
     } else if (pcmk__strcase_any_of(role, PCMK__ROLE_PROMOTED,
                                     PCMK__ROLE_PROMOTED_LEGACY, NULL)) {
         return pcmk_role_promoted;
     }
     return pcmk_role_unknown; // Invalid role given
 }
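 
 /* role2text() and text2role() are similarly inverses for valid roles. A NULL
  * or unrecognized role name maps to pcmk_role_unknown, and both the current
  * and legacy spellings of the promoted/unpromoted roles are accepted:
  *
  *     CRM_ASSERT(text2role(PCMK__ROLE_PROMOTED_LEGACY) == pcmk_role_promoted);
  *     CRM_ASSERT(text2role(NULL) == pcmk_role_unknown);
  */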
 
 void
 add_hash_param(GHashTable * hash, const char *name, const char *value)
 {
     CRM_CHECK(hash != NULL, return);
 
     crm_trace("Adding name='%s' value='%s' to hash table",
               pcmk__s(name, "<null>"), pcmk__s(value, "<null>"));
     if (name == NULL || value == NULL) {
         return;
 
     } else if (pcmk__str_eq(value, "#default", pcmk__str_casei)) {
         return;
 
     } else if (g_hash_table_lookup(hash, name) == NULL) {
         g_hash_table_insert(hash, strdup(name), strdup(value));
     }
 }
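 
 /* Usage sketch for add_hash_param(): values are inserted only if the key is
  * not already present, and the literal value "#default" is skipped entirely,
  * so existing entries are never overwritten:
  *
  *     add_hash_param(meta, "notify_type", "pre");   // inserted
  *     add_hash_param(meta, "notify_type", "post");  // ignored, key exists
  *     add_hash_param(meta, "timeout", "#default");  // ignored, "#default"
  *
  * Here "meta" is assumed to be a GHashTable created with g_str_hash and
  * g_str_equal and with free() as the destroy function for both keys and
  * values, since the table takes ownership of the strdup()'d copies.
  */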
 
 /*!
  * \internal
  * \brief Look up an attribute value on the appropriate node
  *
- * If \p node is a guest node and either the \c XML_RSC_ATTR_TARGET meta
- * attribute is set to "host" for \p rsc or \p force_host is \c true, query the
- * attribute on the node's host. Otherwise, query the attribute on \p node
+ * If \p node is a guest node and either the \c PCMK_META_CONTAINER_ATTR_TARGET
+ * meta attribute is set to "host" for \p rsc or \p force_host is \c true, query
+ * the attribute on the node's host. Otherwise, query the attribute on \p node
  * itself.
  *
  * \param[in] node        Node to query attribute value on by default
  * \param[in] name        Name of attribute to query
  * \param[in] rsc         Resource on whose behalf we're querying
  * \param[in] node_type   Type of resource location lookup
  * \param[in] force_host  Force a lookup on the guest node's host, regardless of
- *                        the \c XML_RSC_ATTR_TARGET value
+ *                        the \c PCMK_META_CONTAINER_ATTR_TARGET value
  *
  * \return Value of the attribute on \p node or on the host of \p node
  *
  * \note If \p force_host is \c true, \p node \e must be a guest node.
  */
 const char *
 pe__node_attribute_calculated(const pcmk_node_t *node, const char *name,
                               const pcmk_resource_t *rsc,
                               enum pcmk__rsc_node node_type,
                               bool force_host)
 {
     // @TODO: Use pe__is_guest_node() after merging libpe_{rules,status}
     bool is_guest = (node != NULL)
                     && (node->details->type == pcmk_node_variant_remote)
                     && (node->details->remote_rsc != NULL)
                     && (node->details->remote_rsc->container != NULL);
     const char *source = NULL;
     const char *node_type_s = NULL;
     const char *reason = NULL;
 
     const pcmk_resource_t *container = NULL;
     const pcmk_node_t *host = NULL;
 
     CRM_ASSERT((node != NULL) && (name != NULL) && (rsc != NULL)
                && (!force_host || is_guest));
 
-    /* Ignore XML_RSC_ATTR_TARGET if node is not a guest node. This represents a
-     * user configuration error.
+    /* Ignore PCMK_META_CONTAINER_ATTR_TARGET if node is not a guest node. This
+     * represents a user configuration error.
      */
-    source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET);
+    source = g_hash_table_lookup(rsc->meta, PCMK_META_CONTAINER_ATTR_TARGET);
     if (!force_host
         && (!is_guest || !pcmk__str_eq(source, "host", pcmk__str_casei))) {
 
         return g_hash_table_lookup(node->details->attrs, name);
     }
 
     container = node->details->remote_rsc->container;
 
     switch (node_type) {
         case pcmk__rsc_node_assigned:
             node_type_s = "assigned";
             host = container->allocated_to;
             if (host == NULL) {
                 reason = "not assigned";
             }
             break;
 
         case pcmk__rsc_node_current:
             node_type_s = "current";
 
             if (container->running_on != NULL) {
                 host = container->running_on->data;
             }
             if (host == NULL) {
                 reason = "inactive";
             }
             break;
 
         default:
             // Add support for other enum pcmk__rsc_node values if needed
             CRM_ASSERT(false);
             break;
     }
 
     if (host != NULL) {
         const char *value = g_hash_table_lookup(host->details->attrs, name);
 
         pcmk__rsc_trace(rsc,
                         "%s: Value lookup for %s on %s container host %s %s%s",
                         rsc->id, name, node_type_s, pe__node_name(host),
                         ((value != NULL)? "succeeded: " : "failed"),
                         pcmk__s(value, ""));
         return value;
     }
     pcmk__rsc_trace(rsc,
                     "%s: Not looking for %s on %s container host: %s is %s",
                     rsc->id, name, node_type_s, container->id, reason);
     return NULL;
 }
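 
 /* Usage sketch for pe__node_attribute_calculated(): assuming "node" is where
  * an operation for "rsc" will run, this looks up a node attribute (such as
  * "#kind") on either the node itself or, for guest nodes configured with
  * container-attribute-target="host", on its assigned container host:
  *
  *     const char *value =
  *         pe__node_attribute_calculated(node, "#kind", rsc,
  *                                       pcmk__rsc_node_assigned, false);
  */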
 
 const char *
 pe_node_attribute_raw(const pcmk_node_t *node, const char *name)
 {
     if(node == NULL) {
         return NULL;
     }
     return g_hash_table_lookup(node->details->attrs, name);
 }
diff --git a/lib/pengine/pe_notif.c b/lib/pengine/pe_notif.c
index f3725e8ebc..2b73a7020f 100644
--- a/lib/pengine/pe_notif.c
+++ b/lib/pengine/pe_notif.c
@@ -1,1008 +1,1008 @@
 /*
- * Copyright 2004-2023 the Pacemaker project contributors
+ * Copyright 2004-2024 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/msg_xml.h>
 
 #include <crm/pengine/internal.h>
 #include <pacemaker-internal.h>
 
 #include "pe_status_private.h"
 
 typedef struct notify_entry_s {
     const pcmk_resource_t *rsc;
     const pcmk_node_t *node;
 } notify_entry_t;
 
 /*!
  * \internal
  * \brief Compare two notification entries
  *
  * Compare two notification entries, sorting alphabetically by resource ID
  * and then by node ID, with NULL entries, resources, and nodes sorting
  * after their non-NULL counterparts.
  *
  * \param[in] a  First notification entry to compare
  * \param[in] b  Second notification entry to compare
  *
  * \return -1 if \p a sorts before \p b, 0 if they are equal, otherwise 1
  */
 static gint
 compare_notify_entries(gconstpointer a, gconstpointer b)
 {
     int tmp;
     const notify_entry_t *entry_a = a;
     const notify_entry_t *entry_b = b;
 
     // NULL a or b is not actually possible
     if ((entry_a == NULL) && (entry_b == NULL)) {
         return 0;
     }
     if (entry_a == NULL) {
         return 1;
     }
     if (entry_b == NULL) {
         return -1;
     }
 
     // NULL resources sort last
     if ((entry_a->rsc == NULL) && (entry_b->rsc == NULL)) {
         return 0;
     }
     if (entry_a->rsc == NULL) {
         return 1;
     }
     if (entry_b->rsc == NULL) {
         return -1;
     }
 
     // Compare resource names
     tmp = strcmp(entry_a->rsc->id, entry_b->rsc->id);
     if (tmp != 0) {
         return tmp;
     }
 
     // Otherwise NULL nodes sort last
     if ((entry_a->node == NULL) && (entry_b->node == NULL)) {
         return 0;
     }
     if (entry_a->node == NULL) {
         return 1;
     }
     if (entry_b->node == NULL) {
         return -1;
     }
 
     // Finally, compare node IDs
     return strcmp(entry_a->node->details->id, entry_b->node->details->id);
 }
 
 /*!
  * \internal
  * \brief Duplicate a notification entry
  *
  * \param[in] entry  Entry to duplicate
  *
  * \return Newly allocated duplicate of \p entry
  * \note It is the caller's responsibility to free the return value.
  */
 static notify_entry_t *
 dup_notify_entry(const notify_entry_t *entry)
 {
     notify_entry_t *dup = calloc(1, sizeof(notify_entry_t));
 
     CRM_ASSERT(dup != NULL);
     dup->rsc = entry->rsc;
     dup->node = entry->node;
     return dup;
 }
 
 /*!
  * \internal
  * \brief Given a list of nodes, create strings with node names
  *
  * \param[in]  list             List of nodes (as pcmk_node_t *)
  * \param[out] all_node_names   If not NULL, will be set to space-separated list
  *                              of the names of all nodes in \p list
  * \param[out] host_node_names  Same as \p all_node_names, except active
  *                              guest nodes will list the name of their host
  *
  * \note The caller is responsible for freeing the output argument values using
  *       \p g_string_free().
  */
 static void
 get_node_names(const GList *list, GString **all_node_names,
                GString **host_node_names)
 {
     if (all_node_names != NULL) {
         *all_node_names = NULL;
     }
     if (host_node_names != NULL) {
         *host_node_names = NULL;
     }
 
     for (const GList *iter = list; iter != NULL; iter = iter->next) {
         const pcmk_node_t *node = (const pcmk_node_t *) iter->data;
 
         if (node->details->uname == NULL) {
             continue;
         }
 
         // Always add to list of all node names
         if (all_node_names != NULL) {
             pcmk__add_word(all_node_names, 1024, node->details->uname);
         }
 
         // Add to host node name list if appropriate
         if (host_node_names != NULL) {
             if (pe__is_guest_node(node)
                 && (node->details->remote_rsc->container->running_on != NULL)) {
                 node = pe__current_node(node->details->remote_rsc->container);
                 if (node->details->uname == NULL) {
                     continue;
                 }
             }
             pcmk__add_word(host_node_names, 1024, node->details->uname);
         }
     }
 
     if ((all_node_names != NULL) && (*all_node_names == NULL)) {
         *all_node_names = g_string_new(" ");
     }
     if ((host_node_names != NULL) && (*host_node_names == NULL)) {
         *host_node_names = g_string_new(" ");
     }
 }
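 
 /* Usage sketch for get_node_names(): either output argument may be NULL when
  * that list is not needed, and any requested result is always a non-NULL
  * GString (a single space when the input list is empty), so it can be used
  * and freed unconditionally. Assuming "rsc" is a resource in the cluster:
  *
  *     GString *all = NULL;
  *
  *     get_node_names(rsc->cluster->nodes, &all, NULL);
  *     crm_trace("Cluster nodes:%s", (const char *) all->str);
  *     g_string_free(all, TRUE);
  */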
 
 /*!
  * \internal
  * \brief Create strings of instance and node names from notification entries
  *
  * \param[in,out] list        List of notification entries (will be sorted here)
  * \param[out]    rsc_names   If not NULL, will be set to space-separated list
  *                            of clone instances from \p list
  * \param[out]    node_names  If not NULL, will be set to space-separated list
  *                            of node names from \p list
  *
  * \return (Possibly new) head of sorted \p list
  * \note The caller is responsible for freeing the output argument values using
  *       \p g_list_free_full() and \p g_string_free().
  */
 static GList *
 notify_entries_to_strings(GList *list, GString **rsc_names,
                           GString **node_names)
 {
     const char *last_rsc_id = NULL;
 
     // Initialize output lists to NULL
     if (rsc_names != NULL) {
         *rsc_names = NULL;
     }
     if (node_names != NULL) {
         *node_names = NULL;
     }
 
     // Sort input list for user-friendliness (and ease of filtering duplicates)
     list = g_list_sort(list, compare_notify_entries);
 
     for (GList *gIter = list; gIter != NULL; gIter = gIter->next) {
         notify_entry_t *entry = (notify_entry_t *) gIter->data;
 
         // Entry must have a resource (with ID)
         CRM_LOG_ASSERT((entry != NULL) && (entry->rsc != NULL)
                        && (entry->rsc->id != NULL));
         if ((entry == NULL) || (entry->rsc == NULL)
             || (entry->rsc->id == NULL)) {
             continue;
         }
 
         // Entry must have a node unless listing inactive resources
         CRM_LOG_ASSERT((node_names == NULL) || (entry->node != NULL));
         if ((node_names != NULL) && (entry->node == NULL)) {
             continue;
         }
 
         // Don't add duplicates of a particular clone instance
         if (pcmk__str_eq(entry->rsc->id, last_rsc_id, pcmk__str_none)) {
             continue;
         }
         last_rsc_id = entry->rsc->id;
 
         if (rsc_names != NULL) {
             pcmk__add_word(rsc_names, 1024, entry->rsc->id);
         }
         if ((node_names != NULL) && (entry->node->details->uname != NULL)) {
             pcmk__add_word(node_names, 1024, entry->node->details->uname);
         }
     }
 
     // If there are no entries, return "empty" lists
     if ((rsc_names != NULL) && (*rsc_names == NULL)) {
         *rsc_names = g_string_new(" ");
     }
     if ((node_names != NULL) && (*node_names == NULL)) {
         *node_names = g_string_new(" ");
     }
 
     return list;
 }
 
 /*!
  * \internal
  * \brief Copy a meta-attribute into a notify action
  *
  * \param[in]     key        Name of meta-attribute to copy
  * \param[in]     value      Value of meta-attribute to copy
  * \param[in,out] user_data  Notify action to copy into
  */
 static void
 copy_meta_to_notify(gpointer key, gpointer value, gpointer user_data)
 {
     pcmk_action_t *notify = (pcmk_action_t *) user_data;
 
     /* Any existing meta-attributes (for example, the action timeout) are for
      * the notify action itself, so don't override those.
      */
     if (g_hash_table_lookup(notify->meta, (const char *) key) != NULL) {
         return;
     }
 
     g_hash_table_insert(notify->meta, strdup((const char *) key),
                         strdup((const char *) value));
 }
 
 static void
 add_notify_data_to_action_meta(const notify_data_t *n_data,
                                pcmk_action_t *action)
 {
     for (const GSList *item = n_data->keys; item; item = item->next) {
         const pcmk_nvpair_t *nvpair = (const pcmk_nvpair_t *) item->data;
 
         add_hash_param(action->meta, nvpair->name, nvpair->value);
     }
 }
 
 /*!
  * \internal
  * \brief Create a new notify pseudo-action for a clone resource
  *
  * \param[in,out] rsc           Clone resource that notification is for
  * \param[in]     action        Action to use in notify action key
  * \param[in]     notif_action  PCMK_ACTION_NOTIFY or PCMK_ACTION_NOTIFIED
  * \param[in]     notif_type    "pre", "post", "confirmed-pre", "confirmed-post"
  *
  * \return Newly created notify pseudo-action
  */
 static pcmk_action_t *
 new_notify_pseudo_action(pcmk_resource_t *rsc, const pcmk_action_t *action,
                          const char *notif_action, const char *notif_type)
 {
     pcmk_action_t *notify = NULL;
 
     notify = custom_action(rsc,
                            pcmk__notify_key(rsc->id, notif_type, action->task),
                            notif_action, NULL,
                            pcmk_is_set(action->flags, pcmk_action_optional),
                            rsc->cluster);
     pe__set_action_flags(notify, pcmk_action_pseudo);
     add_hash_param(notify->meta, "notify_key_type", notif_type);
     add_hash_param(notify->meta, "notify_key_operation", action->task);
     return notify;
 }
 
 /*!
  * \internal
  * \brief Create a new notify action for a clone instance
  *
  * \param[in,out] rsc          Clone instance that notification is for
  * \param[in]     node         Node that notification is for
  * \param[in,out] op           Action that notification is for
  * \param[in,out] notify_done  Parent pseudo-action for notifications complete
  * \param[in]     n_data       Notification values to add to action meta-data
  *
  * \return Newly created notify action
  */
 static pcmk_action_t *
 new_notify_action(pcmk_resource_t *rsc, const pcmk_node_t *node,
                   pcmk_action_t *op, pcmk_action_t *notify_done,
                   const notify_data_t *n_data)
 {
     char *key = NULL;
     pcmk_action_t *notify_action = NULL;
     const char *value = NULL;
     const char *task = NULL;
     const char *skip_reason = NULL;
 
     CRM_CHECK((rsc != NULL) && (node != NULL), return NULL);
 
     // Ensure we have all the info we need
     if (op == NULL) {
         skip_reason = "no action";
     } else if (notify_done == NULL) {
         skip_reason = "no parent notification";
     } else if (!node->details->online) {
         skip_reason = "node offline";
     } else if (!pcmk_is_set(op->flags, pcmk_action_runnable)) {
         skip_reason = "original action not runnable";
     }
     if (skip_reason != NULL) {
         pcmk__rsc_trace(rsc, "Skipping notify action for %s on %s: %s",
                         rsc->id, pe__node_name(node), skip_reason);
         return NULL;
     }
 
     value = g_hash_table_lookup(op->meta, "notify_type");     // "pre" or "post"
     task = g_hash_table_lookup(op->meta, "notify_operation"); // original action
 
     pcmk__rsc_trace(rsc, "Creating notify action for %s on %s (%s-%s)",
                     rsc->id, pe__node_name(node), value, task);
 
     // Create the notify action
     key = pcmk__notify_key(rsc->id, value, task);
     notify_action = custom_action(rsc, key, op->task, node,
                                   pcmk_is_set(op->flags, pcmk_action_optional),
                                   rsc->cluster);
 
     // Add meta-data to notify action
     g_hash_table_foreach(op->meta, copy_meta_to_notify, notify_action);
     add_notify_data_to_action_meta(n_data, notify_action);
 
     // Order notify after original action and before parent notification
     order_actions(op, notify_action, pcmk__ar_ordered);
     order_actions(notify_action, notify_done, pcmk__ar_ordered);
     return notify_action;
 }
 
 /*!
  * \internal
  * \brief Create a new "post-" notify action for a clone instance
  *
  * \param[in,out] rsc     Clone instance that notification is for
  * \param[in]     node    Node that notification is for
  * \param[in,out] n_data  Notification values to add to action meta-data
  */
 static void
 new_post_notify_action(pcmk_resource_t *rsc, const pcmk_node_t *node,
                        notify_data_t *n_data)
 {
     pcmk_action_t *notify = NULL;
 
     CRM_ASSERT(n_data != NULL);
 
     // Create the "post-" notify action for specified instance
     notify = new_notify_action(rsc, node, n_data->post, n_data->post_done,
                                n_data);
     if (notify != NULL) {
         notify->priority = INFINITY;
     }
 
     // Order recurring monitors after all "post-" notifications complete
     if (n_data->post_done == NULL) {
         return;
     }
     for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) {
         pcmk_action_t *mon = (pcmk_action_t *) iter->data;
         const char *interval_ms_s = NULL;
 
         interval_ms_s = g_hash_table_lookup(mon->meta,
                                             XML_LRM_ATTR_INTERVAL_MS);
         if (pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)
             || pcmk__str_eq(mon->task, PCMK_ACTION_CANCEL, pcmk__str_none)) {
             continue; // Not a recurring monitor
         }
         order_actions(n_data->post_done, mon, pcmk__ar_ordered);
     }
 }
 
 /*!
  * \internal
  * \brief Create and order notification pseudo-actions for a clone action
  *
  * In addition to the actual notify actions needed for each clone instance,
  * clone notifications also require pseudo-actions to provide ordering points
  * in the notification process. This creates the notification data, along with
  * appropriate pseudo-actions and their orderings.
  *
  * For example, the ordering sequence for starting a clone is:
  *
  *     "pre-" notify pseudo-action for clone
  *     -> "pre-" notify actions for each clone instance
  *     -> "pre-" notifications complete pseudo-action for clone
  *     -> start actions for each clone instance
  *     -> "started" pseudo-action for clone
  *     -> "post-" notify pseudo-action for clone
  *     -> "post-" notify actions for each clone instance
  *     -> "post-" notifications complete pseudo-action for clone
  *
  * \param[in,out] rsc       Clone that notifications are for
  * \param[in]     task      Name of action that notifications are for
  * \param[in,out] action    If not NULL, create a "pre-" pseudo-action ordered
  *                          before a "pre-" complete pseudo-action, ordered
  *                          before this action
  * \param[in,out] complete  If not NULL, create a "post-" pseudo-action ordered
  *                          after this action, and a "post-" complete
  *                          pseudo-action ordered after that
  *
  * \return Newly created notification data
  */
 notify_data_t *
 pe__action_notif_pseudo_ops(pcmk_resource_t *rsc, const char *task,
                             pcmk_action_t *action, pcmk_action_t *complete)
 {
     notify_data_t *n_data = NULL;
 
     if (!pcmk_is_set(rsc->flags, pcmk_rsc_notify)) {
         return NULL;
     }
 
     n_data = calloc(1, sizeof(notify_data_t));
     CRM_ASSERT(n_data != NULL);
 
     n_data->action = task;
 
     if (action != NULL) { // Need "pre-" pseudo-actions
 
         // Create "pre-" notify pseudo-action for clone
         n_data->pre = new_notify_pseudo_action(rsc, action, PCMK_ACTION_NOTIFY,
                                                "pre");
         pe__set_action_flags(n_data->pre, pcmk_action_runnable);
         add_hash_param(n_data->pre->meta, "notify_type", "pre");
         add_hash_param(n_data->pre->meta, "notify_operation", n_data->action);
 
         // Create "pre-" notifications complete pseudo-action for clone
         n_data->pre_done = new_notify_pseudo_action(rsc, action,
                                                     PCMK_ACTION_NOTIFIED,
                                                     "confirmed-pre");
         pe__set_action_flags(n_data->pre_done, pcmk_action_runnable);
         add_hash_param(n_data->pre_done->meta, "notify_type", "pre");
         add_hash_param(n_data->pre_done->meta,
                        "notify_operation", n_data->action);
 
         // Order "pre-" -> "pre-" complete -> original action
         order_actions(n_data->pre, n_data->pre_done, pcmk__ar_ordered);
         order_actions(n_data->pre_done, action, pcmk__ar_ordered);
     }
 
     if (complete != NULL) { // Need "post-" pseudo-actions
 
         // Create "post-" notify pseudo-action for clone
         n_data->post = new_notify_pseudo_action(rsc, complete,
                                                 PCMK_ACTION_NOTIFY, "post");
         n_data->post->priority = INFINITY;
         if (pcmk_is_set(complete->flags, pcmk_action_runnable)) {
             pe__set_action_flags(n_data->post, pcmk_action_runnable);
         } else {
             pe__clear_action_flags(n_data->post, pcmk_action_runnable);
         }
         add_hash_param(n_data->post->meta, "notify_type", "post");
         add_hash_param(n_data->post->meta, "notify_operation", n_data->action);
 
         // Create "post-" notifications complete pseudo-action for clone
         n_data->post_done = new_notify_pseudo_action(rsc, complete,
                                                      PCMK_ACTION_NOTIFIED,
                                                      "confirmed-post");
         n_data->post_done->priority = INFINITY;
         if (pcmk_is_set(complete->flags, pcmk_action_runnable)) {
             pe__set_action_flags(n_data->post_done, pcmk_action_runnable);
         } else {
             pe__clear_action_flags(n_data->post_done, pcmk_action_runnable);
         }
         add_hash_param(n_data->post_done->meta, "notify_type", "post");
         add_hash_param(n_data->post_done->meta,
                        "notify_operation", n_data->action);
 
         // Order original action complete -> "post-" -> "post-" complete
         order_actions(complete, n_data->post, pcmk__ar_first_implies_then);
         order_actions(n_data->post, n_data->post_done,
                       pcmk__ar_first_implies_then);
     }
 
     // If we created both, order "pre-" complete -> "post-"
     if ((action != NULL) && (complete != NULL)) {
         order_actions(n_data->pre_done, n_data->post, pcmk__ar_ordered);
     }
     return n_data;
 }
 
 /*!
  * \internal
  * \brief Create a new notification entry
  *
  * \param[in] rsc   Resource for notification
  * \param[in] node  Node for notification
  *
  * \return Newly allocated notification entry
  * \note The caller is responsible for freeing the return value.
  */
 static notify_entry_t *
 new_notify_entry(const pcmk_resource_t *rsc, const pcmk_node_t *node)
 {
     notify_entry_t *entry = calloc(1, sizeof(notify_entry_t));
 
     CRM_ASSERT(entry != NULL);
     entry->rsc = rsc;
     entry->node = node;
     return entry;
 }
 
 /*!
  * \internal
  * \brief Add notification data for resource state and optionally actions
  *
  * \param[in]     rsc       Clone or clone instance being notified
  * \param[in]     activity  Whether to add notification entries for actions
  * \param[in,out] n_data    Notification data for clone
  */
 static void
 collect_resource_data(const pcmk_resource_t *rsc, bool activity,
                       notify_data_t *n_data)
 {
     const GList *iter = NULL;
     notify_entry_t *entry = NULL;
     const pcmk_node_t *node = NULL;
 
     if (n_data == NULL) {
         return;
     }
 
     if (n_data->allowed_nodes == NULL) {
         n_data->allowed_nodes = rsc->allowed_nodes;
     }
 
     // If this is a clone, call recursively for each instance
     if (rsc->children != NULL) {
         for (iter = rsc->children; iter != NULL; iter = iter->next) {
             const pcmk_resource_t *child = (const pcmk_resource_t *) iter->data;
 
             collect_resource_data(child, activity, n_data);
         }
         return;
     }
 
     // This is a notification for a single clone instance
 
     if (rsc->running_on != NULL) {
         node = rsc->running_on->data; // First is sufficient
     }
     entry = new_notify_entry(rsc, node);
 
     // Add notification indicating the resource state
     switch (rsc->role) {
         case pcmk_role_stopped:
             n_data->inactive = g_list_prepend(n_data->inactive, entry);
             break;
 
         case pcmk_role_started:
             n_data->active = g_list_prepend(n_data->active, entry);
             break;
 
         case pcmk_role_unpromoted:
             n_data->unpromoted = g_list_prepend(n_data->unpromoted, entry);
             n_data->active = g_list_prepend(n_data->active,
                                             dup_notify_entry(entry));
             break;
 
         case pcmk_role_promoted:
             n_data->promoted = g_list_prepend(n_data->promoted, entry);
             n_data->active = g_list_prepend(n_data->active,
                                             dup_notify_entry(entry));
             break;
 
         default:
             pcmk__sched_err("Resource %s role on %s (%s) is not supported for "
                             "notifications (bug?)",
                             rsc->id, pe__node_name(node), role2text(rsc->role));
             free(entry);
             break;
     }
 
     if (!activity) {
         return;
     }
 
     // Add notification entries for each of the resource's actions
     for (iter = rsc->actions; iter != NULL; iter = iter->next) {
         const pcmk_action_t *op = (const pcmk_action_t *) iter->data;
 
         if (!pcmk_is_set(op->flags, pcmk_action_optional)
             && (op->node != NULL)) {
             enum action_tasks task = text2task(op->task);
 
             if ((task == pcmk_action_stop) && op->node->details->unclean) {
                 // Create anyway (additional noise if node can't be fenced)
             } else if (!pcmk_is_set(op->flags, pcmk_action_runnable)) {
                 continue;
             }
 
             entry = new_notify_entry(rsc, op->node);
 
             switch (task) {
                 case pcmk_action_start:
                     n_data->start = g_list_prepend(n_data->start, entry);
                     break;
                 case pcmk_action_stop:
                     n_data->stop = g_list_prepend(n_data->stop, entry);
                     break;
                 case pcmk_action_promote:
                     n_data->promote = g_list_prepend(n_data->promote, entry);
                     break;
                 case pcmk_action_demote:
                     n_data->demote = g_list_prepend(n_data->demote, entry);
                     break;
                 default:
                     free(entry);
                     break;
             }
         }
     }
 }
 
 // For (char *) value
 #define add_notify_env(n_data, key, value) do {                         \
          n_data->keys = pcmk_prepend_nvpair(n_data->keys, key, value);  \
     } while (0)
 
 // For (GString *) value
 #define add_notify_env_gs(n_data, key, value) do {                      \
          n_data->keys = pcmk_prepend_nvpair(n_data->keys, key,          \
                                             (const char *) value->str); \
     } while (0)
 
 // For (GString *) value
 #define add_notify_env_free_gs(n_data, key, value) do {                 \
          n_data->keys = pcmk_prepend_nvpair(n_data->keys, key,          \
                                             (const char *) value->str); \
          g_string_free(value, TRUE); value = NULL;                      \
     } while (0)
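 
 /* These macros all prepend a name/value pair to n_data->keys, which
  * add_notify_data_to_action_meta() later copies into each notify action's
  * meta-attributes. A minimal sketch, assuming n_data has been allocated:
  *
  *     add_notify_env(n_data, "notify_stop_resource", "rsc1");
  *     // n_data->keys now begins with the pair notify_stop_resource=rsc1
  */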
 
 /*!
  * \internal
  * \brief Create notification name/value pairs from structured data
  *
  * \param[in]     rsc       Resource that notification is for
  * \param[in,out] n_data    Notification data
  */
 static void
 add_notif_keys(const pcmk_resource_t *rsc, notify_data_t *n_data)
 {
     bool required = false; // Whether to make notify actions required
     GString *rsc_list = NULL;
     GString *node_list = NULL;
     GString *metal_list = NULL;
     const char *source = NULL;
     GList *nodes = NULL;
 
     n_data->stop = notify_entries_to_strings(n_data->stop,
                                              &rsc_list, &node_list);
     if ((strcmp(" ", (const char *) rsc_list->str) != 0)
         && pcmk__str_eq(n_data->action, PCMK_ACTION_STOP, pcmk__str_none)) {
         required = true;
     }
     add_notify_env_free_gs(n_data, "notify_stop_resource", rsc_list);
     add_notify_env_free_gs(n_data, "notify_stop_uname", node_list);
 
     if ((n_data->start != NULL)
         && pcmk__str_eq(n_data->action, PCMK_ACTION_START, pcmk__str_none)) {
         required = true;
     }
     n_data->start = notify_entries_to_strings(n_data->start,
                                               &rsc_list, &node_list);
     add_notify_env_free_gs(n_data, "notify_start_resource", rsc_list);
     add_notify_env_free_gs(n_data, "notify_start_uname", node_list);
 
     if ((n_data->demote != NULL)
         && pcmk__str_eq(n_data->action, PCMK_ACTION_DEMOTE, pcmk__str_none)) {
         required = true;
     }
     n_data->demote = notify_entries_to_strings(n_data->demote,
                                                &rsc_list, &node_list);
     add_notify_env_free_gs(n_data, "notify_demote_resource", rsc_list);
     add_notify_env_free_gs(n_data, "notify_demote_uname", node_list);
 
     if ((n_data->promote != NULL)
         && pcmk__str_eq(n_data->action, PCMK_ACTION_PROMOTE, pcmk__str_none)) {
         required = true;
     }
     n_data->promote = notify_entries_to_strings(n_data->promote,
                                                 &rsc_list, &node_list);
     add_notify_env_free_gs(n_data, "notify_promote_resource", rsc_list);
     add_notify_env_free_gs(n_data, "notify_promote_uname", node_list);
 
     n_data->active = notify_entries_to_strings(n_data->active,
                                                &rsc_list, &node_list);
     add_notify_env_free_gs(n_data, "notify_active_resource", rsc_list);
     add_notify_env_free_gs(n_data, "notify_active_uname", node_list);
 
     n_data->unpromoted = notify_entries_to_strings(n_data->unpromoted,
                                                    &rsc_list, &node_list);
     add_notify_env_gs(n_data, "notify_unpromoted_resource", rsc_list);
     add_notify_env_gs(n_data, "notify_unpromoted_uname", node_list);
 
     // Deprecated: kept for backward compatibility with older resource agents
     add_notify_env_free_gs(n_data, "notify_slave_resource", rsc_list);
     add_notify_env_free_gs(n_data, "notify_slave_uname", node_list);
 
     n_data->promoted = notify_entries_to_strings(n_data->promoted,
                                                  &rsc_list, &node_list);
     add_notify_env_gs(n_data, "notify_promoted_resource", rsc_list);
     add_notify_env_gs(n_data, "notify_promoted_uname", node_list);
 
     // Deprecated: kept for backward compatibility with older resource agents
     add_notify_env_free_gs(n_data, "notify_master_resource", rsc_list);
     add_notify_env_free_gs(n_data, "notify_master_uname", node_list);
 
     n_data->inactive = notify_entries_to_strings(n_data->inactive,
                                                  &rsc_list, NULL);
     add_notify_env_free_gs(n_data, "notify_inactive_resource", rsc_list);
 
     nodes = g_hash_table_get_values(n_data->allowed_nodes);
     if (!pcmk__is_daemon) {
         /* For display purposes, sort the node list so that regression test
          * output is consistent (while avoiding the performance hit on a live
          * cluster).
          */
         nodes = g_list_sort(nodes, pe__cmp_node_name);
     }
     get_node_names(nodes, &node_list, NULL);
     add_notify_env_free_gs(n_data, "notify_available_uname", node_list);
     g_list_free(nodes);
 
-    source = g_hash_table_lookup(rsc->meta, XML_RSC_ATTR_TARGET);
+    source = g_hash_table_lookup(rsc->meta, PCMK_META_CONTAINER_ATTR_TARGET);
     if (pcmk__str_eq("host", source, pcmk__str_none)) {
         get_node_names(rsc->cluster->nodes, &node_list, &metal_list);
         add_notify_env_free_gs(n_data, "notify_all_hosts", metal_list);
     } else {
         get_node_names(rsc->cluster->nodes, &node_list, NULL);
     }
     add_notify_env_free_gs(n_data, "notify_all_uname", node_list);
 
     if (required && (n_data->pre != NULL)) {
         pe__clear_action_flags(n_data->pre, pcmk_action_optional);
         pe__clear_action_flags(n_data->pre_done, pcmk_action_optional);
     }
 
     if (required && (n_data->post != NULL)) {
         pe__clear_action_flags(n_data->post, pcmk_action_optional);
         pe__clear_action_flags(n_data->post_done, pcmk_action_optional);
     }
 }
 
 /*!
  * \internal
  * \brief Find any remote connection start relevant to an action
  *
  * \param[in] action  Action to check
  *
  * \return If \p action is behind a remote connection, the connection's start
  *         action, otherwise NULL
  */
 static pcmk_action_t *
 find_remote_start(pcmk_action_t *action)
 {
     if ((action != NULL) && (action->node != NULL)) {
         pcmk_resource_t *remote_rsc = action->node->details->remote_rsc;
 
         if (remote_rsc != NULL) {
             return find_first_action(remote_rsc->actions, NULL,
                                      PCMK_ACTION_START,
                                      NULL);
         }
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Create notify actions, and add notify data to original actions
  *
  * \param[in,out] rsc     Clone or clone instance that notification is for
  * \param[in,out] n_data  Clone notification data for some action
  */
 static void
 create_notify_actions(pcmk_resource_t *rsc, notify_data_t *n_data)
 {
     GList *iter = NULL;
     pcmk_action_t *stop = NULL;
     pcmk_action_t *start = NULL;
     enum action_tasks task = text2task(n_data->action);
 
     // If this is a clone, call recursively for each instance
     if (rsc->children != NULL) {
         g_list_foreach(rsc->children, (GFunc) create_notify_actions, n_data);
         return;
     }
 
     // Add notification meta-attributes to original actions
     for (iter = rsc->actions; iter != NULL; iter = iter->next) {
         pcmk_action_t *op = (pcmk_action_t *) iter->data;
 
         if (!pcmk_is_set(op->flags, pcmk_action_optional)
             && (op->node != NULL)) {
             switch (text2task(op->task)) {
                 case pcmk_action_start:
                 case pcmk_action_stop:
                 case pcmk_action_promote:
                 case pcmk_action_demote:
                     add_notify_data_to_action_meta(n_data, op);
                     break;
                 default:
                     break;
             }
         }
     }
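     /* (add_notify_data_to_action_meta() presumably merges the notify_*
      *  name/value pairs built by add_notif_keys() into the action's
      *  meta-attributes, so the agent sees the same notification environment
      *  during the real action as during its pre-/post-notifications.)
      */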
 
     // Skip notify action itself if original action was not needed
     switch (task) {
         case pcmk_action_start:
             if (n_data->start == NULL) {
                 pcmk__rsc_trace(rsc, "No notify action needed for %s %s",
                                 rsc->id, n_data->action);
                 return;
             }
             break;
 
         case pcmk_action_promote:
             if (n_data->promote == NULL) {
                 pcmk__rsc_trace(rsc, "No notify action needed for %s %s",
                                 rsc->id, n_data->action);
                 return;
             }
             break;
 
         case pcmk_action_demote:
             if (n_data->demote == NULL) {
                 pcmk__rsc_trace(rsc, "No notify action needed for %s %s",
                                 rsc->id, n_data->action);
                 return;
             }
             break;
 
         default:
                 // We cannot do the same for stop, which might be implied by fencing
             break;
     }
 
     pcmk__rsc_trace(rsc, "Creating notify actions for %s %s",
                     rsc->id, n_data->action);
 
     // Create notify actions for stop or demote
     if ((rsc->role != pcmk_role_stopped)
         && ((task == pcmk_action_stop) || (task == pcmk_action_demote))) {
 
         stop = find_first_action(rsc->actions, NULL, PCMK_ACTION_STOP, NULL);
 
         for (iter = rsc->running_on; iter != NULL; iter = iter->next) {
             pcmk_node_t *current_node = (pcmk_node_t *) iter->data;
 
             /* If a stop is a pseudo-action implied by fencing, don't try to
              * notify the node getting fenced.
              */
             if ((stop != NULL)
                 && pcmk_is_set(stop->flags, pcmk_action_pseudo)
                 && (current_node->details->unclean
                     || current_node->details->remote_requires_reset)) {
                 continue;
             }
 
             new_notify_action(rsc, current_node, n_data->pre,
                               n_data->pre_done, n_data);
 
             if ((task == pcmk_action_demote) || (stop == NULL)
                 || pcmk_is_set(stop->flags, pcmk_action_optional)) {
                 new_post_notify_action(rsc, current_node, n_data);
             }
         }
     }
 
     // Create notify actions for start or promote
     if ((rsc->next_role != pcmk_role_stopped)
         && ((task == pcmk_action_start) || (task == pcmk_action_promote))) {
 
         start = find_first_action(rsc->actions, NULL, PCMK_ACTION_START, NULL);
         if (start != NULL) {
             pcmk_action_t *remote_start = find_remote_start(start);
 
             if ((remote_start != NULL)
                 && !pcmk_is_set(remote_start->flags, pcmk_action_runnable)) {
                 /* Start and promote actions for a clone instance behind
                  * a Pacemaker Remote connection happen after the
                  * connection starts. If the connection start is blocked, do
                  * not schedule notifications for these actions.
                  */
                 return;
             }
         }
         if (rsc->allocated_to == NULL) {
             pcmk__sched_err("Next role '%s' but %s is not allocated",
                             role2text(rsc->next_role), rsc->id);
             return;
         }
         if ((task != pcmk_action_start) || (start == NULL)
             || pcmk_is_set(start->flags, pcmk_action_optional)) {
 
             new_notify_action(rsc, rsc->allocated_to, n_data->pre,
                               n_data->pre_done, n_data);
         }
         new_post_notify_action(rsc, rsc->allocated_to, n_data);
     }
 }
 
 /*!
  * \internal
  * \brief Create notification data and actions for one clone action
  *
  * \param[in,out] rsc     Clone resource that notification is for
  * \param[in,out] n_data  Clone notification data for some action
  */
 void
 pe__create_action_notifications(pcmk_resource_t *rsc, notify_data_t *n_data)
 {
     if ((rsc == NULL) || (n_data == NULL)) {
         return;
     }
     collect_resource_data(rsc, true, n_data);
     add_notif_keys(rsc, n_data);
     create_notify_actions(rsc, n_data);
 }
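
 /* Usage sketch (hypothetical caller; "clone", "start", and "started" are
  * assumed variable names, and the pe__action_notif_pseudo_ops() arguments
  * follow the pattern used by pe__order_notifs_after_fencing() below):
  *
  *     notify_data_t *n_data = pe__action_notif_pseudo_ops(clone,
  *                                                         PCMK_ACTION_START,
  *                                                         start, started);
  *
  *     if (n_data != NULL) {
  *         pe__create_action_notifications(clone, n_data);
  *         pe__free_action_notification_data(n_data);
  *     }
  */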
 
 /*!
  * \internal
  * \brief Free notification data for one action
  *
  * \param[in,out] n_data  Notification data to free
  */
 void
 pe__free_action_notification_data(notify_data_t *n_data)
 {
     if (n_data == NULL) {
         return;
     }
     g_list_free_full(n_data->stop, free);
     g_list_free_full(n_data->start, free);
     g_list_free_full(n_data->demote, free);
     g_list_free_full(n_data->promote, free);
     g_list_free_full(n_data->promoted, free);
     g_list_free_full(n_data->unpromoted, free);
     g_list_free_full(n_data->active, free);
     g_list_free_full(n_data->inactive, free);
     pcmk_free_nvpairs(n_data->keys);
     free(n_data);
 }
 
 /*!
  * \internal
  * \brief Order clone "notifications complete" pseudo-action after fencing
  *
  * If a stop action is implied by fencing, the usual notification
  * pseudo-actions are not sufficient to order everything properly; they may
  * not even create all needed notifications when the clone is also stopping
  * on another node and another clone is ordered after it. This function
  * creates new notification pseudo-actions, ordered relative to the fencing,
  * to ensure everything works properly.
  *
  * \param[in]     stop        Stop action implied by fencing
  * \param[in,out] rsc         Clone resource that notification is for
  * \param[in,out] stonith_op  Fencing action that implies \p stop
  */
 void
 pe__order_notifs_after_fencing(const pcmk_action_t *stop, pcmk_resource_t *rsc,
                                pcmk_action_t *stonith_op)
 {
     notify_data_t *n_data;
 
     crm_info("Ordering notifications for implied %s after fencing", stop->uuid);
     n_data = pe__action_notif_pseudo_ops(rsc, PCMK_ACTION_STOP, NULL,
                                          stonith_op);
 
     if (n_data != NULL) {
         collect_resource_data(rsc, false, n_data);
         add_notify_env(n_data, "notify_stop_resource", rsc->id);
         add_notify_env(n_data, "notify_stop_uname", stop->node->details->uname);
         create_notify_actions(uber_parent(rsc), n_data);
         pe__free_action_notification_data(n_data);
     }
 }
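
 /* Usage sketch (hypothetical caller; "stop", "rsc", and "stonith_op" name the
  * implied stop, the clone resource, and the fencing action respectively):
  *
  *     pe__order_notifs_after_fencing(stop, rsc, stonith_op);
  *
  * This creates stop-notification data anchored on the fencing action, records
  * the fenced resource and node as the stopping entry, and creates the notify
  * actions for the clone as a whole.
  */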