diff --git a/cts/scheduler/dot/clone-recover-no-shuffle-5.dot b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot
index 7219ee5a6d..a2356f2280 100644
--- a/cts/scheduler/dot/clone-recover-no-shuffle-5.dot
+++ b/cts/scheduler/dot/clone-recover-no-shuffle-5.dot
@@ -1,80 +1,56 @@
  digraph "g" {
 "grp-clone_running_0" [ style=bold color="green" fontcolor="orange"]
 "grp-clone_start_0" -> "grp-clone_running_0" [ style = bold]
 "grp-clone_start_0" -> "grp:0_start_0" [ style = bold]
-"grp-clone_start_0" -> "grp:1_start_0" [ style = bold]
 "grp-clone_start_0" -> "grp:2_start_0" [ style = bold]
 "grp-clone_start_0" [ style=bold color="green" fontcolor="orange"]
 "grp-clone_stop_0" -> "grp-clone_stopped_0" [ style = bold]
 "grp-clone_stop_0" -> "grp:0_stop_0" [ style = bold]
-"grp-clone_stop_0" -> "grp:1_stop_0" [ style = bold]
 "grp-clone_stop_0" [ style=bold color="green" fontcolor="orange"]
 "grp-clone_stopped_0" -> "grp-clone_start_0" [ style = bold]
 "grp-clone_stopped_0" [ style=bold color="green" fontcolor="orange"]
 "grp:0_running_0" -> "grp-clone_running_0" [ style = bold]
 "grp:0_running_0" [ style=bold color="green" fontcolor="orange"]
 "grp:0_start_0" -> "grp:0_running_0" [ style = bold]
 "grp:0_start_0" -> "rsc1_start_0 node1" [ style = bold]
 "grp:0_start_0" -> "rsc2_start_0 node1" [ style = bold]
 "grp:0_start_0" [ style=bold color="green" fontcolor="orange"]
 "grp:0_stop_0" -> "grp:0_stopped_0" [ style = bold]
 "grp:0_stop_0" -> "rsc1_stop_0 node2" [ style = bold]
 "grp:0_stop_0" -> "rsc2_stop_0 node2" [ style = bold]
 "grp:0_stop_0" [ style=bold color="green" fontcolor="orange"]
 "grp:0_stopped_0" -> "grp-clone_stopped_0" [ style = bold]
 "grp:0_stopped_0" -> "grp:0_start_0" [ style = bold]
 "grp:0_stopped_0" [ style=bold color="green" fontcolor="orange"]
-"grp:1_running_0" -> "grp-clone_running_0" [ style = bold]
-"grp:1_running_0" [ style=bold color="green" fontcolor="orange"]
-"grp:1_start_0" -> "grp:1_running_0" [ style = bold]
-"grp:1_start_0" -> "rsc1_start_0 node1" [ style = bold]
-"grp:1_start_0" -> "rsc2_start_0 node1" [ style = bold]
-"grp:1_start_0" [ style=bold color="green" fontcolor="orange"]
-"grp:1_stop_0" -> "grp:1_stopped_0" [ style = bold]
-"grp:1_stop_0" -> "rsc1_stop_0 node3" [ style = bold]
-"grp:1_stop_0" -> "rsc2_stop_0 node3" [ style = bold]
-"grp:1_stop_0" [ style=bold color="green" fontcolor="orange"]
-"grp:1_stopped_0" -> "grp-clone_stopped_0" [ style = bold]
-"grp:1_stopped_0" -> "grp:1_start_0" [ style = bold]
-"grp:1_stopped_0" [ style=bold color="green" fontcolor="orange"]
 "grp:2_running_0" -> "grp-clone_running_0" [ style = bold]
 "grp:2_running_0" [ style=bold color="green" fontcolor="orange"]
 "grp:2_start_0" -> "grp:2_running_0" [ style = bold]
-"grp:2_start_0" -> "rsc1:2_start_0 node1" [ style = bold]
-"grp:2_start_0" -> "rsc2:2_start_0 node1" [ style = bold]
+"grp:2_start_0" -> "rsc1:2_start_0 node2" [ style = bold]
+"grp:2_start_0" -> "rsc2:2_start_0 node2" [ style = bold]
 "grp:2_start_0" [ style=bold color="green" fontcolor="orange"]
-"rsc1:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
-"rsc1:2_start_0 node1" -> "grp:2_running_0" [ style = bold]
-"rsc1:2_start_0 node1" -> "rsc1:2_monitor_10000 node1" [ style = bold]
-"rsc1:2_start_0 node1" -> "rsc2:2_start_0 node1" [ style = bold]
-"rsc1:2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc1:2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+"rsc1:2_start_0 node2" -> "grp:2_running_0" [ style = bold]
+"rsc1:2_start_0 node2" -> "rsc1:2_monitor_10000 node2" [ style = bold]
+"rsc1:2_start_0 node2" -> "rsc2:2_start_0 node2" [ style = bold]
+"rsc1:2_start_0 node2" [ style=bold color="green" fontcolor="black"]
 "rsc1_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
 "rsc1_start_0 node1" -> "grp:0_running_0" [ style = bold]
-"rsc1_start_0 node1" -> "grp:1_running_0" [ style = bold]
 "rsc1_start_0 node1" -> "rsc1_monitor_10000 node1" [ style = bold]
 "rsc1_start_0 node1" -> "rsc2_start_0 node1" [ style = bold]
 "rsc1_start_0 node1" [ style=bold color="green" fontcolor="black"]
 "rsc1_stop_0 node2" -> "grp:0_stopped_0" [ style = bold]
 "rsc1_stop_0 node2" -> "rsc1_start_0 node1" [ style = bold]
 "rsc1_stop_0 node2" [ style=bold color="green" fontcolor="black"]
-"rsc1_stop_0 node3" -> "grp:1_stopped_0" [ style = bold]
-"rsc1_stop_0 node3" -> "rsc1_start_0 node1" [ style = bold]
-"rsc1_stop_0 node3" [ style=bold color="green" fontcolor="black"]
-"rsc2:2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
-"rsc2:2_start_0 node1" -> "grp:2_running_0" [ style = bold]
-"rsc2:2_start_0 node1" -> "rsc2:2_monitor_10000 node1" [ style = bold]
-"rsc2:2_start_0 node1" [ style=bold color="green" fontcolor="black"]
+"rsc2:2_monitor_10000 node2" [ style=bold color="green" fontcolor="black"]
+"rsc2:2_start_0 node2" -> "grp:2_running_0" [ style = bold]
+"rsc2:2_start_0 node2" -> "rsc2:2_monitor_10000 node2" [ style = bold]
+"rsc2:2_start_0 node2" [ style=bold color="green" fontcolor="black"]
 "rsc2_monitor_10000 node1" [ style=bold color="green" fontcolor="black"]
 "rsc2_start_0 node1" -> "grp:0_running_0" [ style = bold]
-"rsc2_start_0 node1" -> "grp:1_running_0" [ style = bold]
 "rsc2_start_0 node1" -> "rsc2_monitor_10000 node1" [ style = bold]
 "rsc2_start_0 node1" [ style=bold color="green" fontcolor="black"]
 "rsc2_stop_0 node2" -> "grp:0_stopped_0" [ style = bold]
 "rsc2_stop_0 node2" -> "rsc1_stop_0 node2" [ style = bold]
 "rsc2_stop_0 node2" -> "rsc2_start_0 node1" [ style = bold]
 "rsc2_stop_0 node2" [ style=bold color="green" fontcolor="black"]
-"rsc2_stop_0 node3" -> "grp:1_stopped_0" [ style = bold]
-"rsc2_stop_0 node3" -> "rsc1_stop_0 node3" [ style = bold]
-"rsc2_stop_0 node3" -> "rsc2_start_0 node1" [ style = bold]
-"rsc2_stop_0 node3" [ style=bold color="green" fontcolor="black"]
 }
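
Note on reading the .dot hunks above: in these scheduler graphs every listed node is an action scheduled for the transition (style=bold, color=green); fontcolor=orange marks pseudo-actions that exist only to order other actions (e.g. grp:0_start_0), while fontcolor=black marks real operations executed on a node. The diff therefore shows grp:1's entire subgraph dropping out — that instance no longer moves — and grp:2's start/monitor actions retargeting from node1 to node2. Graphviz can render the file for visual inspection (e.g. dot -Tsvg on the .dot file).
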
diff --git a/cts/scheduler/exp/clone-recover-no-shuffle-5.exp b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp
index 8a8e799793..c1cee43b12 100644
--- a/cts/scheduler/exp/clone-recover-no-shuffle-5.exp
+++ b/cts/scheduler/exp/clone-recover-no-shuffle-5.exp
@@ -1,452 +1,293 @@
 <transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY"  transition_id="0">
   <synapse id="0">
     <action_set>
       <pseudo_event id="17" operation="stopped" operation_key="grp:0_stopped_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="8" operation="stop" operation_key="rsc1_stop_0" internal_operation_key="rsc1:0_stop_0" on_node="node2" on_node_uuid="2"/>
       </trigger>
       <trigger>
         <rsc_op id="11" operation="stop" operation_key="rsc2_stop_0" internal_operation_key="rsc2:0_stop_0" on_node="node2" on_node_uuid="2"/>
       </trigger>
       <trigger>
         <pseudo_event id="16" operation="stop" operation_key="grp:0_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="1">
     <action_set>
       <pseudo_event id="16" operation="stop" operation_key="grp:0_stop_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
-        <pseudo_event id="38" operation="stop" operation_key="grp-clone_stop_0"/>
+        <pseudo_event id="36" operation="stop" operation_key="grp-clone_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="2">
     <action_set>
       <pseudo_event id="15" operation="running" operation_key="grp:0_running_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="9" operation="start" operation_key="rsc1_start_0" internal_operation_key="rsc1:0_start_0" on_node="node1" on_node_uuid="1"/>
       </trigger>
       <trigger>
         <rsc_op id="12" operation="start" operation_key="rsc2_start_0" internal_operation_key="rsc2:0_start_0" on_node="node1" on_node_uuid="1"/>
       </trigger>
       <trigger>
         <pseudo_event id="14" operation="start" operation_key="grp:0_start_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="3">
     <action_set>
       <pseudo_event id="14" operation="start" operation_key="grp:0_start_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="17" operation="stopped" operation_key="grp:0_stopped_0"/>
       </trigger>
       <trigger>
-        <pseudo_event id="36" operation="start" operation_key="grp-clone_start_0"/>
+        <pseudo_event id="34" operation="start" operation_key="grp-clone_start_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="4">
     <action_set>
       <rsc_op id="10" operation="monitor" operation_key="rsc1_monitor_10000" internal_operation_key="rsc1:0_monitor_10000" on_node="node1" on_node_uuid="1">
         <primitive id="rsc1" long-id="rsc1:0" class="ocf" provider="pacemaker" type="Dummy"/>
         <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="9" operation="start" operation_key="rsc1_start_0" internal_operation_key="rsc1:0_start_0" on_node="node1" on_node_uuid="1"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="5">
     <action_set>
       <rsc_op id="9" operation="start" operation_key="rsc1_start_0" internal_operation_key="rsc1:0_start_0" on_node="node1" on_node_uuid="1">
         <primitive id="rsc1" long-id="rsc1:0" class="ocf" provider="pacemaker" type="Dummy"/>
         <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="8" operation="stop" operation_key="rsc1_stop_0" internal_operation_key="rsc1:0_stop_0" on_node="node2" on_node_uuid="2"/>
       </trigger>
       <trigger>
         <pseudo_event id="14" operation="start" operation_key="grp:0_start_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="6">
     <action_set>
       <rsc_op id="8" operation="stop" operation_key="rsc1_stop_0" internal_operation_key="rsc1:0_stop_0" on_node="node2" on_node_uuid="2">
         <primitive id="rsc1" long-id="rsc1:0" class="ocf" provider="pacemaker" type="Dummy"/>
         <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="11" operation="stop" operation_key="rsc2_stop_0" internal_operation_key="rsc2:0_stop_0" on_node="node2" on_node_uuid="2"/>
       </trigger>
       <trigger>
         <pseudo_event id="16" operation="stop" operation_key="grp:0_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="7">
     <action_set>
       <rsc_op id="13" operation="monitor" operation_key="rsc2_monitor_10000" internal_operation_key="rsc2:0_monitor_10000" on_node="node1" on_node_uuid="1">
         <primitive id="rsc2" long-id="rsc2:0" class="ocf" provider="pacemaker" type="Dummy"/>
         <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="12" operation="start" operation_key="rsc2_start_0" internal_operation_key="rsc2:0_start_0" on_node="node1" on_node_uuid="1"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="8">
     <action_set>
       <rsc_op id="12" operation="start" operation_key="rsc2_start_0" internal_operation_key="rsc2:0_start_0" on_node="node1" on_node_uuid="1">
         <primitive id="rsc2" long-id="rsc2:0" class="ocf" provider="pacemaker" type="Dummy"/>
         <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <rsc_op id="9" operation="start" operation_key="rsc1_start_0" internal_operation_key="rsc1:0_start_0" on_node="node1" on_node_uuid="1"/>
       </trigger>
       <trigger>
         <rsc_op id="11" operation="stop" operation_key="rsc2_stop_0" internal_operation_key="rsc2:0_stop_0" on_node="node2" on_node_uuid="2"/>
       </trigger>
       <trigger>
         <pseudo_event id="14" operation="start" operation_key="grp:0_start_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="9">
     <action_set>
       <rsc_op id="11" operation="stop" operation_key="rsc2_stop_0" internal_operation_key="rsc2:0_stop_0" on_node="node2" on_node_uuid="2">
         <primitive id="rsc2" long-id="rsc2:0" class="ocf" provider="pacemaker" type="Dummy"/>
         <attributes CRM_meta_clone="0" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="16" operation="stop" operation_key="grp:0_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="10">
     <action_set>
-      <pseudo_event id="27" operation="stopped" operation_key="grp:1_stopped_0">
+      <pseudo_event id="31" operation="running" operation_key="grp:2_running_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
-        <rsc_op id="18" operation="stop" operation_key="rsc1_stop_0" internal_operation_key="rsc1:1_stop_0" on_node="node3" on_node_uuid="3"/>
+        <rsc_op id="26" operation="start" operation_key="rsc1:2_start_0" on_node="node2" on_node_uuid="2"/>
       </trigger>
       <trigger>
-        <rsc_op id="21" operation="stop" operation_key="rsc2_stop_0" internal_operation_key="rsc2:1_stop_0" on_node="node3" on_node_uuid="3"/>
+        <rsc_op id="28" operation="start" operation_key="rsc2:2_start_0" on_node="node2" on_node_uuid="2"/>
       </trigger>
       <trigger>
-        <pseudo_event id="26" operation="stop" operation_key="grp:1_stop_0"/>
+        <pseudo_event id="30" operation="start" operation_key="grp:2_start_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="11">
     <action_set>
-      <pseudo_event id="26" operation="stop" operation_key="grp:1_stop_0">
+      <pseudo_event id="30" operation="start" operation_key="grp:2_start_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
-        <pseudo_event id="38" operation="stop" operation_key="grp-clone_stop_0"/>
+        <pseudo_event id="34" operation="start" operation_key="grp-clone_start_0"/>
       </trigger>
     </inputs>
   </synapse>
   <synapse id="12">
     <action_set>
-      <pseudo_event id="25" operation="running" operation_key="grp:1_running_0">
-        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
-      </pseudo_event>
-    </action_set>
-    <inputs>
-      <trigger>
-        <rsc_op id="19" operation="start" operation_key="rsc1_start_0" internal_operation_key="rsc1:1_start_0" on_node="node1" on_node_uuid="1"/>
-      </trigger>
-      <trigger>
-        <rsc_op id="22" operation="start" operation_key="rsc2_start_0" internal_operation_key="rsc2:1_start_0" on_node="node1" on_node_uuid="1"/>
-      </trigger>
-      <trigger>
-        <pseudo_event id="24" operation="start" operation_key="grp:1_start_0"/>
-      </trigger>
-    </inputs>
-  </synapse>
-  <synapse id="13">
-    <action_set>
-      <pseudo_event id="24" operation="start" operation_key="grp:1_start_0">
-        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
-      </pseudo_event>
-    </action_set>
-    <inputs>
-      <trigger>
-        <pseudo_event id="27" operation="stopped" operation_key="grp:1_stopped_0"/>
-      </trigger>
-      <trigger>
-        <pseudo_event id="36" operation="start" operation_key="grp-clone_start_0"/>
-      </trigger>
-    </inputs>
-  </synapse>
-  <synapse id="14">
-    <action_set>
-      <rsc_op id="20" operation="monitor" operation_key="rsc1_monitor_10000" internal_operation_key="rsc1:1_monitor_10000" on_node="node1" on_node_uuid="1">
-        <primitive id="rsc1" long-id="rsc1:1" class="ocf" provider="pacemaker" type="Dummy"/>
-        <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
-      </rsc_op>
-    </action_set>
-    <inputs>
-      <trigger>
-        <rsc_op id="19" operation="start" operation_key="rsc1_start_0" internal_operation_key="rsc1:1_start_0" on_node="node1" on_node_uuid="1"/>
-      </trigger>
-    </inputs>
-  </synapse>
-  <synapse id="15">
-    <action_set>
-      <rsc_op id="19" operation="start" operation_key="rsc1_start_0" internal_operation_key="rsc1:1_start_0" on_node="node1" on_node_uuid="1">
-        <primitive id="rsc1" long-id="rsc1:1" class="ocf" provider="pacemaker" type="Dummy"/>
-        <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
-      </rsc_op>
-    </action_set>
-    <inputs>
-      <trigger>
-        <rsc_op id="18" operation="stop" operation_key="rsc1_stop_0" internal_operation_key="rsc1:1_stop_0" on_node="node3" on_node_uuid="3"/>
-      </trigger>
-      <trigger>
-        <pseudo_event id="24" operation="start" operation_key="grp:1_start_0"/>
-      </trigger>
-    </inputs>
-  </synapse>
-  <synapse id="16">
-    <action_set>
-      <rsc_op id="18" operation="stop" operation_key="rsc1_stop_0" internal_operation_key="rsc1:1_stop_0" on_node="node3" on_node_uuid="3">
-        <primitive id="rsc1" long-id="rsc1:1" class="ocf" provider="pacemaker" type="Dummy"/>
-        <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" />
-      </rsc_op>
-    </action_set>
-    <inputs>
-      <trigger>
-        <rsc_op id="21" operation="stop" operation_key="rsc2_stop_0" internal_operation_key="rsc2:1_stop_0" on_node="node3" on_node_uuid="3"/>
-      </trigger>
-      <trigger>
-        <pseudo_event id="26" operation="stop" operation_key="grp:1_stop_0"/>
-      </trigger>
-    </inputs>
-  </synapse>
-  <synapse id="17">
-    <action_set>
-      <rsc_op id="23" operation="monitor" operation_key="rsc2_monitor_10000" internal_operation_key="rsc2:1_monitor_10000" on_node="node1" on_node_uuid="1">
-        <primitive id="rsc2" long-id="rsc2:1" class="ocf" provider="pacemaker" type="Dummy"/>
-        <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
-      </rsc_op>
-    </action_set>
-    <inputs>
-      <trigger>
-        <rsc_op id="22" operation="start" operation_key="rsc2_start_0" internal_operation_key="rsc2:1_start_0" on_node="node1" on_node_uuid="1"/>
-      </trigger>
-    </inputs>
-  </synapse>
-  <synapse id="18">
-    <action_set>
-      <rsc_op id="22" operation="start" operation_key="rsc2_start_0" internal_operation_key="rsc2:1_start_0" on_node="node1" on_node_uuid="1">
-        <primitive id="rsc2" long-id="rsc2:1" class="ocf" provider="pacemaker" type="Dummy"/>
-        <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
-      </rsc_op>
-    </action_set>
-    <inputs>
-      <trigger>
-        <rsc_op id="19" operation="start" operation_key="rsc1_start_0" internal_operation_key="rsc1:1_start_0" on_node="node1" on_node_uuid="1"/>
-      </trigger>
-      <trigger>
-        <rsc_op id="21" operation="stop" operation_key="rsc2_stop_0" internal_operation_key="rsc2:1_stop_0" on_node="node3" on_node_uuid="3"/>
-      </trigger>
-      <trigger>
-        <pseudo_event id="24" operation="start" operation_key="grp:1_start_0"/>
-      </trigger>
-    </inputs>
-  </synapse>
-  <synapse id="19">
-    <action_set>
-      <rsc_op id="21" operation="stop" operation_key="rsc2_stop_0" internal_operation_key="rsc2:1_stop_0" on_node="node3" on_node_uuid="3">
-        <primitive id="rsc2" long-id="rsc2:1" class="ocf" provider="pacemaker" type="Dummy"/>
-        <attributes CRM_meta_clone="1" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_on_node="node3" CRM_meta_on_node_uuid="3" CRM_meta_timeout="20000" />
-      </rsc_op>
-    </action_set>
-    <inputs>
-      <trigger>
-        <pseudo_event id="26" operation="stop" operation_key="grp:1_stop_0"/>
-      </trigger>
-    </inputs>
-  </synapse>
-  <synapse id="20">
-    <action_set>
-      <pseudo_event id="33" operation="running" operation_key="grp:2_running_0">
-        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
-      </pseudo_event>
-    </action_set>
-    <inputs>
-      <trigger>
-        <rsc_op id="28" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1"/>
-      </trigger>
-      <trigger>
-        <rsc_op id="30" operation="start" operation_key="rsc2:2_start_0" on_node="node1" on_node_uuid="1"/>
-      </trigger>
-      <trigger>
-        <pseudo_event id="32" operation="start" operation_key="grp:2_start_0"/>
-      </trigger>
-    </inputs>
-  </synapse>
-  <synapse id="21">
-    <action_set>
-      <pseudo_event id="32" operation="start" operation_key="grp:2_start_0">
-        <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
-      </pseudo_event>
-    </action_set>
-    <inputs>
-      <trigger>
-        <pseudo_event id="36" operation="start" operation_key="grp-clone_start_0"/>
-      </trigger>
-    </inputs>
-  </synapse>
-  <synapse id="22">
-    <action_set>
-      <rsc_op id="29" operation="monitor" operation_key="rsc1:2_monitor_10000" on_node="node1" on_node_uuid="1">
+      <rsc_op id="27" operation="monitor" operation_key="rsc1:2_monitor_10000" on_node="node2" on_node_uuid="2">
         <primitive id="rsc1" long-id="rsc1:2" class="ocf" provider="pacemaker" type="Dummy"/>
-        <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+        <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
-        <rsc_op id="28" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1"/>
+        <rsc_op id="26" operation="start" operation_key="rsc1:2_start_0" on_node="node2" on_node_uuid="2"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="23">
+  <synapse id="13">
     <action_set>
-      <rsc_op id="28" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1">
+      <rsc_op id="26" operation="start" operation_key="rsc1:2_start_0" on_node="node2" on_node_uuid="2">
         <primitive id="rsc1" long-id="rsc1:2" class="ocf" provider="pacemaker" type="Dummy"/>
-        <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+        <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
-        <pseudo_event id="32" operation="start" operation_key="grp:2_start_0"/>
+        <pseudo_event id="30" operation="start" operation_key="grp:2_start_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="24">
+  <synapse id="14">
     <action_set>
-      <rsc_op id="31" operation="monitor" operation_key="rsc2:2_monitor_10000" on_node="node1" on_node_uuid="1">
+      <rsc_op id="29" operation="monitor" operation_key="rsc2:2_monitor_10000" on_node="node2" on_node_uuid="2">
         <primitive id="rsc2" long-id="rsc2:2" class="ocf" provider="pacemaker" type="Dummy"/>
-        <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+        <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
-        <rsc_op id="30" operation="start" operation_key="rsc2:2_start_0" on_node="node1" on_node_uuid="1"/>
+        <rsc_op id="28" operation="start" operation_key="rsc2:2_start_0" on_node="node2" on_node_uuid="2"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="25">
+  <synapse id="15">
     <action_set>
-      <rsc_op id="30" operation="start" operation_key="rsc2:2_start_0" on_node="node1" on_node_uuid="1">
+      <rsc_op id="28" operation="start" operation_key="rsc2:2_start_0" on_node="node2" on_node_uuid="2">
         <primitive id="rsc2" long-id="rsc2:2" class="ocf" provider="pacemaker" type="Dummy"/>
-        <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node1" CRM_meta_on_node_uuid="1" CRM_meta_timeout="20000" />
+        <attributes CRM_meta_clone="2" CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_on_node="node2" CRM_meta_on_node_uuid="2" CRM_meta_timeout="20000" />
       </rsc_op>
     </action_set>
     <inputs>
       <trigger>
-        <rsc_op id="28" operation="start" operation_key="rsc1:2_start_0" on_node="node1" on_node_uuid="1"/>
+        <rsc_op id="26" operation="start" operation_key="rsc1:2_start_0" on_node="node2" on_node_uuid="2"/>
       </trigger>
       <trigger>
-        <pseudo_event id="32" operation="start" operation_key="grp:2_start_0"/>
+        <pseudo_event id="30" operation="start" operation_key="grp:2_start_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="26" priority="1000000">
+  <synapse id="16" priority="1000000">
     <action_set>
-      <pseudo_event id="39" operation="stopped" operation_key="grp-clone_stopped_0">
+      <pseudo_event id="37" operation="stopped" operation_key="grp-clone_stopped_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="17" operation="stopped" operation_key="grp:0_stopped_0"/>
       </trigger>
       <trigger>
-        <pseudo_event id="27" operation="stopped" operation_key="grp:1_stopped_0"/>
-      </trigger>
-      <trigger>
-        <pseudo_event id="38" operation="stop" operation_key="grp-clone_stop_0"/>
+        <pseudo_event id="36" operation="stop" operation_key="grp-clone_stop_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="27">
+  <synapse id="17">
     <action_set>
-      <pseudo_event id="38" operation="stop" operation_key="grp-clone_stop_0">
+      <pseudo_event id="36" operation="stop" operation_key="grp-clone_stop_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs/>
   </synapse>
-  <synapse id="28" priority="1000000">
+  <synapse id="18" priority="1000000">
     <action_set>
-      <pseudo_event id="37" operation="running" operation_key="grp-clone_running_0">
+      <pseudo_event id="35" operation="running" operation_key="grp-clone_running_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
         <pseudo_event id="15" operation="running" operation_key="grp:0_running_0"/>
       </trigger>
       <trigger>
-        <pseudo_event id="25" operation="running" operation_key="grp:1_running_0"/>
+        <pseudo_event id="31" operation="running" operation_key="grp:2_running_0"/>
       </trigger>
       <trigger>
-        <pseudo_event id="33" operation="running" operation_key="grp:2_running_0"/>
-      </trigger>
-      <trigger>
-        <pseudo_event id="36" operation="start" operation_key="grp-clone_start_0"/>
+        <pseudo_event id="34" operation="start" operation_key="grp-clone_start_0"/>
       </trigger>
     </inputs>
   </synapse>
-  <synapse id="29">
+  <synapse id="19">
     <action_set>
-      <pseudo_event id="36" operation="start" operation_key="grp-clone_start_0">
+      <pseudo_event id="34" operation="start" operation_key="grp-clone_start_0">
         <attributes CRM_meta_clone_max="3" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" />
       </pseudo_event>
     </action_set>
     <inputs>
       <trigger>
-        <pseudo_event id="39" operation="stopped" operation_key="grp-clone_stopped_0"/>
+        <pseudo_event id="37" operation="stopped" operation_key="grp-clone_stopped_0"/>
       </trigger>
     </inputs>
   </synapse>
 </transition_graph>
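
Note on the .exp hunks: each <synapse> pairs one <action_set> with the <inputs> triggers that must complete before it fires, so the file carries the same ordering information as the .dot graph, in the form the controller executes. Most of the churn is mechanical: removing grp:1's actions compacts the action-id space, which is why otherwise-unchanged synapses show id shifts such as 38→36, 36→34, and 33→31.
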
diff --git a/cts/scheduler/scores/clone-recover-no-shuffle-5.scores b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores
index eecba43fae..0dd9728830 100644
--- a/cts/scheduler/scores/clone-recover-no-shuffle-5.scores
+++ b/cts/scheduler/scores/clone-recover-no-shuffle-5.scores
@@ -1,79 +1,109 @@
 
 pcmk__clone_assign: grp-clone allocation score on node1: 100
 pcmk__clone_assign: grp-clone allocation score on node2: 0
 pcmk__clone_assign: grp-clone allocation score on node3: 0
 pcmk__clone_assign: grp:0 allocation score on node1: 100
 pcmk__clone_assign: grp:0 allocation score on node2: 0
 pcmk__clone_assign: grp:0 allocation score on node3: 0
 pcmk__clone_assign: grp:1 allocation score on node1: 100
 pcmk__clone_assign: grp:1 allocation score on node2: 0
 pcmk__clone_assign: grp:1 allocation score on node3: 0
 pcmk__clone_assign: grp:2 allocation score on node1: 100
 pcmk__clone_assign: grp:2 allocation score on node2: 0
 pcmk__clone_assign: grp:2 allocation score on node3: 0
 pcmk__clone_assign: rsc1:0 allocation score on node1: 100
 pcmk__clone_assign: rsc1:0 allocation score on node2: 1
 pcmk__clone_assign: rsc1:0 allocation score on node3: 0
 pcmk__clone_assign: rsc1:1 allocation score on node1: 100
 pcmk__clone_assign: rsc1:1 allocation score on node2: 0
 pcmk__clone_assign: rsc1:1 allocation score on node3: 1
 pcmk__clone_assign: rsc1:2 allocation score on node1: 100
 pcmk__clone_assign: rsc1:2 allocation score on node2: 0
 pcmk__clone_assign: rsc1:2 allocation score on node3: 0
 pcmk__clone_assign: rsc2:0 allocation score on node1: 0
 pcmk__clone_assign: rsc2:0 allocation score on node2: 1
 pcmk__clone_assign: rsc2:0 allocation score on node3: 0
 pcmk__clone_assign: rsc2:1 allocation score on node1: 0
 pcmk__clone_assign: rsc2:1 allocation score on node2: 0
 pcmk__clone_assign: rsc2:1 allocation score on node3: 1
 pcmk__clone_assign: rsc2:2 allocation score on node1: 0
 pcmk__clone_assign: rsc2:2 allocation score on node2: 0
 pcmk__clone_assign: rsc2:2 allocation score on node3: 0
 pcmk__group_assign: grp:0 allocation score on node1: 100
+pcmk__group_assign: grp:0 allocation score on node1: 100
+pcmk__group_assign: grp:0 allocation score on node2: 0
 pcmk__group_assign: grp:0 allocation score on node2: 0
 pcmk__group_assign: grp:0 allocation score on node3: 0
+pcmk__group_assign: grp:0 allocation score on node3: 0
+pcmk__group_assign: grp:1 allocation score on node1: -INFINITY
 pcmk__group_assign: grp:1 allocation score on node1: 100
 pcmk__group_assign: grp:1 allocation score on node2: 0
+pcmk__group_assign: grp:1 allocation score on node2: 0
+pcmk__group_assign: grp:1 allocation score on node3: 0
 pcmk__group_assign: grp:1 allocation score on node3: 0
-pcmk__group_assign: grp:2 allocation score on node1: 100
+pcmk__group_assign: grp:2 allocation score on node1: -INFINITY
 pcmk__group_assign: grp:2 allocation score on node2: 0
-pcmk__group_assign: grp:2 allocation score on node3: 0
+pcmk__group_assign: grp:2 allocation score on node3: -INFINITY
+pcmk__group_assign: rsc1:0 allocation score on node1: 100
 pcmk__group_assign: rsc1:0 allocation score on node1: 100
 pcmk__group_assign: rsc1:0 allocation score on node2: 1
+pcmk__group_assign: rsc1:0 allocation score on node2: 1
+pcmk__group_assign: rsc1:0 allocation score on node3: 0
 pcmk__group_assign: rsc1:0 allocation score on node3: 0
+pcmk__group_assign: rsc1:1 allocation score on node1: -INFINITY
 pcmk__group_assign: rsc1:1 allocation score on node1: 100
 pcmk__group_assign: rsc1:1 allocation score on node2: 0
+pcmk__group_assign: rsc1:1 allocation score on node2: 0
 pcmk__group_assign: rsc1:1 allocation score on node3: 1
-pcmk__group_assign: rsc1:2 allocation score on node1: 100
+pcmk__group_assign: rsc1:1 allocation score on node3: 1
+pcmk__group_assign: rsc1:2 allocation score on node1: -INFINITY
 pcmk__group_assign: rsc1:2 allocation score on node2: 0
-pcmk__group_assign: rsc1:2 allocation score on node3: 0
+pcmk__group_assign: rsc1:2 allocation score on node3: -INFINITY
+pcmk__group_assign: rsc2:0 allocation score on node1: 0
 pcmk__group_assign: rsc2:0 allocation score on node1: 0
 pcmk__group_assign: rsc2:0 allocation score on node2: 1
+pcmk__group_assign: rsc2:0 allocation score on node2: 1
 pcmk__group_assign: rsc2:0 allocation score on node3: 0
+pcmk__group_assign: rsc2:0 allocation score on node3: 0
+pcmk__group_assign: rsc2:1 allocation score on node1: -INFINITY
 pcmk__group_assign: rsc2:1 allocation score on node1: 0
 pcmk__group_assign: rsc2:1 allocation score on node2: 0
+pcmk__group_assign: rsc2:1 allocation score on node2: 0
+pcmk__group_assign: rsc2:1 allocation score on node3: 1
 pcmk__group_assign: rsc2:1 allocation score on node3: 1
-pcmk__group_assign: rsc2:2 allocation score on node1: 0
+pcmk__group_assign: rsc2:2 allocation score on node1: -INFINITY
 pcmk__group_assign: rsc2:2 allocation score on node2: 0
-pcmk__group_assign: rsc2:2 allocation score on node3: 0
+pcmk__group_assign: rsc2:2 allocation score on node3: -INFINITY
 pcmk__primitive_assign: Fencing allocation score on node1: 0
 pcmk__primitive_assign: Fencing allocation score on node2: 0
 pcmk__primitive_assign: Fencing allocation score on node3: 0
 pcmk__primitive_assign: rsc1:0 allocation score on node1: 100
+pcmk__primitive_assign: rsc1:0 allocation score on node1: 100
+pcmk__primitive_assign: rsc1:0 allocation score on node2: 2
 pcmk__primitive_assign: rsc1:0 allocation score on node2: 2
 pcmk__primitive_assign: rsc1:0 allocation score on node3: 0
+pcmk__primitive_assign: rsc1:0 allocation score on node3: 0
+pcmk__primitive_assign: rsc1:1 allocation score on node1: -INFINITY
 pcmk__primitive_assign: rsc1:1 allocation score on node1: 100
 pcmk__primitive_assign: rsc1:1 allocation score on node2: 0
+pcmk__primitive_assign: rsc1:1 allocation score on node2: 0
+pcmk__primitive_assign: rsc1:1 allocation score on node3: 2
 pcmk__primitive_assign: rsc1:1 allocation score on node3: 2
-pcmk__primitive_assign: rsc1:2 allocation score on node1: 100
+pcmk__primitive_assign: rsc1:2 allocation score on node1: -INFINITY
 pcmk__primitive_assign: rsc1:2 allocation score on node2: 0
-pcmk__primitive_assign: rsc1:2 allocation score on node3: 0
+pcmk__primitive_assign: rsc1:2 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc2:0 allocation score on node1: 0
 pcmk__primitive_assign: rsc2:0 allocation score on node1: 0
 pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc2:0 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY
 pcmk__primitive_assign: rsc2:0 allocation score on node3: -INFINITY
+pcmk__primitive_assign: rsc2:1 allocation score on node1: -INFINITY
 pcmk__primitive_assign: rsc2:1 allocation score on node1: 0
 pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc2:1 allocation score on node2: -INFINITY
 pcmk__primitive_assign: rsc2:1 allocation score on node3: -INFINITY
-pcmk__primitive_assign: rsc2:2 allocation score on node1: 0
-pcmk__primitive_assign: rsc2:2 allocation score on node2: -INFINITY
+pcmk__primitive_assign: rsc2:1 allocation score on node3: 1
+pcmk__primitive_assign: rsc2:2 allocation score on node1: -INFINITY
+pcmk__primitive_assign: rsc2:2 allocation score on node2: 0
 pcmk__primitive_assign: rsc2:2 allocation score on node3: -INFINITY
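
Note on the .scores hunks: this file is the sorted allocation-score trace, and most group/primitive lines now appear twice, which is consistent with the fixed scheduler considering each instance in two stages (a tentative attempt to keep it on its current node, then the final assignment); where the attempts disagree, both values appear in the sorted output — e.g. grp:1 scores both 100 and -INFINITY on node1, and rsc2:2 ends up with 0 on node2 rather than node1. These files are regenerated from the scheduler (cts/cts-scheduler --update) rather than edited by hand.
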
diff --git a/cts/scheduler/summary/clone-recover-no-shuffle-5.summary b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary
index e84d0a574d..121214c42a 100644
--- a/cts/scheduler/summary/clone-recover-no-shuffle-5.summary
+++ b/cts/scheduler/summary/clone-recover-no-shuffle-5.summary
@@ -1,59 +1,46 @@
 Current cluster status:
   * Node List:
     * Online: [ node1 node2 node3 ]
 
   * Full List of Resources:
     * Fencing	(stonith:fence_xvm):	 Started node2
     * Clone Set: grp-clone [grp]:
       * Started: [ node2 node3 ]
       * Stopped: [ node1 ]
 
 Transition Summary:
   * Move       rsc1:0     ( node2 -> node1 )
   * Move       rsc2:0     ( node2 -> node1 )
-  * Move       rsc1:1     ( node3 -> node1 )
-  * Move       rsc2:1     ( node3 -> node1 )
-  * Start      rsc1:2     (          node1 )
-  * Start      rsc2:2     (          node1 )
+  * Start      rsc1:2     (          node2 )
+  * Start      rsc2:2     (          node2 )
 
 Executing Cluster Transition:
   * Pseudo action:   grp-clone_stop_0
   * Pseudo action:   grp:0_stop_0
   * Resource action: rsc2            stop on node2
-  * Pseudo action:   grp:1_stop_0
-  * Resource action: rsc2            stop on node3
   * Resource action: rsc1            stop on node2
-  * Resource action: rsc1            stop on node3
   * Pseudo action:   grp:0_stopped_0
-  * Pseudo action:   grp:1_stopped_0
   * Pseudo action:   grp-clone_stopped_0
   * Pseudo action:   grp-clone_start_0
   * Pseudo action:   grp:0_start_0
   * Resource action: rsc1            start on node1
   * Resource action: rsc2            start on node1
-  * Pseudo action:   grp:1_start_0
-  * Resource action: rsc1            start on node1
-  * Resource action: rsc2            start on node1
   * Pseudo action:   grp:2_start_0
-  * Resource action: rsc1            start on node1
-  * Resource action: rsc2            start on node1
+  * Resource action: rsc1            start on node2
+  * Resource action: rsc2            start on node2
   * Pseudo action:   grp:0_running_0
   * Resource action: rsc1            monitor=10000 on node1
   * Resource action: rsc2            monitor=10000 on node1
-  * Pseudo action:   grp:1_running_0
-  * Resource action: rsc1            monitor=10000 on node1
-  * Resource action: rsc2            monitor=10000 on node1
   * Pseudo action:   grp:2_running_0
-  * Resource action: rsc1            monitor=10000 on node1
-  * Resource action: rsc2            monitor=10000 on node1
+  * Resource action: rsc1            monitor=10000 on node2
+  * Resource action: rsc2            monitor=10000 on node2
   * Pseudo action:   grp-clone_running_0
 
 Revised Cluster Status:
   * Node List:
     * Online: [ node1 node2 node3 ]
 
   * Full List of Resources:
     * Fencing	(stonith:fence_xvm):	 Started node2
     * Clone Set: grp-clone [grp]:
-      * Started: [ node1 ]
-      * Stopped: [ node2 node3 ]
+      * Started: [ node1 node2 node3 ]
diff --git a/cts/scheduler/xml/clone-recover-no-shuffle-5.xml b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml
index 67176dc1a0..45f3b5a9f3 100644
--- a/cts/scheduler/xml/clone-recover-no-shuffle-5.xml
+++ b/cts/scheduler/xml/clone-recover-no-shuffle-5.xml
@@ -1,148 +1,148 @@
 <cib crm_feature_set="3.17.4" validate-with="pacemaker-3.9" epoch="83" num_updates="0" admin_epoch="0" cib-last-written="Tue Jun 20 18:21:31 2023" update-origin="node1" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="2">
   <configuration>
     <!-- The essential elements of this test are:
          * An anonymous clone resource (grp) is stopped on node1 and
            started on node2 and node3
          * Clone instances are resource groups consisting of primitives rsc1 and
            rsc2
          * There is no stickiness configured
          * grp-clone prefers node1 (score=100)
 
          The following should happen:
          * Instance grp:0 should remain started on node2
          * Instance grp:1 should remain started on node3
          * Instance grp:2 should start on node1
 
          This test output is incorrect:
-         * Instance grp:0 moves from node2 to node1
-         * Instance grp:1 moves from node3 to node1
-         * Instance grp:2 starts on node1 (correct)
+         * Instance grp:0 moves to node1
+         * Instance grp:1 remains started on node3 (correct)
+         * Instance grp:2 starts on node2
       -->
     <crm_config>
       <cluster_property_set id="cib-bootstrap-options">
         <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
         <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.1.5-1.0a457786a.git.el9-0a457786a"/>
         <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
         <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="test"/>
         <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1687288330"/>
       </cluster_property_set>
     </crm_config>
     <nodes>
       <node id="1" uname="node1"/>
       <node id="2" uname="node2"/>
       <node id="3" uname="node3"/>
     </nodes>
     <resources>
       <primitive class="stonith" id="Fencing" type="fence_xvm">
         <instance_attributes id="Fencing-instance_attributes">
           <nvpair id="Fencing-instance_attributes_pcmk_host_list" name="pcmk_host_list" value="node1 node2 node3"/>
         </instance_attributes>
         <operations>
           <op id="Fencing-monitor-120s" interval="120s" name="monitor" timeout="120s"/>
         </operations>
       </primitive>
       <clone id="grp-clone">
         <group id="grp">
           <primitive id="rsc1" class="ocf" type="Dummy" provider="pacemaker">
             <operations>
               <op name="migrate_from" interval="0s" timeout="20s" id="rsc1-migrate_from-interval-0s"/>
               <op name="migrate_to" interval="0s" timeout="20s" id="rsc1-migrate_to-interval-0s"/>
               <op name="monitor" interval="10s" timeout="20s" id="rsc1-monitor-interval-10s"/>
               <op name="reload" interval="0s" timeout="20s" id="rsc1-reload-interval-0s"/>
               <op name="reload-agent" interval="0s" timeout="20s" id="rsc1-reload-agent-interval-0s"/>
               <op name="start" interval="0s" timeout="20s" id="rsc1-start-interval-0s"/>
               <op name="stop" interval="0s" timeout="20s" id="rsc1-stop-interval-0s"/>
             </operations>
           </primitive>
           <primitive id="rsc2" class="ocf" type="Dummy" provider="pacemaker">
             <operations>
               <op name="migrate_from" interval="0s" timeout="20s" id="rsc2-migrate_from-interval-0s"/>
               <op name="migrate_to" interval="0s" timeout="20s" id="rsc2-migrate_to-interval-0s"/>
               <op name="monitor" interval="10s" timeout="20s" id="rsc2-monitor-interval-10s"/>
               <op name="reload" interval="0s" timeout="20s" id="rsc2-reload-interval-0s"/>
               <op name="reload-agent" interval="0s" timeout="20s" id="rsc2-reload-agent-interval-0s"/>
               <op name="start" interval="0s" timeout="20s" id="rsc2-start-interval-0s"/>
               <op name="stop" interval="0s" timeout="20s" id="rsc2-stop-interval-0s"/>
             </operations>
           </primitive>
         </group>
         <meta_attributes id="grp-clone-meta_attributes">
           <nvpair id="grp-clone-meta_attributes-clone-node-max" name="clone-node-max" value="1"/>
         </meta_attributes>
       </clone>
     </resources>
     <constraints>
       <rsc_location id="location-grp-clone-node1-100" rsc="grp-clone" node="node1" score="100"/>
     </constraints>
     <op_defaults/>
     <alerts/>
     <rsc_defaults/>
   </configuration>
   <status>
     <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
       <lrm id="2">
         <lrm_resources>
           <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
             <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="10:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;10:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="16" rc-code="0" op-status="0" interval="0" last-rc-change="1687310527" exec-time="47" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
             <lrm_rsc_op id="Fencing_monitor_120000" operation_key="Fencing_monitor_120000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="11:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;11:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="19" rc-code="0" op-status="0" interval="120000" last-rc-change="1687310527" exec-time="42" queue-time="0" op-digest="24989640311980988fb77ddd1cc1002b"/>
           </lrm_resource>
           <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="20:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;20:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="17" rc-code="0" op-status="0" interval="0" last-rc-change="1687310527" exec-time="14" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
             <lrm_rsc_op id="rsc1_monitor_10000" operation_key="rsc1_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="21:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;21:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="18" rc-code="0" op-status="0" interval="10000" last-rc-change="1687310527" exec-time="19" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params="  passwd  " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
           </lrm_resource>
           <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="22:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;22:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="20" rc-code="0" op-status="0" interval="0" last-rc-change="1687310527" exec-time="14" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
             <lrm_rsc_op id="rsc2_monitor_10000" operation_key="rsc2_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="23:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;23:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node2" call-id="21" rc-code="0" op-status="0" interval="10000" last-rc-change="1687310527" exec-time="10" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params="  passwd  " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
       <transient_attributes id="2">
         <instance_attributes id="status-2">
           <nvpair id="status-2-.feature-set" name="#feature-set" value="3.17.4"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
     <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
       <lrm id="1">
         <lrm_resources>
           <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
             <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="1:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:7;1:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node1" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687310527" exec-time="3" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
           </lrm_resource>
           <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="2:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:7;2:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node1" call-id="14" rc-code="7" op-status="0" interval="0" last-rc-change="1687310527" exec-time="17" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
           <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="3:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:7;3:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node1" call-id="15" rc-code="7" op-status="0" interval="0" last-rc-change="1687310527" exec-time="20" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
       <transient_attributes id="1">
         <instance_attributes id="status-1">
           <nvpair id="status-1-.feature-set" name="#feature-set" value="3.17.4"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
     <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="controld_update_resource_history" join="member" expected="member">
       <lrm id="3">
         <lrm_resources>
           <lrm_resource id="Fencing" class="stonith" type="fence_xvm">
             <lrm_rsc_op id="Fencing_last_0" operation_key="Fencing_monitor_0" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="7:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:7;7:0:7:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node3" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1687310526" exec-time="2" queue-time="0" op-digest="f551693977f94a4fa8883fb70e439592"/>
           </lrm_resource>
           <lrm_resource id="rsc1" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="rsc1_last_0" operation_key="rsc1_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="12:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;12:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node3" call-id="16" rc-code="0" op-status="0" interval="0" last-rc-change="1687310526" exec-time="16" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
             <lrm_rsc_op id="rsc1_monitor_10000" operation_key="rsc1_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="13:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;13:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node3" call-id="17" rc-code="0" op-status="0" interval="10000" last-rc-change="1687310526" exec-time="19" queue-time="1" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params="  passwd  " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
           </lrm_resource>
           <lrm_resource id="rsc2" class="ocf" provider="pacemaker" type="Dummy">
             <lrm_rsc_op id="rsc2_last_0" operation_key="rsc2_start_0" operation="start" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="14:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;14:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node3" call-id="18" rc-code="0" op-status="0" interval="0" last-rc-change="1687310526" exec-time="14" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-force-restart="  state  " op-restart-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" op-secure-params="  passwd  " op-secure-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8"/>
             <lrm_rsc_op id="rsc2_monitor_10000" operation_key="rsc2_monitor_10000" operation="monitor" crm-debug-origin="controld_update_resource_history" crm_feature_set="3.17.4" transition-key="15:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" transition-magic="0:0;15:0:0:94ad4cf3-e815-4ba8-b397-29a48e0bf3c1" exit-reason="" on_node="node3" call-id="19" rc-code="0" op-status="0" interval="10000" last-rc-change="1687310526" exec-time="12" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" op-secure-params="  passwd  " op-secure-digest="4811cef7f7f94e3a35a70be7916cb2fd"/>
           </lrm_resource>
         </lrm_resources>
       </lrm>
       <transient_attributes id="3">
         <instance_attributes id="status-3">
           <nvpair id="status-3-.feature-set" name="#feature-set" value="3.17.4"/>
         </instance_attributes>
       </transient_attributes>
     </node_state>
   </status>
 </cib>
diff --git a/lib/pacemaker/libpacemaker_private.h b/lib/pacemaker/libpacemaker_private.h
index 93bd17f0b4..941996b33b 100644
--- a/lib/pacemaker/libpacemaker_private.h
+++ b/lib/pacemaker/libpacemaker_private.h
@@ -1,1065 +1,1071 @@
 /*
  * Copyright 2021-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU Lesser General Public License
  * version 2.1 or later (LGPLv2.1+) WITHOUT ANY WARRANTY.
  */
 
 #ifndef PCMK__LIBPACEMAKER_PRIVATE__H
 #  define PCMK__LIBPACEMAKER_PRIVATE__H
 
 /* This header is for the sole use of libpacemaker, so that functions can be
  * declared with G_GNUC_INTERNAL for efficiency.
  */
 
 #include <crm/pengine/pe_types.h> // pe_action_t, pe_node_t, pe_working_set_t
 #include <crm/pengine/internal.h> // pe__location_t
 
 // Flags to modify the behavior of add_colocated_node_scores()
 enum pcmk__coloc_select {
     // With no other flags, apply all "with this" colocations
     pcmk__coloc_select_default      = 0,
 
     // Apply "this with" colocations instead of "with this" colocations
     pcmk__coloc_select_this_with    = (1 << 0),
 
     // Apply only colocations with non-negative scores
     pcmk__coloc_select_nonnegative  = (1 << 1),
 
     // Apply only colocations with at least one matching node
     pcmk__coloc_select_active       = (1 << 2),
 };
 
 // Flags the update_ordered_actions() method can return
 enum pcmk__updated {
     pcmk__updated_none      = 0,        // Nothing changed
     pcmk__updated_first     = (1 << 0), // First action was updated
     pcmk__updated_then      = (1 << 1), // Then action was updated
 };
 
 #define pcmk__set_updated_flags(au_flags, action, flags_to_set) do {        \
         au_flags = pcmk__set_flags_as(__func__, __LINE__,                   \
                                       LOG_TRACE, "Action update",           \
                                       (action)->uuid, au_flags,             \
                                       (flags_to_set), #flags_to_set);       \
     } while (0)
 
 #define pcmk__clear_updated_flags(au_flags, action, flags_to_clear) do {    \
         au_flags = pcmk__clear_flags_as(__func__, __LINE__,                 \
                                         LOG_TRACE, "Action update",         \
                                         (action)->uuid, au_flags,           \
                                         (flags_to_clear), #flags_to_clear); \
     } while (0)
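 /* A minimal usage sketch (illustrative, not part of this header): an
  * update_ordered_actions() implementation accumulates these flags and
  * returns them to its caller, e.g.
  *
  *     uint32_t changed = pcmk__updated_none;
  *
  *     pcmk__set_updated_flags(changed, first, pcmk__updated_first);
  *     if (pcmk_is_set(changed, pcmk__updated_first)) {
  *         // the 'first' action was modified, so reprocess its orderings
  *     }
  *     return changed;
  *
  * where 'first' is assumed to be the pe_action_t being updated.
  */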
 
 // Resource assignment methods
 struct resource_alloc_functions_s {
     /*!
      * \internal
      * \brief Assign a resource to a node
      *
      * \param[in,out] rsc     Resource to assign to a node
      * \param[in]     prefer  Node to prefer, if all else is equal
      *
      * \return Node that \p rsc is assigned to, if assigned entirely to one node
      */
     pe_node_t *(*assign)(pe_resource_t *rsc, const pe_node_t *prefer);
 
     /*!
      * \internal
      * \brief Create all actions needed for a given resource
      *
      * \param[in,out] rsc  Resource to create actions for
      */
     void (*create_actions)(pe_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Schedule any probes needed for a resource on a node
      *
      * \param[in,out] rsc   Resource to create probe for
      * \param[in,out] node  Node to create probe on
      *
      * \return true if any probe was created, otherwise false
      */
     bool (*create_probe)(pe_resource_t *rsc, pe_node_t *node);
 
     /*!
      * \internal
      * \brief Create implicit constraints needed for a resource
      *
      * \param[in,out] rsc  Resource to create implicit constraints for
      */
     void (*internal_constraints)(pe_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Apply a colocation's score to node scores or resource priority
      *
      * Given a colocation constraint, apply its score to the dependent's
      * allowed node scores (if we are still placing resources) or priority (if
      * we are choosing promotable clone instance roles).
      *
      * \param[in,out] dependent      Dependent resource in colocation
      * \param[in]     primary        Primary resource in colocation
      * \param[in]     colocation     Colocation constraint to apply
      * \param[in]     for_dependent  true if called on behalf of dependent
      */
     void (*apply_coloc_score)(pe_resource_t *dependent,
                               const pe_resource_t *primary,
                               const pcmk__colocation_t *colocation,
                               bool for_dependent);
 
     /*!
      * \internal
      * \brief Create list of all resources in colocations with a given resource
      *
      * Given a resource, create a list of all resources involved in mandatory
      * colocations with it, whether directly or via chained colocations.
      *
      * \param[in]     rsc             Resource to add to colocated list
      * \param[in]     orig_rsc        Resource originally requested
      * \param[in,out] colocated_rscs  Existing list
      *
      * \return List of given resource and all resources involved in colocations
      *
      * \note This function is recursive; top-level callers should pass NULL as
      *       \p colocated_rscs and \p orig_rsc, and the desired resource as
      *       \p rsc. The recursive calls will use other values.
      */
     GList *(*colocated_resources)(const pe_resource_t *rsc,
                                   const pe_resource_t *orig_rsc,
                                   GList *colocated_rscs);
 
     /*!
      * \internal
      * \brief Add colocations affecting a resource as primary to a list
      *
      * Given a resource being assigned (\p orig_rsc) and a resource somewhere in
      * its chain of ancestors (\p rsc, which may be \p orig_rsc), get
      * colocations that affect the ancestor as primary and should affect the
      * resource, and add them to a given list.
      *
      * \param[in]     rsc       Resource whose colocations should be added
      * \param[in]     orig_rsc  Affected resource (\p rsc or a descendant)
      * \param[in,out] list      List of colocations to add to
      *
      * \note All arguments should be non-NULL.
      * \note The pcmk__with_this_colocations() wrapper should usually be used
      *       instead of using this method directly.
      */
     void (*with_this_colocations)(const pe_resource_t *rsc,
                                   const pe_resource_t *orig_rsc, GList **list);
 
     /*!
      * \internal
      * \brief Add colocations affecting a resource as dependent to a list
      *
      * Given a resource being assigned (\p orig_rsc) and a resource somewhere in
      * its chain of ancestors (\p rsc, which may be \p orig_rsc), get
      * colocations that affect the ancestor as dependent and should affect the
      * resource, and add them to a given list.
      *
      * \param[in]     rsc       Resource whose colocations should be added
      * \param[in]     orig_rsc  Affected resource (\p rsc or a descendant)
      * \param[in,out] list      List of colocations to add to
      *
      * \note All arguments should be non-NULL.
      * \note The pcmk__this_with_colocations() wrapper should usually be used
      *       instead of using this method directly.
      */
     void (*this_with_colocations)(const pe_resource_t *rsc,
                                   const pe_resource_t *orig_rsc, GList **list);
 
     /*!
      * \internal
      * \brief Update nodes with scores of colocated resources' nodes
      *
      * Given a table of nodes and a resource, update the nodes' scores with the
      * scores of the best nodes matching the attribute used for each of the
      * resource's relevant colocations.
      *
      * \param[in,out] rsc         Resource to check colocations for
      * \param[in]     log_id      Resource ID for logs (if NULL, use \p rsc ID)
      * \param[in,out] nodes       Nodes to update (set initial contents to NULL
      *                            to copy \p rsc's allowed nodes)
      * \param[in]     colocation  Original colocation constraint (used to get
      *                            configured primary resource's stickiness, and
      *                            to get colocation node attribute; if NULL,
      *                            \p rsc's own matching node scores will not be
      *                            added, and *nodes must be NULL as well)
      * \param[in]     factor      Incorporate scores multiplied by this factor
      * \param[in]     flags       Bitmask of enum pcmk__coloc_select values
      *
      * \note NULL *nodes, NULL colocation, and the pcmk__coloc_select_this_with
      *       flag are used together (and only by cmp_resources()).
      * \note The caller remains responsible for freeing \p *nodes.
      */
     void (*add_colocated_node_scores)(pe_resource_t *rsc, const char *log_id,
                                       GHashTable **nodes,
                                       pcmk__colocation_t *colocation,
                                       float factor, uint32_t flags);
 
     /*!
      * \internal
      * \brief Apply a location constraint to a resource's allowed node scores
      *
      * \param[in,out] rsc       Resource to apply constraint to
      * \param[in,out] location  Location constraint to apply
      */
     void (*apply_location)(pe_resource_t *rsc, pe__location_t *location);
 
     /*!
      * \internal
      * \brief Return action flags for a given resource action
      *
      * \param[in,out] action  Action to get flags for
      * \param[in]     node    If not NULL, limit effects to this node
      *
      * \return Flags appropriate to \p action on \p node
      * \note For primitives, this will be the same as action->flags regardless
      *       of node. For collective resources, the flags can differ due to
      *       multiple instances possibly being involved.
      */
     uint32_t (*action_flags)(pe_action_t *action, const pe_node_t *node);
 
     /*!
      * \internal
      * \brief Update two actions according to an ordering between them
      *
      * Given information about an ordering of two actions, update the actions'
      * flags (and runnable_before members if appropriate) to reflect the
      * ordering. Effects may cascade to other orderings involving the actions as
      * well.
      *
      * \param[in,out] first     'First' action in an ordering
      * \param[in,out] then      'Then' action in an ordering
      * \param[in]     node      If not NULL, limit scope of ordering to this
      *                          node (only used when interleaving instances)
      * \param[in]     flags     Action flags for \p first for ordering purposes
      * \param[in]     filter    Action flags to limit scope of certain updates
      *                          (may include pe_action_optional to affect only
      *                          mandatory actions, and pe_action_runnable to
      *                          affect only runnable actions)
      * \param[in]     type      Group of enum pe_ordering flags to apply
      * \param[in,out] data_set  Cluster working set
      *
      * \return Group of enum pcmk__updated flags indicating what was updated
      */
     uint32_t (*update_ordered_actions)(pe_action_t *first, pe_action_t *then,
                                        const pe_node_t *node, uint32_t flags,
                                        uint32_t filter, uint32_t type,
                                        pe_working_set_t *data_set);
 
     /*!
      * \internal
      * \brief Output a summary of scheduled actions for a resource
      *
      * \param[in,out] rsc  Resource to output actions for
      */
     void (*output_actions)(pe_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Add a resource's actions to the transition graph
      *
      * \param[in,out] rsc  Resource whose actions should be added
      */
     void (*add_actions_to_graph)(pe_resource_t *rsc);
 
     /*!
      * \internal
      * \brief Add meta-attributes relevant to transition graph actions to XML
      *
      * If a given resource supports variant-specific meta-attributes that are
      * needed for transition graph actions, add them to a given XML element.
      *
      * \param[in]     rsc  Resource whose meta-attributes should be added
      * \param[in,out] xml  Transition graph action attributes XML to add to
      */
     void (*add_graph_meta)(const pe_resource_t *rsc, xmlNode *xml);
 
     /*!
      * \internal
      * \brief Add a resource's utilization to a table of utilization values
      *
      * This function is used when summing the utilization of a resource and all
      * resources colocated with it, to determine whether a node has sufficient
      * capacity. Given a resource and a table of utilization values, it will add
      * the resource's utilization to the existing values, if the resource has
      * not yet been assigned to a node.
      *
      * \param[in]     rsc          Resource with utilization to add
      * \param[in]     orig_rsc     Resource being assigned (for logging only)
      * \param[in]     all_rscs     List of all resources that will be summed
      * \param[in,out] utilization  Table of utilization values to add to
      */
     void (*add_utilization)(const pe_resource_t *rsc,
                             const pe_resource_t *orig_rsc, GList *all_rscs,
                             GHashTable *utilization);
 
     /*!
      * \internal
      * \brief Apply a shutdown lock for a resource, if appropriate
      *
      * \param[in,out] rsc       Resource to check for shutdown lock
      */
     void (*shutdown_lock)(pe_resource_t *rsc);
 };
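 /* Dispatch sketch (grounded in how this table is used elsewhere in
  * libpacemaker, though the snippet itself is illustrative): each resource
  * carries a pointer to the method table matching its variant, and callers go
  * through it instead of switching on the variant:
  *
  *     pe_node_t *chosen = rsc->cmds->assign(rsc, prefer);
  *     rsc->cmds->create_actions(rsc);
  *
  * where rsc->cmds is the table installed by pcmk__set_assignment_methods().
  */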
 
 // Actions (pcmk_sched_actions.c)
 
 G_GNUC_INTERNAL
 void pcmk__update_action_for_orderings(pe_action_t *action,
                                        pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__update_ordered_actions(pe_action_t *first, pe_action_t *then,
                                       const pe_node_t *node, uint32_t flags,
                                       uint32_t filter, uint32_t type,
                                       pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__log_action(const char *pre_text, const pe_action_t *action,
                       bool details);
 
 G_GNUC_INTERNAL
 pe_action_t *pcmk__new_cancel_action(pe_resource_t *rsc, const char *name,
                                      guint interval_ms, const pe_node_t *node);
 
 G_GNUC_INTERNAL
 pe_action_t *pcmk__new_shutdown_action(pe_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__action_locks_rsc_to_node(const pe_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__deduplicate_action_inputs(pe_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__output_actions(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__check_action_config(pe_resource_t *rsc, pe_node_t *node,
                                const xmlNode *xml_op);
 
 G_GNUC_INTERNAL
 void pcmk__handle_rsc_config_changes(pe_working_set_t *data_set);
 
 
 // Recurring actions (pcmk_sched_recurring.c)
 
 G_GNUC_INTERNAL
 void pcmk__create_recurring_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__schedule_cancel(pe_resource_t *rsc, const char *call_id,
                            const char *task, guint interval_ms,
                            const pe_node_t *node, const char *reason);
 
 G_GNUC_INTERNAL
 void pcmk__reschedule_recurring(pe_resource_t *rsc, const char *task,
                                 guint interval_ms, pe_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__action_is_recurring(const pe_action_t *action);
 
 
 // Producing transition graphs (pcmk_graph_producer.c)
 
 G_GNUC_INTERNAL
 bool pcmk__graph_has_loop(const pe_action_t *init_action,
                           const pe_action_t *action,
                           pe_action_wrapper_t *input);
 
 G_GNUC_INTERNAL
 void pcmk__add_rsc_actions_to_graph(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__create_graph(pe_working_set_t *data_set);
 
 
 // Fencing (pcmk_sched_fencing.c)
 
 G_GNUC_INTERNAL
 void pcmk__order_vs_fence(pe_action_t *stonith_op, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__order_vs_unfence(const pe_resource_t *rsc, pe_node_t *node,
                             pe_action_t *action, enum pe_ordering order);
 
 G_GNUC_INTERNAL
 void pcmk__fence_guest(pe_node_t *node);
 
 G_GNUC_INTERNAL
 bool pcmk__node_unfenced(const pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__order_restart_vs_unfence(gpointer data, gpointer user_data);
 
 
 // Injected scheduler inputs (pcmk_sched_injections.c)
 
 void pcmk__inject_scheduler_input(pe_working_set_t *data_set, cib_t *cib,
                                   const pcmk_injections_t *injections);
 
 
 // Constraints of any type (pcmk_sched_constraints.c)
 
 G_GNUC_INTERNAL
 pe_resource_t *pcmk__find_constraint_resource(GList *rsc_list, const char *id);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__expand_tags_in_sets(xmlNode *xml_obj,
                                    const pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__valid_resource_or_tag(const pe_working_set_t *data_set,
                                  const char *id, pe_resource_t **rsc,
                                  pe_tag_t **tag);
 
 G_GNUC_INTERNAL
 bool pcmk__tag_to_set(xmlNode *xml_obj, xmlNode **rsc_set, const char *attr,
                       bool convert_rsc, const pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__create_internal_constraints(pe_working_set_t *data_set);
 
 
 // Location constraints
 
 G_GNUC_INTERNAL
 void pcmk__unpack_location(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 pe__location_t *pcmk__new_location(const char *id, pe_resource_t *rsc,
                                    int node_score, const char *discover_mode,
                                    pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__apply_locations(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__apply_location(pe_resource_t *rsc, pe__location_t *constraint);
 
 
 // Colocation constraints (pcmk_sched_colocation.c)
 
 enum pcmk__coloc_affects {
     pcmk__coloc_affects_nothing = 0,
     pcmk__coloc_affects_location,
     pcmk__coloc_affects_role,
 };
 
 G_GNUC_INTERNAL
 enum pcmk__coloc_affects pcmk__colocation_affects(const pe_resource_t
                                                     *dependent,
                                                   const pe_resource_t *primary,
                                                   const pcmk__colocation_t
                                                     *colocation,
                                                   bool preview);
 
 G_GNUC_INTERNAL
 void pcmk__apply_coloc_to_scores(pe_resource_t *dependent,
                                  const pe_resource_t *primary,
                                  const pcmk__colocation_t *colocation);
 
 G_GNUC_INTERNAL
 void pcmk__apply_coloc_to_priority(pe_resource_t *dependent,
                                    const pe_resource_t *primary,
                                    const pcmk__colocation_t *colocation);
 
 G_GNUC_INTERNAL
 void pcmk__add_colocated_node_scores(pe_resource_t *rsc, const char *log_id,
                                      GHashTable **nodes,
                                      pcmk__colocation_t *colocation,
                                      float factor, uint32_t flags);
 
 G_GNUC_INTERNAL
 void pcmk__add_dependent_scores(gpointer data, gpointer user_data);
 
 G_GNUC_INTERNAL
 void pcmk__unpack_colocation(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__add_this_with(GList **list, const pcmk__colocation_t *colocation,
                          const pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__add_this_with_list(GList **list, GList *addition,
                               const pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__add_with_this(GList **list, const pcmk__colocation_t *colocation,
                          const pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__add_with_this_list(GList **list, GList *addition,
                               const pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 GList *pcmk__with_this_colocations(const pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 GList *pcmk__this_with_colocations(const pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__new_colocation(const char *id, const char *node_attr, int score,
                           pe_resource_t *dependent, pe_resource_t *primary,
                           const char *dependent_role, const char *primary_role,
                           bool influence);
 
 G_GNUC_INTERNAL
 void pcmk__block_colocation_dependents(pe_action_t *action);
 
 /*!
  * \internal
  * \brief Check whether a colocation's dependent preferences should be considered
  *
  * \param[in] colocation  Colocation constraint
  * \param[in] rsc         Primary instance (normally colocation->primary;
  *                        NULL is treated as that, but for clones or bundles
  *                        with multiple instances this can be a particular
  *                        instance)
  *
  * \return true if colocation influence should be effective, otherwise false
  */
 static inline bool
 pcmk__colocation_has_influence(const pcmk__colocation_t *colocation,
                                const pe_resource_t *rsc)
 {
     if (rsc == NULL) {
         rsc = colocation->primary;
     }
 
     /* A bundle replica colocates its remote connection with its container,
      * using a finite score so that the container can run on Pacemaker Remote
      * nodes.
      *
      * Moving a connection is lightweight and does not interrupt the service,
      * while moving a container is heavyweight and does interrupt the service,
      * so don't move a clean, active container based solely on the preferences
      * of its connection.
      *
      * This also avoids problematic scenarios where two containers want to
      * perpetually swap places.
      */
     if (pcmk_is_set(colocation->dependent->flags, pe_rsc_allow_remote_remotes)
         && !pcmk_is_set(rsc->flags, pe_rsc_failed)
         && pcmk__list_of_1(rsc->running_on)) {
         return false;
     }
 
     /* The dependent in a colocation influences the primary's location
      * if the influence option is true or the primary is not yet active.
      */
     return colocation->influence || (rsc->running_on == NULL);
 }
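 /* Typical use (a sketch): when walking a resource's "with this" colocations,
  * skip any whose dependent should not sway the primary's placement:
  *
  *     for (GList *iter = rsc->rsc_cons_lhs; iter != NULL; iter = iter->next) {
  *         pcmk__colocation_t *colocation = iter->data;
  *
  *         if (!pcmk__colocation_has_influence(colocation, NULL)) {
  *             continue; // dependent must not influence the primary here
  *         }
  *         // ... apply the colocation's effect ...
  *     }
  */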
 
 
 // Ordering constraints (pcmk_sched_ordering.c)
 
 G_GNUC_INTERNAL
 void pcmk__new_ordering(pe_resource_t *first_rsc, char *first_task,
                         pe_action_t *first_action, pe_resource_t *then_rsc,
                         char *then_task, pe_action_t *then_action,
                         uint32_t flags, pe_working_set_t *sched);
 
 G_GNUC_INTERNAL
 void pcmk__unpack_ordering(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__disable_invalid_orderings(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 void pcmk__order_stops_before_shutdown(pe_node_t *node,
                                        pe_action_t *shutdown_op);
 
 G_GNUC_INTERNAL
 void pcmk__apply_orderings(pe_working_set_t *sched);
 
 G_GNUC_INTERNAL
 void pcmk__order_after_each(pe_action_t *after, GList *list);
 
 
 /*!
  * \internal
  * \brief Create a new ordering between two resource actions
  *
  * \param[in,out] first_rsc   Resource for 'first' action
  * \param[in,out] first_task  Action key for 'first' action
  * \param[in]     then_rsc    Resource for 'then' action
  * \param[in,out] then_task   Action key for 'then' action
  * \param[in]     flags       Bitmask of enum pe_ordering flags
  */
 #define pcmk__order_resource_actions(first_rsc, first_task,                 \
                                      then_rsc, then_task, flags)            \
     pcmk__new_ordering((first_rsc),                                         \
                        pcmk__op_key((first_rsc)->id, (first_task), 0),      \
                        NULL,                                                \
                        (then_rsc),                                          \
                        pcmk__op_key((then_rsc)->id, (then_task), 0),        \
                        NULL, (flags), (first_rsc)->cluster)
 
 #define pcmk__order_starts(rsc1, rsc2, flags)                \
     pcmk__order_resource_actions((rsc1), CRMD_ACTION_START,  \
                                  (rsc2), CRMD_ACTION_START, (flags))
 
 #define pcmk__order_stops(rsc1, rsc2, flags)                 \
     pcmk__order_resource_actions((rsc1), CRMD_ACTION_STOP,   \
                                  (rsc2), CRMD_ACTION_STOP, (flags))
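 /* For example (with hypothetical resources rsc1 and rsc2), these expand to
  * pcmk__new_ordering() calls keyed on each resource's start or stop action:
  *
  *     pcmk__order_starts(rsc1, rsc2, pe_order_optional);
  *     pcmk__order_stops(rsc2, rsc1, pe_order_implies_first);
  */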
 
 
 // Ticket constraints (pcmk_sched_tickets.c)
 
 G_GNUC_INTERNAL
 void pcmk__unpack_rsc_ticket(xmlNode *xml_obj, pe_working_set_t *data_set);
 
 
 // Promotable clone resources (pcmk_sched_promotable.c)
 
 G_GNUC_INTERNAL
 void pcmk__add_promotion_scores(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__require_promotion_tickets(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__set_instance_roles(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__create_promotable_actions(pe_resource_t *clone);
 
 G_GNUC_INTERNAL
 void pcmk__promotable_restart_ordering(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__order_promotable_instances(pe_resource_t *clone);
 
 G_GNUC_INTERNAL
 void pcmk__update_dependent_with_promotable(const pe_resource_t *primary,
                                             pe_resource_t *dependent,
                                             const pcmk__colocation_t
                                                 *colocation);
 
 G_GNUC_INTERNAL
 void pcmk__update_promotable_dependent_priority(const pe_resource_t *primary,
                                                 pe_resource_t *dependent,
                                                 const pcmk__colocation_t
                                                     *colocation);
 
 
 // Pacemaker Remote nodes (pcmk_sched_remote.c)
 
 G_GNUC_INTERNAL
 bool pcmk__is_failed_remote_node(const pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__order_remote_connection_actions(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__rsc_corresponds_to_guest(const pe_resource_t *rsc,
                                     const pe_node_t *node);
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__connection_host_for_action(const pe_action_t *action);
 
 G_GNUC_INTERNAL
 void pcmk__substitute_remote_addr(pe_resource_t *rsc, GHashTable *params);
 
 G_GNUC_INTERNAL
 void pcmk__add_bundle_meta_to_xml(xmlNode *args_xml, const pe_action_t *action);
 
 
 // Primitives (pcmk_sched_primitive.c)
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__primitive_assign(pe_resource_t *rsc, const pe_node_t *prefer);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_create_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_internal_constraints(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__primitive_action_flags(pe_action_t *action,
                                       const pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_apply_coloc_score(pe_resource_t *dependent,
                                        const pe_resource_t *primary,
                                        const pcmk__colocation_t *colocation,
                                        bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__with_primitive_colocations(const pe_resource_t *rsc,
                                       const pe_resource_t *orig_rsc,
                                       GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_with_colocations(const pe_resource_t *rsc,
                                       const pe_resource_t *orig_rsc,
                                       GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__schedule_cleanup(pe_resource_t *rsc, const pe_node_t *node,
                             bool optional);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_add_utilization(const pe_resource_t *rsc,
                                      const pe_resource_t *orig_rsc,
                                      GList *all_rscs, GHashTable *utilization);
 
 G_GNUC_INTERNAL
 void pcmk__primitive_shutdown_lock(pe_resource_t *rsc);
 
 
 // Groups (pcmk_sched_group.c)
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__group_assign(pe_resource_t *rsc, const pe_node_t *prefer);
 
 G_GNUC_INTERNAL
 void pcmk__group_create_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__group_internal_constraints(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__group_apply_coloc_score(pe_resource_t *dependent,
                                    const pe_resource_t *primary,
                                    const pcmk__colocation_t *colocation,
                                    bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__with_group_colocations(const pe_resource_t *rsc,
                                   const pe_resource_t *orig_rsc, GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__group_with_colocations(const pe_resource_t *rsc,
                                   const pe_resource_t *orig_rsc, GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__group_add_colocated_node_scores(pe_resource_t *rsc,
                                            const char *log_id,
                                            GHashTable **nodes,
                                            pcmk__colocation_t *colocation,
                                            float factor, uint32_t flags);
 
 G_GNUC_INTERNAL
 void pcmk__group_apply_location(pe_resource_t *rsc, pe__location_t *location);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__group_action_flags(pe_action_t *action, const pe_node_t *node);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__group_update_ordered_actions(pe_action_t *first,
                                             pe_action_t *then,
                                             const pe_node_t *node,
                                             uint32_t flags, uint32_t filter,
                                             uint32_t type,
                                             pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 GList *pcmk__group_colocated_resources(const pe_resource_t *rsc,
                                        const pe_resource_t *orig_rsc,
                                        GList *colocated_rscs);
 
 G_GNUC_INTERNAL
 void pcmk__group_add_utilization(const pe_resource_t *rsc,
                                  const pe_resource_t *orig_rsc, GList *all_rscs,
                                  GHashTable *utilization);
 
 G_GNUC_INTERNAL
 void pcmk__group_shutdown_lock(pe_resource_t *rsc);
 
 
 // Clones (pcmk_sched_clone.c)
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__clone_assign(pe_resource_t *rsc, const pe_node_t *prefer);
 
 G_GNUC_INTERNAL
 void pcmk__clone_create_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__clone_create_probe(pe_resource_t *rsc, pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__clone_internal_constraints(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__clone_apply_coloc_score(pe_resource_t *dependent,
                                    const pe_resource_t *primary,
                                    const pcmk__colocation_t *colocation,
                                    bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__with_clone_colocations(const pe_resource_t *rsc,
                                   const pe_resource_t *orig_rsc, GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__clone_with_colocations(const pe_resource_t *rsc,
                                   const pe_resource_t *orig_rsc, GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__clone_apply_location(pe_resource_t *rsc, pe__location_t *constraint);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__clone_action_flags(pe_action_t *action, const pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__clone_add_actions_to_graph(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__clone_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml);
 
 G_GNUC_INTERNAL
 void pcmk__clone_add_utilization(const pe_resource_t *rsc,
                                  const pe_resource_t *orig_rsc,
                                  GList *all_rscs, GHashTable *utilization);
 
 G_GNUC_INTERNAL
 void pcmk__clone_shutdown_lock(pe_resource_t *rsc);
 
 // Bundles (pcmk_sched_bundle.c)
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__bundle_assign(pe_resource_t *rsc, const pe_node_t *prefer);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_create_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__bundle_create_probe(pe_resource_t *rsc, pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_internal_constraints(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_apply_coloc_score(pe_resource_t *dependent,
                                     const pe_resource_t *primary,
                                     const pcmk__colocation_t *colocation,
                                     bool for_dependent);
 
 G_GNUC_INTERNAL
 void pcmk__with_bundle_colocations(const pe_resource_t *rsc,
                                    const pe_resource_t *orig_rsc, GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_with_colocations(const pe_resource_t *rsc,
                                    const pe_resource_t *orig_rsc, GList **list);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_apply_location(pe_resource_t *rsc,
                                  pe__location_t *constraint);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__bundle_action_flags(pe_action_t *action, const pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__output_bundle_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_add_actions_to_graph(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_add_utilization(const pe_resource_t *rsc,
                                   const pe_resource_t *orig_rsc,
                                   GList *all_rscs, GHashTable *utilization);
 
 G_GNUC_INTERNAL
 void pcmk__bundle_shutdown_lock(pe_resource_t *rsc);
 
 
 // Clone instances or bundle replica containers (pcmk_sched_instances.c)
 
 G_GNUC_INTERNAL
 void pcmk__assign_instances(pe_resource_t *collective, GList *instances,
                             int max_total, int max_per_node);
 
 G_GNUC_INTERNAL
 void pcmk__create_instance_actions(pe_resource_t *rsc, GList *instances);
 
 G_GNUC_INTERNAL
 bool pcmk__instance_matches(const pe_resource_t *instance,
                             const pe_node_t *node, enum rsc_role_e role,
                             bool current);
 
 G_GNUC_INTERNAL
 pe_resource_t *pcmk__find_compatible_instance(const pe_resource_t *match_rsc,
                                               const pe_resource_t *rsc,
                                               enum rsc_role_e role,
                                               bool current);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__instance_update_ordered_actions(pe_action_t *first,
                                                pe_action_t *then,
                                                const pe_node_t *node,
                                                uint32_t flags, uint32_t filter,
                                                uint32_t type,
                                                pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 uint32_t pcmk__collective_action_flags(pe_action_t *action,
                                        const GList *instances,
                                        const pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__add_collective_constraints(GList **list,
                                       const pe_resource_t *instance,
                                       const pe_resource_t *collective,
                                       bool with_this);
 
 
 // Injections (pcmk_injections.c)
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_node(cib_t *cib_conn, const char *node, const char *uuid);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_node_state_change(cib_t *cib_conn, const char *node,
                                         bool up);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_resource_history(pcmk__output_t *out, xmlNode *cib_node,
                                        const char *resource,
                                        const char *lrm_name,
                                        const char *rclass,
                                        const char *rtype,
                                        const char *rprovider);
 
 G_GNUC_INTERNAL
 void pcmk__inject_failcount(pcmk__output_t *out, xmlNode *cib_node,
                             const char *resource, const char *task,
                             guint interval_ms, int rc);
 
 G_GNUC_INTERNAL
 xmlNode *pcmk__inject_action_result(xmlNode *cib_resource,
                                     lrmd_event_data_t *op, int target_rc);
 
 
 // Nodes (pcmk_sched_nodes.c)
 
 G_GNUC_INTERNAL
 bool pcmk__node_available(const pe_node_t *node, bool consider_score,
                           bool consider_guest);
 
 G_GNUC_INTERNAL
 bool pcmk__any_node_available(GHashTable *nodes);
 
 G_GNUC_INTERNAL
 GHashTable *pcmk__copy_node_table(GHashTable *nodes);
 
+G_GNUC_INTERNAL
+void pcmk__copy_node_tables(const pe_resource_t *rsc, GHashTable **copy);
+
+G_GNUC_INTERNAL
+void pcmk__restore_node_tables(pe_resource_t *rsc, GHashTable *backup);
+
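+/* A sketch of the intended pairing (inferred from the signatures, not stated
+ * in this header): back up the allowed-node tables of rsc and its
+ * descendants, attempt a trial assignment, and roll back if it is rejected:
+ *
+ *     GHashTable *backup = NULL;
+ *
+ *     pcmk__copy_node_tables(rsc, &backup);
+ *     if (!pcmk__assign_resource(rsc, node, false)) {
+ *         pcmk__restore_node_tables(rsc, backup);
+ *     }
+ *     g_hash_table_destroy(backup); // assuming the caller owns the backup
+ */
+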
 G_GNUC_INTERNAL
 GList *pcmk__sort_nodes(GList *nodes, pe_node_t *active_node);
 
 G_GNUC_INTERNAL
 void pcmk__apply_node_health(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 pe_node_t *pcmk__top_allowed_node(const pe_resource_t *rsc,
                                   const pe_node_t *node);
 
 
 // Functions applying to more than one variant (pcmk_sched_resource.c)
 
 G_GNUC_INTERNAL
 void pcmk__set_assignment_methods(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
                              const xmlNode *rsc_entry, bool active_on_node);
 
 G_GNUC_INTERNAL
 GList *pcmk__rscs_matching_id(const char *id, const pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 GList *pcmk__colocated_resources(const pe_resource_t *rsc,
                                  const pe_resource_t *orig_rsc,
                                  GList *colocated_rscs);
 
 G_GNUC_INTERNAL
 void pcmk__noop_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml);
 
 G_GNUC_INTERNAL
 void pcmk__output_resource_actions(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force);
 
 G_GNUC_INTERNAL
 void pcmk__unassign_resource(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 bool pcmk__threshold_reached(pe_resource_t *rsc, const pe_node_t *node,
                              pe_resource_t **failed);
 
 G_GNUC_INTERNAL
 void pcmk__sort_resources(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 gint pcmk__cmp_instance(gconstpointer a, gconstpointer b);
 
 G_GNUC_INTERNAL
 gint pcmk__cmp_instance_number(gconstpointer a, gconstpointer b);
 
 
 // Functions related to probes (pcmk_sched_probes.c)
 
 G_GNUC_INTERNAL
 bool pcmk__probe_rsc_on_node(pe_resource_t *rsc, pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__order_probes(pe_working_set_t *data_set);
 
 G_GNUC_INTERNAL
 bool pcmk__probe_resource_list(GList *rscs, pe_node_t *node);
 
 G_GNUC_INTERNAL
 void pcmk__schedule_probes(pe_working_set_t *data_set);
 
 
 // Functions related to live migration (pcmk_sched_migration.c)
 
 void pcmk__create_migration_actions(pe_resource_t *rsc,
                                     const pe_node_t *current);
 
 void pcmk__abort_dangling_migration(void *data, void *user_data);
 
 bool pcmk__rsc_can_migrate(const pe_resource_t *rsc, const pe_node_t *current);
 
 void pcmk__order_migration_equivalents(pe__ordering_t *order);
 
 
 // Functions related to node utilization (pcmk_sched_utilization.c)
 
 G_GNUC_INTERNAL
 int pcmk__compare_node_capacities(const pe_node_t *node1,
                                   const pe_node_t *node2);
 
 G_GNUC_INTERNAL
 void pcmk__consume_node_capacity(GHashTable *current_utilization,
                                  const pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__release_node_capacity(GHashTable *current_utilization,
                                  const pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 const pe_node_t *pcmk__ban_insufficient_capacity(pe_resource_t *rsc);
 
 G_GNUC_INTERNAL
 void pcmk__create_utilization_constraints(pe_resource_t *rsc,
                                           const GList *allowed_nodes);
 
 G_GNUC_INTERNAL
 void pcmk__show_node_capacities(const char *desc, pe_working_set_t *data_set);
 
 #endif // PCMK__LIBPACEMAKER_PRIVATE__H
diff --git a/lib/pacemaker/pcmk_sched_instances.c b/lib/pacemaker/pcmk_sched_instances.c
index 7f7c7215e1..b2a2e6dca7 100644
--- a/lib/pacemaker/pcmk_sched_instances.c
+++ b/lib/pacemaker/pcmk_sched_instances.c
@@ -1,1657 +1,1656 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 /* This file is intended for code usable with both clone instances and bundle
  * replica containers.
  */
 
 #include <crm_internal.h>
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 #include "libpacemaker_private.h"
 
 /*!
  * \internal
  * \brief Check whether a clone or bundle has instances for all available nodes
  *
  * \param[in] collective  Clone or bundle to check
  *
  * \return true if \p collective has enough instances for all of its available
  *         allowed nodes, otherwise false
  */
 static bool
 can_run_everywhere(const pe_resource_t *collective)
 {
     GHashTableIter iter;
     pe_node_t *node = NULL;
     int available_nodes = 0;
     int max_instances = 0;
 
     switch (collective->variant) {
         case pe_clone:
             max_instances = pe__clone_max(collective);
             break;
         case pe_container:
             max_instances = pe__bundle_max(collective);
             break;
         default:
             return false; // Not actually possible
     }
 
     g_hash_table_iter_init(&iter, collective->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
         if (pcmk__node_available(node, false, false)
             && (max_instances < ++available_nodes)) {
             return false;
         }
     }
     return true;
 }
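 /* For example, a clone with clone-max=2 in a cluster with three available
  * nodes returns false here (2 < 3), while the same clone with clone-max=3
  * or higher returns true.
  */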
 
 /*!
  * \internal
  * \brief Check whether a node is allowed to run an instance
  *
  * \param[in] instance      Clone instance or bundle container to check
  * \param[in] node          Node to check
  * \param[in] max_per_node  Maximum number of instances allowed to run on a node
  *
  * \return true if \p node is allowed to run \p instance, otherwise false
  */
 static bool
 can_run_instance(const pe_resource_t *instance, const pe_node_t *node,
                  int max_per_node)
 {
     pe_node_t *allowed_node = NULL;
 
     if (pcmk_is_set(instance->flags, pe_rsc_orphan)) {
         pe_rsc_trace(instance, "%s cannot run on %s: orphaned",
                      instance->id, pe__node_name(node));
         return false;
     }
 
     if (!pcmk__node_available(node, false, false)) {
         pe_rsc_trace(instance,
                      "%s cannot run on %s: node cannot run resources",
                      instance->id, pe__node_name(node));
         return false;
     }
 
     allowed_node = pcmk__top_allowed_node(instance, node);
     if (allowed_node == NULL) {
         crm_warn("%s cannot run on %s: node not allowed",
                  instance->id, pe__node_name(node));
         return false;
     }
 
     if (allowed_node->weight < 0) {
         pe_rsc_trace(instance, "%s cannot run on %s: parent score is %s there",
                      instance->id, pe__node_name(node),
                      pcmk_readable_score(allowed_node->weight));
         return false;
     }
 
     if (allowed_node->count >= max_per_node) {
         pe_rsc_trace(instance,
                      "%s cannot run on %s: node already has the maximum "
                      "of %d instance%s",
                      instance->id, pe__node_name(node), max_per_node,
                      pcmk__plural_s(max_per_node));
         return false;
     }
 
     pe_rsc_trace(instance, "%s can run on %s (%d already running)",
                  instance->id, pe__node_name(node), allowed_node->count);
     return true;
 }
 
 /*!
  * \internal
  * \brief Ban a clone instance or bundle replica from unavailable allowed nodes
  *
  * \param[in,out] instance      Clone instance or bundle replica to ban
  * \param[in]     max_per_node  Maximum instances allowed to run on a node
  */
 static void
 ban_unavailable_allowed_nodes(pe_resource_t *instance, int max_per_node)
 {
     if (instance->allowed_nodes != NULL) {
         GHashTableIter iter;
         pe_node_t *node = NULL;
 
         g_hash_table_iter_init(&iter, instance->allowed_nodes);
         while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
             if (!can_run_instance(instance, node, max_per_node)) {
                 pe_rsc_trace(instance, "Banning %s from unavailable node %s",
                              instance->id, pe__node_name(node));
                 node->weight = -INFINITY;
                 for (GList *child_iter = instance->children;
                      child_iter != NULL; child_iter = child_iter->next) {
                     pe_resource_t *child = (pe_resource_t *) child_iter->data;
                     pe_node_t *child_node = NULL;
 
                     child_node = pe_hash_table_lookup(child->allowed_nodes,
                                                       node->details->id);
                     if (child_node != NULL) {
                         pe_rsc_trace(instance,
                                      "Banning %s child %s "
                                      "from unavailable node %s",
                                      instance->id, child->id,
                                      pe__node_name(node));
                         child_node->weight = -INFINITY;
                     }
                 }
             }
         }
     }
 }
 
 /*!
  * \internal
  * \brief Create a hash table with a single node in it
  *
  * \param[in] node  Node to copy into new table
  *
  * \return Newly created hash table containing a copy of \p node
  * \note The caller is responsible for freeing the result with
  *       g_hash_table_destroy().
  */
 static GHashTable *
 new_node_table(pe_node_t *node)
 {
     GHashTable *table = pcmk__strkey_table(NULL, free);
 
     node = pe__copy_node(node);
     g_hash_table_insert(table, (gpointer) node->details->id, node);
     return table;
 }
 
 /*!
  * \internal
  * \brief Apply a resource's parent's colocation scores to a node table
  *
  * \param[in]     rsc    Resource whose colocations should be applied
  * \param[in,out] nodes  Node table to apply colocations to
  */
 static void
 apply_parent_colocations(const pe_resource_t *rsc, GHashTable **nodes)
 {
     GList *iter = NULL;
     pcmk__colocation_t *colocation = NULL;
     pe_resource_t *other = NULL;
     float factor = 0.0;
 
     /* Because the this_with_colocations() and with_this_colocations() methods
      * boil down to copies of rsc_cons and rsc_cons_lhs for clones and bundles,
      * we can use those here directly for efficiency.
      */
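     /* Dividing by INFINITY (1,000,000 in Pacemaker's scoring) normalizes the
      * colocation score: a mandatory colocation contributes a factor of
      * +/-1.0, while a finite score such as 5000 contributes 0.005.
      */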
     for (iter = rsc->parent->rsc_cons; iter != NULL; iter = iter->next) {
         colocation = (pcmk__colocation_t *) iter->data;
         other = colocation->primary;
         factor = colocation->score / (float) INFINITY;
         other->cmds->add_colocated_node_scores(other, rsc->id, nodes,
                                                colocation, factor,
                                                pcmk__coloc_select_default);
     }
     for (iter = rsc->parent->rsc_cons_lhs; iter != NULL; iter = iter->next) {
         colocation = (pcmk__colocation_t *) iter->data;
         if (!pcmk__colocation_has_influence(colocation, rsc)) {
             continue;
         }
         other = colocation->dependent;
         factor = colocation->score / (float) INFINITY;
         other->cmds->add_colocated_node_scores(other, rsc->id, nodes,
                                                colocation, factor,
                                                pcmk__coloc_select_nonnegative);
     }
 }
 
 /*!
  * \internal
  * \brief Compare clone or bundle instances based on colocation scores
  *
  * Determine the relative order in which two clone or bundle instances should be
  * assigned to nodes, considering the scores of colocation constraints directly
  * or indirectly involving them.
  *
  * \param[in] instance1  First instance to compare
  * \param[in] instance2  Second instance to compare
  *
  * \return A negative number if \p instance1 should be assigned first,
  *         a positive number if \p instance2 should be assigned first,
  *         or 0 if assignment order doesn't matter
  */
 static int
 cmp_instance_by_colocation(const pe_resource_t *instance1,
                            const pe_resource_t *instance2)
 {
     int rc = 0;
     pe_node_t *node1 = NULL;
     pe_node_t *node2 = NULL;
     pe_node_t *current_node1 = pe__current_node(instance1);
     pe_node_t *current_node2 = pe__current_node(instance2);
     GHashTable *colocated_scores1 = NULL;
     GHashTable *colocated_scores2 = NULL;
 
     CRM_ASSERT((instance1 != NULL) && (instance1->parent != NULL)
                && (instance2 != NULL) && (instance2->parent != NULL)
                && (current_node1 != NULL) && (current_node2 != NULL));
 
     // Create node tables initialized with each node
     colocated_scores1 = new_node_table(current_node1);
     colocated_scores2 = new_node_table(current_node2);
 
     // Apply parental colocations
     apply_parent_colocations(instance1, &colocated_scores1);
     apply_parent_colocations(instance2, &colocated_scores2);
 
     // Find original nodes again, with scores updated for colocations
     node1 = g_hash_table_lookup(colocated_scores1, current_node1->details->id);
     node2 = g_hash_table_lookup(colocated_scores2, current_node2->details->id);
 
     // Compare nodes by updated scores
     if (node1->weight < node2->weight) {
         crm_trace("Assign %s (%d on %s) after %s (%d on %s)",
                   instance1->id, node1->weight, pe__node_name(node1),
                   instance2->id, node2->weight, pe__node_name(node2));
         rc = 1;
 
     } else if (node1->weight > node2->weight) {
         crm_trace("Assign %s (%d on %s) before %s (%d on %s)",
                   instance1->id, node1->weight, pe__node_name(node1),
                   instance2->id, node2->weight, pe__node_name(node2));
         rc = -1;
     }
 
     g_hash_table_destroy(colocated_scores1);
     g_hash_table_destroy(colocated_scores2);
     return rc;
 }
 
 /*!
  * \internal
  * \brief Check whether a resource or any of its children are failed
  *
  * \param[in] rsc  Resource to check
  *
  * \return true if \p rsc or any of its children are failed, otherwise false
  */
 static bool
 did_fail(const pe_resource_t *rsc)
 {
     if (pcmk_is_set(rsc->flags, pe_rsc_failed)) {
         return true;
     }
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         if (did_fail((const pe_resource_t *) iter->data)) {
             return true;
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Check whether a node is allowed to run a resource
  *
  * \param[in]     rsc   Resource to check
  * \param[in,out] node  Node to check (will be set NULL if not allowed)
  *
  * \return true if *node is either NULL or allowed for \p rsc, otherwise false
  */
 static bool
 node_is_allowed(const pe_resource_t *rsc, pe_node_t **node)
 {
     if (*node != NULL) {
         pe_node_t *allowed = pe_hash_table_lookup(rsc->allowed_nodes,
                                                   (*node)->details->id);
         if ((allowed == NULL) || (allowed->weight < 0)) {
             pe_rsc_trace(rsc, "%s: current location (%s) is unavailable",
                          rsc->id, pe__node_name(*node));
             *node = NULL;
             return false;
         }
     }
     return true;
 }
 
 /*!
  * \internal
  * \brief Compare two clone or bundle instances' instance numbers
  *
  * \param[in] a  First instance to compare
  * \param[in] b  Second instance to compare
  *
  * \return A negative number if \p a's instance number is lower,
  *         a positive number if \p b's instance number is lower,
  *         or 0 if their instance numbers are the same
  */
 gint
 pcmk__cmp_instance_number(gconstpointer a, gconstpointer b)
 {
     const pe_resource_t *instance1 = (const pe_resource_t *) a;
     const pe_resource_t *instance2 = (const pe_resource_t *) b;
     char *div1 = NULL;
     char *div2 = NULL;
 
     CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
 
     // Clone numbers are after a colon, bundle numbers after a dash
     div1 = strrchr(instance1->id, ':');
     if (div1 == NULL) {
         div1 = strrchr(instance1->id, '-');
     }
     div2 = strrchr(instance2->id, ':');
     if (div2 == NULL) {
         div2 = strrchr(instance2->id, '-');
     }
     CRM_ASSERT((div1 != NULL) && (div2 != NULL));
 
     return (gint) (strtol(div1 + 1, NULL, 10) - strtol(div2 + 1, NULL, 10));
 }
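 /* For example (hypothetical IDs), "rsc:2" vs. "rsc:10" yields 2 - 10 = -8,
  * so instance 2 sorts first; a bundle ID such as "base-bundle-1" is parsed
  * the same way via its trailing dash.
  */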
 
 /*!
  * \internal
  * \brief Compare clone or bundle instances according to assignment order
  *
  * Compare two clone or bundle instances according to the order they should be
  * assigned to nodes, preferring (in order):
  *
  *  - Active instance that is less multiply active
  *  - Instance that is not active on a disallowed node
  *  - Instance with higher configured priority
  *  - Active instance whose current node can run resources
  *  - Active instance whose parent is allowed on current node
  *  - Active instance whose current node has fewer other instances
  *  - Active instance
  *  - Instance that isn't failed
  *  - Instance whose colocations result in higher score on current node
  *  - Instance with lower ID in lexicographic order
  *
  * \param[in] a          First instance to compare
  * \param[in] b          Second instance to compare
  *
  * \return A negative number if \p a should be assigned first,
  *         a positive number if \p b should be assigned first,
  *         or 0 if assignment order doesn't matter
  */
 gint
 pcmk__cmp_instance(gconstpointer a, gconstpointer b)
 {
     int rc = 0;
     pe_node_t *node1 = NULL;
     pe_node_t *node2 = NULL;
     unsigned int nnodes1 = 0;
     unsigned int nnodes2 = 0;
 
     bool can1 = true;
     bool can2 = true;
 
     const pe_resource_t *instance1 = (const pe_resource_t *) a;
     const pe_resource_t *instance2 = (const pe_resource_t *) b;
 
     CRM_ASSERT((instance1 != NULL) && (instance2 != NULL));
 
     node1 = instance1->fns->active_node(instance1, &nnodes1, NULL);
     node2 = instance2->fns->active_node(instance2, &nnodes2, NULL);
 
     /* If both instances are running and at least one is multiply
      * active, prefer instance that's running on fewer nodes.
      */
     if ((nnodes1 > 0) && (nnodes2 > 0)) {
         if (nnodes1 < nnodes2) {
             crm_trace("Assign %s (active on %d) before %s (active on %d): "
                       "less multiply active",
                       instance1->id, nnodes1, instance2->id, nnodes2);
             return -1;
 
         } else if (nnodes1 > nnodes2) {
             crm_trace("Assign %s (active on %d) after %s (active on %d): "
                       "more multiply active",
                       instance1->id, nnodes1, instance2->id, nnodes2);
             return 1;
         }
     }
 
     /* An instance that is either inactive or active on an allowed node is
      * preferred over an instance that is active on a no-longer-allowed node.
      */
     can1 = node_is_allowed(instance1, &node1);
     can2 = node_is_allowed(instance2, &node2);
     if (can1 && !can2) {
         crm_trace("Assign %s before %s: not active on a disallowed node",
                   instance1->id, instance2->id);
         return -1;
 
     } else if (!can1 && can2) {
         crm_trace("Assign %s after %s: active on a disallowed node",
                   instance1->id, instance2->id);
         return 1;
     }
 
     // Prefer instance with higher configured priority
     if (instance1->priority > instance2->priority) {
         crm_trace("Assign %s before %s: priority (%d > %d)",
                   instance1->id, instance2->id,
                   instance1->priority, instance2->priority);
         return -1;
 
     } else if (instance1->priority < instance2->priority) {
         crm_trace("Assign %s after %s: priority (%d < %d)",
                   instance1->id, instance2->id,
                   instance1->priority, instance2->priority);
         return 1;
     }
 
     // Prefer active instance
     if ((node1 == NULL) && (node2 == NULL)) {
         crm_trace("No assignment preference for %s vs. %s: inactive",
                   instance1->id, instance2->id);
         return 0;
 
     } else if (node1 == NULL) {
         crm_trace("Assign %s after %s: active", instance1->id, instance2->id);
         return 1;
 
     } else if (node2 == NULL) {
         crm_trace("Assign %s before %s: active", instance1->id, instance2->id);
         return -1;
     }
 
     // Prefer instance whose current node can run resources
     can1 = pcmk__node_available(node1, false, false);
     can2 = pcmk__node_available(node2, false, false);
     if (can1 && !can2) {
         crm_trace("Assign %s before %s: current node can run resources",
                   instance1->id, instance2->id);
         return -1;
 
     } else if (!can1 && can2) {
         crm_trace("Assign %s after %s: current node can't run resources",
                   instance1->id, instance2->id);
         return 1;
     }
 
     // Prefer instance whose parent is allowed to run on instance's current node
     node1 = pcmk__top_allowed_node(instance1, node1);
     node2 = pcmk__top_allowed_node(instance2, node2);
     if ((node1 == NULL) && (node2 == NULL)) {
         crm_trace("No assignment preference for %s vs. %s: "
                   "parent not allowed on either instance's current node",
                   instance1->id, instance2->id);
         return 0;
 
     } else if (node1 == NULL) {
         crm_trace("Assign %s after %s: parent not allowed on current node",
                   instance1->id, instance2->id);
         return 1;
 
     } else if (node2 == NULL) {
         crm_trace("Assign %s before %s: parent allowed on current node",
                   instance1->id, instance2->id);
         return -1;
     }
 
     // Prefer instance whose current node is running fewer other instances
     if (node1->count < node2->count) {
         crm_trace("Assign %s before %s: fewer active instances on current node",
                   instance1->id, instance2->id);
         return -1;
 
     } else if (node1->count > node2->count) {
         crm_trace("Assign %s after %s: more active instances on current node",
                   instance1->id, instance2->id);
         return 1;
     }
 
     // Prefer instance that isn't failed
     can1 = did_fail(instance1);
     can2 = did_fail(instance2);
     if (!can1 && can2) {
         crm_trace("Assign %s before %s: not failed",
                   instance1->id, instance2->id);
         return -1;
     } else if (can1 && !can2) {
         crm_trace("Assign %s after %s: failed",
                   instance1->id, instance2->id);
         return 1;
     }
 
     // Prefer instance with higher cumulative colocation score on current node
     rc = cmp_instance_by_colocation(instance1, instance2);
     if (rc != 0) {
         return rc;
     }
 
     // Prefer instance with lower instance number
     rc = pcmk__cmp_instance_number(instance1, instance2);
     if (rc < 0) {
         crm_trace("Assign %s before %s: instance number",
                   instance1->id, instance2->id);
     } else if (rc > 0) {
         crm_trace("Assign %s after %s: instance number",
                   instance1->id, instance2->id);
     } else {
         crm_trace("No assignment preference for %s vs. %s",
                   instance1->id, instance2->id);
     }
     return rc;
 }
 
 /*!
  * \internal
  * \brief Choose a node for an instance
  *
  * \param[in,out] instance      Clone instance or bundle replica container
  * \param[in]     prefer        If not NULL, attempt early assignment to this
  *                              node, if still the best choice; otherwise,
  *                              perform final assignment
  * \param[in]     max_per_node  Assign at most this many instances to one node
  *
  * \return true if \p instance could be assigned to a node, otherwise false
  */
 static bool
 assign_instance(pe_resource_t *instance, const pe_node_t *prefer,
                 int max_per_node)
 {
     pe_node_t *chosen = NULL;
     pe_node_t *allowed = NULL;
 
     CRM_ASSERT(instance != NULL);
     pe_rsc_trace(instance, "Assigning %s (preferring %s)", instance->id,
                  ((prefer == NULL)? "no node" : prefer->details->uname));
 
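     /* If the instance is not provisional, it has already been assigned;
      * just report whether it ended up with a node
      */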
     CRM_CHECK(pcmk_is_set(instance->flags, pe_rsc_provisional),
               return (instance->fns->location(instance, NULL, FALSE) != NULL));
 
     if (pcmk_is_set(instance->flags, pe_rsc_allocating)) {
         pe_rsc_debug(instance,
                      "Assignment loop detected involving %s colocations",
                      instance->id);
         return false;
     }
 
     if (prefer != NULL) { // Possible early assignment to preferred node
 
         // Get the copy of the preferred node that has this instance's scores
         allowed = g_hash_table_lookup(instance->allowed_nodes,
                                       prefer->details->id);
 
         if ((allowed == NULL) || (allowed->weight < 0)) {
             pe_rsc_trace(instance,
                          "Not assigning %s to preferred node %s: unavailable",
                          instance->id, pe__node_name(prefer));
             return false;
         }
     }
 
     ban_unavailable_allowed_nodes(instance, max_per_node);
 
     if (prefer == NULL) { // Final assignment
         chosen = instance->cmds->assign(instance, NULL);
 
     } else { // Possible early assignment to preferred node
-        GHashTable *backup = pcmk__copy_node_table(instance->allowed_nodes);
+        GHashTable *backup = NULL;
 
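+        /* Back up the allowed node tables of the instance and all of its
+         * descendants, in case the tentative assignment must be reverted
+         */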
+        pcmk__copy_node_tables(instance, &backup);
         chosen = instance->cmds->assign(instance, prefer);
 
         // Revert nodes if preferred node won't be assigned
         if ((chosen != NULL) && !pe__same_node(chosen, prefer)) {
             crm_info("Not assigning %s to preferred node %s: %s is better",
                      instance->id, pe__node_name(prefer),
                      pe__node_name(chosen));
-            g_hash_table_destroy(instance->allowed_nodes);
-            instance->allowed_nodes = backup;
+            pcmk__restore_node_tables(instance, backup);
             pcmk__unassign_resource(instance);
             chosen = NULL;
-        } else if (backup != NULL) {
-            g_hash_table_destroy(backup);
         }
+        g_hash_table_destroy(backup);
     }
 
     // The parent tracks how many instances have been assigned to each node
     if (chosen != NULL) {
         allowed = pcmk__top_allowed_node(instance, chosen);
         if (allowed == NULL) {
             /* The instance is allowed on the node, but its parent isn't. This
              * shouldn't be possible if the resource is managed, and we won't be
              * able to limit the number of instances assigned to the node.
              */
             CRM_LOG_ASSERT(!pcmk_is_set(instance->flags, pe_rsc_managed));
 
         } else {
             allowed->count++;
         }
     }
     return chosen != NULL;
 }
 
 /*!
  * \internal
  * \brief Reset the node counts of a resource's allowed nodes to zero
  *
  * \param[in,out] rsc  Resource to reset
  *
  * \return Number of nodes that are available to run resources
  */
 static unsigned int
 reset_allowed_node_counts(pe_resource_t *rsc)
 {
     unsigned int available_nodes = 0;
     pe_node_t *node = NULL;
     GHashTableIter iter;
 
     g_hash_table_iter_init(&iter, rsc->allowed_nodes);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
         node->count = 0;
         if (pcmk__node_available(node, false, false)) {
             available_nodes++;
         }
     }
     return available_nodes;
 }
 
 /*!
  * \internal
  * \brief Check whether an instance has a preferred node
  *
  * \param[in] rsc               Clone or bundle being assigned (for logs only)
  * \param[in] instance          Clone instance or bundle replica container
  * \param[in] optimal_per_node  Optimal number of instances per node
  *
  * \return Instance's current node if still available, otherwise NULL
  */
 static const pe_node_t *
 preferred_node(const pe_resource_t *rsc, const pe_resource_t *instance,
                int optimal_per_node)
 {
     const pe_node_t *node = NULL;
     const pe_node_t *parent_node = NULL;
 
     // Check whether instance is active, healthy, and not yet assigned
     if ((instance->running_on == NULL)
         || !pcmk_is_set(instance->flags, pe_rsc_provisional)
         || pcmk_is_set(instance->flags, pe_rsc_failed)) {
         return NULL;
     }
 
     // Check whether instance's current node can run resources
     node = pe__current_node(instance);
     if (!pcmk__node_available(node, true, false)) {
         pe_rsc_trace(rsc, "Not assigning %s to %s early (unavailable)",
                      instance->id, pe__node_name(node));
         return NULL;
     }
 
     // Check whether node already has optimal number of instances assigned
     parent_node = pcmk__top_allowed_node(instance, node);
     if ((parent_node != NULL) && (parent_node->count >= optimal_per_node)) {
         pe_rsc_trace(rsc,
                      "Not assigning %s to %s early "
                      "(optimal instances already assigned)",
                      instance->id, pe__node_name(node));
         return NULL;
     }
 
     return node;
 }
 
 /*!
  * \internal
  * \brief Assign collective instances to nodes
  *
  * \param[in,out] collective    Clone or bundle resource being assigned
  * \param[in,out] instances     List of clone instances or bundle containers
  * \param[in]     max_total     Maximum instances to assign in total
  * \param[in]     max_per_node  Maximum instances to assign to any one node
  */
 void
 pcmk__assign_instances(pe_resource_t *collective, GList *instances,
                        int max_total, int max_per_node)
 {
     // Reuse node count to track number of assigned instances
     unsigned int available_nodes = reset_allowed_node_counts(collective);
 
     int optimal_per_node = 0;
     int assigned = 0;
     GList *iter = NULL;
     pe_resource_t *instance = NULL;
     const pe_node_t *current = NULL;
 
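     /* Aim for an even spread: the optimal number of instances per node is
      * the total divided by the number of available nodes (but at least 1)
      */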
     if (available_nodes > 0) {
         optimal_per_node = max_total / available_nodes;
     }
     if (optimal_per_node < 1) {
         optimal_per_node = 1;
     }
 
     pe_rsc_debug(collective,
                  "Assigning up to %d %s instance%s to up to %u node%s "
                  "(at most %d per host, %d optimal)",
                  max_total, collective->id, pcmk__plural_s(max_total),
                  available_nodes, pcmk__plural_s(available_nodes),
                  max_per_node, optimal_per_node);
 
     // Assign as many instances as possible to their current location
     for (iter = instances; (iter != NULL) && (assigned < max_total);
          iter = iter->next) {
         instance = (pe_resource_t *) iter->data;
 
         current = preferred_node(collective, instance, optimal_per_node);
         if ((current != NULL)
             && assign_instance(instance, current, max_per_node)) {
             pe_rsc_trace(collective, "Assigned %s to current node %s",
                          instance->id, pe__node_name(current));
             assigned++;
         }
     }
 
     pe_rsc_trace(collective, "Assigned %d of %d instance%s to current node",
                  assigned, max_total, pcmk__plural_s(max_total));
 
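     // Assign any remaining instances to the best nodes still allowed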
     for (iter = instances; iter != NULL; iter = iter->next) {
         instance = (pe_resource_t *) iter->data;
 
         if (!pcmk_is_set(instance->flags, pe_rsc_provisional)) {
             continue; // Already assigned
         }
 
         if (instance->running_on != NULL) {
             current = pe__current_node(instance);
             if (pcmk__top_allowed_node(instance, current) == NULL) {
                 const char *unmanaged = "";
 
                 if (!pcmk_is_set(instance->flags, pe_rsc_managed)) {
                     unmanaged = "Unmanaged resource ";
                 }
                 crm_notice("%s%s is running on %s which is no longer allowed",
                            unmanaged, instance->id, pe__node_name(current));
             }
         }
 
         if (assigned >= max_total) {
             pe_rsc_debug(collective,
                          "Not assigning %s because maximum %d instances "
                          "already assigned",
                          instance->id, max_total);
             resource_location(instance, NULL, -INFINITY,
                               "collective_limit_reached", collective->cluster);
 
         } else if (assign_instance(instance, NULL, max_per_node)) {
             assigned++;
         }
     }
 
     pe_rsc_debug(collective, "Assigned %d of %d possible instance%s of %s",
                  assigned, max_total, pcmk__plural_s(max_total),
                  collective->id);
 }
 
 enum instance_state {
     instance_starting   = (1 << 0),
     instance_stopping   = (1 << 1),
 
     /* This indicates that some instance is restarting. It's not the same as
      * instance_starting|instance_stopping, which would indicate that some
      * instance is starting, and some instance (not necessarily the same one) is
      * stopping.
      */
     instance_restarting = (1 << 2),
 
     instance_active     = (1 << 3),
 
     instance_all        = instance_starting|instance_stopping
                           |instance_restarting|instance_active,
 };
 
 /*!
  * \internal
  * \brief Check whether an instance is active, starting, and/or stopping
  *
  * \param[in]     instance  Clone instance or bundle replica container
  * \param[in,out] state     Whether any instance is starting, stopping, etc.
  */
 static void
 check_instance_state(const pe_resource_t *instance, uint32_t *state)
 {
     const GList *iter = NULL;
     uint32_t instance_state = 0; // State of just this instance
 
     // No need to check further if all conditions have already been detected
     if (pcmk_all_flags_set(*state, instance_all)) {
         return;
     }
 
     // If instance is a collective (a cloned group), check its children instead
     if (instance->variant > pe_native) {
         for (iter = instance->children;
              (iter != NULL) && !pcmk_all_flags_set(*state, instance_all);
              iter = iter->next) {
             check_instance_state((const pe_resource_t *) iter->data, state);
         }
         return;
     }
 
     // If we get here, instance is a primitive
 
     if (instance->running_on != NULL) {
         instance_state |= instance_active;
     }
 
     // Check each of the instance's actions for runnable start or stop
     for (iter = instance->actions;
          (iter != NULL) && !pcmk_all_flags_set(instance_state,
                                                instance_starting
                                                |instance_stopping);
          iter = iter->next) {
 
         const pe_action_t *action = (const pe_action_t *) iter->data;
         const bool optional = pcmk_is_set(action->flags, pe_action_optional);
 
         if (pcmk__str_eq(RSC_START, action->task, pcmk__str_none)) {
             if (!optional && pcmk_is_set(action->flags, pe_action_runnable)) {
                 pe_rsc_trace(instance, "Instance is starting due to %s",
                              action->uuid);
                 instance_state |= instance_starting;
             } else {
                 pe_rsc_trace(instance, "%s doesn't affect %s state (%s)",
                              action->uuid, instance->id,
                              (optional? "optional" : "unrunnable"));
             }
 
         } else if (pcmk__str_eq(RSC_STOP, action->task, pcmk__str_none)) {
             /* Only stop actions can be pseudo-actions for primitives. That
              * indicates that the node they are on is being fenced, so the stop
              * is implied rather than actually executed.
              */
             if (!optional
                 && pcmk_any_flags_set(action->flags,
                                       pe_action_pseudo|pe_action_runnable)) {
                 pe_rsc_trace(instance, "Instance is stopping due to %s",
                              action->uuid);
                 instance_state |= instance_stopping;
             } else {
                 pe_rsc_trace(instance, "%s doesn't affect %s state (%s)",
                              action->uuid, instance->id,
                              (optional? "optional" : "unrunnable"));
             }
         }
     }
 
     if (pcmk_all_flags_set(instance_state,
                            instance_starting|instance_stopping)) {
         instance_state |= instance_restarting;
     }
     *state |= instance_state;
 }
 
 /*!
  * \internal
  * \brief Create actions for collective resource instances
  *
  * \param[in,out] collective    Clone or bundle resource to create actions for
  * \param[in,out] instances     List of clone instances or bundle containers
  */
 void
 pcmk__create_instance_actions(pe_resource_t *collective, GList *instances)
 {
     uint32_t state = 0;
 
     pe_action_t *stop = NULL;
     pe_action_t *stopped = NULL;
 
     pe_action_t *start = NULL;
     pe_action_t *started = NULL;
 
     pe_rsc_trace(collective, "Creating collective instance actions for %s",
                  collective->id);
 
     // Create actions for each instance appropriate to its variant
     for (GList *iter = instances; iter != NULL; iter = iter->next) {
         pe_resource_t *instance = (pe_resource_t *) iter->data;
 
         instance->cmds->create_actions(instance);
         check_instance_state(instance, &state);
     }
 
     // Create pseudo-actions for rsc start and started
     start = pe__new_rsc_pseudo_action(collective, RSC_START,
                                       !pcmk_is_set(state, instance_starting),
                                       true);
     started = pe__new_rsc_pseudo_action(collective, RSC_STARTED,
                                         !pcmk_is_set(state, instance_starting),
                                         false);
     started->priority = INFINITY;
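     // "Started" is runnable if any instance is active or will be started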
     if (pcmk_any_flags_set(state, instance_active|instance_starting)) {
         pe__set_action_flags(started, pe_action_runnable);
     }
 
     // Create pseudo-actions for rsc stop and stopped
     stop = pe__new_rsc_pseudo_action(collective, RSC_STOP,
                                      !pcmk_is_set(state, instance_stopping),
                                      true);
     stopped = pe__new_rsc_pseudo_action(collective, RSC_STOPPED,
                                         !pcmk_is_set(state, instance_stopping),
                                         true);
     stopped->priority = INFINITY;
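     // The stop can satisfy migration orderings only if nothing is restarting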
     if (!pcmk_is_set(state, instance_restarting)) {
         pe__set_action_flags(stop, pe_action_migrate_runnable);
     }
 
     if (collective->variant == pe_clone) {
         pe__create_clone_notif_pseudo_ops(collective, start, started, stop,
                                           stopped);
     }
 }
 
 /*!
  * \internal
  * \brief Get a list of clone instances or bundle replica containers
  *
  * \param[in] rsc  Clone or bundle resource
  *
  * \return Clone instances if \p rsc is a clone, or a newly created list of
  *         \p rsc's replica containers if \p rsc is a bundle
  * \note The caller must call free_instance_list() on the result when the list
  *       is no longer needed.
  */
 static inline GList *
 get_instance_list(const pe_resource_t *rsc)
 {
     if (rsc->variant == pe_container) {
         return pe__bundle_containers(rsc);
     } else {
         return rsc->children;
     }
 }
 
 /*!
  * \internal
  * \brief Free any memory created by get_instance_list()
  *
  * \param[in]     rsc   Clone or bundle resource passed to get_instance_list()
  * \param[in,out] list  Return value of get_instance_list() for \p rsc
  */
 static inline void
 free_instance_list(const pe_resource_t *rsc, GList *list)
 {
     if (list != rsc->children) {
         g_list_free(list);
     }
 }
 
 /*!
  * \internal
  * \brief Check whether an instance is compatible with a role and node
  *
  * \param[in] instance  Clone instance or bundle replica container
  * \param[in] node      Instance must match this node
  * \param[in] role      If not RSC_ROLE_UNKNOWN, instance must match this role
  * \param[in] current   If true, compare instance's original node and role,
  *                      otherwise compare assigned next node and role
  *
  * \return true if \p instance is compatible with \p node and \p role,
  *         otherwise false
  */
 bool
 pcmk__instance_matches(const pe_resource_t *instance, const pe_node_t *node,
                        enum rsc_role_e role, bool current)
 {
     pe_node_t *instance_node = NULL;
 
     CRM_CHECK((instance != NULL) && (node != NULL), return false);
 
     if ((role != RSC_ROLE_UNKNOWN)
         && (role != instance->fns->state(instance, current))) {
         pe_rsc_trace(instance,
                      "%s is not a compatible instance (role is not %s)",
                      instance->id, role2text(role));
         return false;
     }
 
     if (!is_set_recursive(instance, pe_rsc_block, true)) {
         // We only want instances that haven't failed
         instance_node = instance->fns->location(instance, NULL, current);
     }
 
     if (instance_node == NULL) {
         pe_rsc_trace(instance,
                      "%s is not a compatible instance (not assigned to a node)",
                      instance->id);
         return false;
     }
 
     if (!pe__same_node(instance_node, node)) {
         pe_rsc_trace(instance,
                      "%s is not a compatible instance (assigned to %s not %s)",
                      instance->id, pe__node_name(instance_node),
                      pe__node_name(node));
         return false;
     }
 
     return true;
 }
 
 /*!
  * \internal
  * \brief Find an instance that matches a given resource by node and role
  *
  * \param[in] match_rsc  Resource that instance must match (for logging only)
  * \param[in] rsc        Clone or bundle resource to check for matching instance
  * \param[in] node       Instance must match this node
  * \param[in] role       If not RSC_ROLE_UNKNOWN, instance must match this role
  * \param[in] current    If true, compare instance's original node and role,
  *                       otherwise compare assigned next node and role
  *
  * \return \p rsc instance matching \p node and \p role if any, otherwise NULL
  */
 static pe_resource_t *
 find_compatible_instance_on_node(const pe_resource_t *match_rsc,
                                  const pe_resource_t *rsc,
                                  const pe_node_t *node, enum rsc_role_e role,
                                  bool current)
 {
     GList *instances = NULL;
 
     instances = get_instance_list(rsc);
     for (GList *iter = instances; iter != NULL; iter = iter->next) {
         pe_resource_t *instance = (pe_resource_t *) iter->data;
 
         if (pcmk__instance_matches(instance, node, role, current)) {
             pe_rsc_trace(match_rsc,
                          "Found %s %s instance %s compatible with %s on %s",
                          role == RSC_ROLE_UNKNOWN? "matching" : role2text(role),
                          rsc->id, instance->id, match_rsc->id,
                          pe__node_name(node));
             free_instance_list(rsc, instances); // Only frees list, not contents
             return instance;
         }
     }
     free_instance_list(rsc, instances);
 
     pe_rsc_trace(match_rsc, "No %s %s instance found compatible with %s on %s",
                  ((role == RSC_ROLE_UNKNOWN)? "matching" : role2text(role)),
                  rsc->id, match_rsc->id, pe__node_name(node));
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Find a clone instance or bundle container compatible with a resource
  *
  * \param[in] match_rsc  Resource that instance must match
  * \param[in] rsc        Clone or bundle resource to check for matching instance
  * \param[in] role       If not RSC_ROLE_UNKNOWN, instance must match this role
  * \param[in] current    If true, compare instance's original node and role,
  *                       otherwise compare assigned next node and role
  *
  * \return Compatible (by \p role and \p match_rsc location) instance of \p rsc
  *         if any, otherwise NULL
  */
 pe_resource_t *
 pcmk__find_compatible_instance(const pe_resource_t *match_rsc,
                                const pe_resource_t *rsc, enum rsc_role_e role,
                                bool current)
 {
     pe_resource_t *instance = NULL;
     GList *nodes = NULL;
     const pe_node_t *node = match_rsc->fns->location(match_rsc, NULL, current);
 
     // If match_rsc has a node, check only that node
     if (node != NULL) {
         return find_compatible_instance_on_node(match_rsc, rsc, node, role,
                                                 current);
     }
 
     // Otherwise check for an instance matching any of match_rsc's allowed nodes
     nodes = pcmk__sort_nodes(g_hash_table_get_values(match_rsc->allowed_nodes),
                              NULL);
     for (GList *iter = nodes; (iter != NULL) && (instance == NULL);
          iter = iter->next) {
         instance = find_compatible_instance_on_node(match_rsc, rsc,
                                                     (pe_node_t *) iter->data,
                                                     role, current);
     }
 
     if (instance == NULL) {
         pe_rsc_debug(rsc, "No %s instance found compatible with %s",
                      rsc->id, match_rsc->id);
     }
     g_list_free(nodes);
     return instance;
 }
 
 /*!
  * \internal
  * \brief Unassign an instance if mandatory ordering has no interleave match
  *
  * \param[in]     first          'First' action in an ordering
  * \param[in]     then           'Then' action in an ordering
  * \param[in,out] then_instance  'Then' instance that has no interleave match
  * \param[in]     type           Group of enum pe_ordering flags to apply
  * \param[in]     current        If true, "then" action is stopped or demoted
  *
  * \return true if \p then_instance was unassigned, otherwise false
  */
 static bool
 unassign_if_mandatory(const pe_action_t *first, const pe_action_t *then,
                       pe_resource_t *then_instance, uint32_t type, bool current)
 {
     // Allow "then" instance to go down even without an interleave match
     if (current) {
         pe_rsc_trace(then->rsc,
                      "%s has no instance to order before stopping "
                      "or demoting %s",
                      first->rsc->id, then_instance->id);
 
     /* If the "first" action must be runnable, but there is no "first"
      * instance, the "then" instance must not be allowed to come up.
      */
     } else if (pcmk_any_flags_set(type, pe_order_runnable_left
                                         |pe_order_implies_then)) {
         pe_rsc_info(then->rsc,
                     "Inhibiting %s from being active "
                     "because there is no %s instance to interleave",
                     then_instance->id, first->rsc->id);
         return pcmk__assign_resource(then_instance, NULL, true);
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Find first matching action for a clone instance or bundle container
  *
  * \param[in] action       Action in an interleaved ordering
  * \param[in] instance     Clone instance or bundle container being interleaved
  * \param[in] action_name  Action to look for
  * \param[in] node         If not NULL, require action to be on this node
  * \param[in] for_first    If true, \p instance is the 'first' resource in the
  *                         ordering, otherwise it is the 'then' resource
  *
  * \return First action for \p instance (or in some cases if \p instance is a
  *         bundle container, its containerized resource) that matches
  *         \p action_name and \p node if any, otherwise NULL
  */
 static pe_action_t *
 find_instance_action(const pe_action_t *action, const pe_resource_t *instance,
                      const char *action_name, const pe_node_t *node,
                      bool for_first)
 {
     const pe_resource_t *rsc = NULL;
     pe_action_t *matching_action = NULL;
 
     /* If instance is a bundle container, sometimes we should interleave the
      * action for the container itself, and sometimes for the containerized
      * resource.
      *
      * For example, given "start bundle A then bundle B", B likely requires the
      * service inside A's container to be active, rather than just the
      * container, so we should interleave the action for A's containerized
      * resource. On the other hand, it's possible B's container itself requires
      * something from A, so we should interleave the action for B's container.
      *
      * Essentially, for 'first', we should use the containerized resource for
      * everything except stop, and for 'then', we should use the container for
      * everything except promote and demote (which can only be performed on the
      * containerized resource).
      */
     if ((for_first && !pcmk__str_any_of(action->task, CRMD_ACTION_STOP,
                                         CRMD_ACTION_STOPPED, NULL))
 
         || (!for_first && pcmk__str_any_of(action->task, CRMD_ACTION_PROMOTE,
                                            CRMD_ACTION_PROMOTED,
                                            CRMD_ACTION_DEMOTE,
                                            CRMD_ACTION_DEMOTED, NULL))) {
 
         rsc = pe__get_rsc_in_container(instance);
     }
     if (rsc == NULL) {
         rsc = instance; // No containerized resource, use instance itself
     } else {
         node = NULL; // Containerized actions are on bundle-created guest
     }
 
     matching_action = find_first_action(rsc->actions, NULL, action_name, node);
     if (matching_action != NULL) {
         return matching_action;
     }
 
     if (pcmk_is_set(instance->flags, pe_rsc_orphan)
         || pcmk__str_any_of(action_name, RSC_STOP, RSC_DEMOTE, NULL)) {
         crm_trace("No %s action found for %s%s",
                   action_name,
                   pcmk_is_set(instance->flags, pe_rsc_orphan)? "orphan " : "",
                   instance->id);
     } else {
         crm_err("No %s action found for %s to interleave (bug?)",
                 action_name, instance->id);
     }
     return NULL;
 }
 
 /*!
  * \internal
  * \brief Get the original action name of a bundle or clone action
  *
  * Given an action for a bundle or clone, get the original action name,
  * mapping notify to the action being notified, and if the instances are
  * primitives, mapping completion actions to the action that was completed
  * (for example, stopped to stop).
  *
  * \param[in] action  Clone or bundle action to check
  *
  * \return Original action name for \p action
  */
 static const char *
 orig_action_name(const pe_action_t *action)
 {
     const pe_resource_t *instance = action->rsc->children->data; // Any instance
     char *action_type = NULL;
     const char *action_name = action->task;
     enum action_tasks orig_task = no_action;
 
     if (pcmk__strcase_any_of(action->task, CRMD_ACTION_NOTIFY,
                              CRMD_ACTION_NOTIFIED, NULL)) {
         // action->uuid is RSC_(confirmed-){pre,post}_notify_ACTION_INTERVAL
         CRM_CHECK(parse_op_key(action->uuid, NULL, &action_type, NULL),
                   return task2text(no_action));
         action_name = strstr(action_type, "_notify_");
         CRM_CHECK(action_name != NULL, return task2text(no_action));
         action_name += strlen("_notify_");
     }
     orig_task = get_complex_task(instance, action_name);
     free(action_type);
     return task2text(orig_task);
 }
 
 /*!
  * \internal
  * \brief Update two interleaved actions according to an ordering between them
  *
  * Given information about an ordering of two interleaved actions, update the
  * actions' flags (and runnable_before members if appropriate) as appropriate
  * for the ordering. Effects may cascade to other orderings involving the
  * actions as well.
  *
  * \param[in,out] first     'First' action in an ordering
  * \param[in,out] then      'Then' action in an ordering
  * \param[in]     node      If not NULL, limit scope of ordering to this node
  * \param[in]     filter    Action flags to limit scope of certain updates (may
  *                          include pe_action_optional to affect only mandatory
  *                          actions, and pe_action_runnable to affect only
  *                          runnable actions)
  * \param[in]     type      Group of enum pe_ordering flags to apply
  *
  * \return Group of enum pcmk__updated flags indicating what was updated
  */
 static uint32_t
 update_interleaved_actions(pe_action_t *first, pe_action_t *then,
                            const pe_node_t *node, uint32_t filter,
                            uint32_t type)
 {
     GList *instances = NULL;
     uint32_t changed = pcmk__updated_none;
     const char *orig_first_task = orig_action_name(first);
 
     // Stops and demotes must be interleaved with instance on current node
     bool current = pcmk__ends_with(first->uuid, "_" CRMD_ACTION_STOPPED "_0")
                    || pcmk__ends_with(first->uuid,
                                       "_" CRMD_ACTION_DEMOTED "_0");
 
     // Update the specified actions for each "then" instance individually
     instances = get_instance_list(then->rsc);
     for (GList *iter = instances; iter != NULL; iter = iter->next) {
         pe_resource_t *first_instance = NULL;
         pe_resource_t *then_instance = iter->data;
 
         pe_action_t *first_action = NULL;
         pe_action_t *then_action = NULL;
 
         // Find a "first" instance to interleave with this "then" instance
         first_instance = pcmk__find_compatible_instance(then_instance,
                                                         first->rsc,
                                                         RSC_ROLE_UNKNOWN,
                                                         current);
 
         if (first_instance == NULL) { // No instance can be interleaved
             if (unassign_if_mandatory(first, then, then_instance, type,
                                       current)) {
                 pcmk__set_updated_flags(changed, first, pcmk__updated_then);
             }
             continue;
         }
 
         first_action = find_instance_action(first, first_instance,
                                             orig_first_task, node, true);
         if (first_action == NULL) {
             continue;
         }
 
         then_action = find_instance_action(then, then_instance, then->task,
                                            node, false);
         if (then_action == NULL) {
             continue;
         }
 
         if (order_actions(first_action, then_action, type)) {
             pcmk__set_updated_flags(changed, first,
                                     pcmk__updated_first|pcmk__updated_then);
         }
 
         changed |= then_instance->cmds->update_ordered_actions(
             first_action, then_action, node,
             first_instance->cmds->action_flags(first_action, node), filter,
             type, then->rsc->cluster);
     }
     free_instance_list(then->rsc, instances);
     return changed;
 }
 
 /*!
  * \internal
  * \brief Check whether two actions in an ordering can be interleaved
  *
  * \param[in] first  'First' action in the ordering
  * \param[in] then   'Then' action in the ordering
  *
  * \return true if \p first and \p then can be interleaved, otherwise false
  */
 static bool
 can_interleave_actions(const pe_action_t *first, const pe_action_t *then)
 {
     bool interleave = false;
     pe_resource_t *rsc = NULL;
 
     if ((first->rsc == NULL) || (then->rsc == NULL)) {
         crm_trace("Not interleaving %s with %s: not resource actions",
                   first->uuid, then->uuid);
         return false;
     }
 
     if (first->rsc == then->rsc) {
         crm_trace("Not interleaving %s with %s: same resource",
                   first->uuid, then->uuid);
         return false;
     }
 
     if ((first->rsc->variant < pe_clone) || (then->rsc->variant < pe_clone)) {
         crm_trace("Not interleaving %s with %s: not clones or bundles",
                   first->uuid, then->uuid);
         return false;
     }
 
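     /* For stops and demotes of the "then" resource, the "first" resource's
      * interleave setting is what matters; otherwise, use the "then" one's
      */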
     if (pcmk__ends_with(then->uuid, "_stop_0")
         || pcmk__ends_with(then->uuid, "_demote_0")) {
         rsc = first->rsc;
     } else {
         rsc = then->rsc;
     }
 
     interleave = crm_is_true(g_hash_table_lookup(rsc->meta,
                                                  XML_RSC_ATTR_INTERLEAVE));
     pe_rsc_trace(rsc, "'%s then %s' will %sbe interleaved (based on %s)",
                  first->uuid, then->uuid, (interleave? "" : "not "), rsc->id);
     return interleave;
 }
 
 /*!
  * \internal
  * \brief Update non-interleaved instance actions according to an ordering
  *
  * Given information about an ordering of two non-interleaved actions, update
  * the actions' flags (and runnable_before members if appropriate) as
  * appropriate for the ordering. Effects may cascade to other orderings
  * involving the actions as well.
  *
  * \param[in,out] instance  Clone instance or bundle container
  * \param[in,out] first     "First" action in ordering
  * \param[in]     then      "Then" action in ordering (for \p instance's parent)
  * \param[in]     node      If not NULL, limit scope of ordering to this node
  * \param[in]     flags     Action flags for \p first for ordering purposes
  * \param[in]     filter    Action flags to limit scope of certain updates (may
  *                          include pe_action_optional to affect only mandatory
  *                          actions, and pe_action_runnable to affect only
  *                          runnable actions)
  * \param[in]     type      Group of enum pe_ordering flags to apply
  *
  * \return Group of enum pcmk__updated flags indicating what was updated
  */
 static uint32_t
 update_noninterleaved_actions(pe_resource_t *instance, pe_action_t *first,
                               const pe_action_t *then, const pe_node_t *node,
                               uint32_t flags, uint32_t filter, uint32_t type)
 {
     pe_action_t *instance_action = NULL;
     uint32_t instance_flags = 0;
     uint32_t changed = pcmk__updated_none;
 
     // Check whether instance has an equivalent of "then" action
     instance_action = find_first_action(instance->actions, NULL, then->task,
                                         node);
     if (instance_action == NULL) {
         return changed;
     }
 
     // Check whether action is runnable
     instance_flags = instance->cmds->action_flags(instance_action, node);
     if (!pcmk_is_set(instance_flags, pe_action_runnable)) {
         return changed;
     }
 
     // If so, update actions for the instance
     changed = instance->cmds->update_ordered_actions(first, instance_action,
                                                      node, flags, filter, type,
                                                      instance->cluster);
 
     // Propagate any changes to later actions
     if (pcmk_is_set(changed, pcmk__updated_then)) {
         for (GList *after_iter = instance_action->actions_after;
              after_iter != NULL; after_iter = after_iter->next) {
             pe_action_wrapper_t *after = after_iter->data;
 
             pcmk__update_action_for_orderings(after->action, instance->cluster);
         }
     }
 
     return changed;
 }
 
 /*!
  * \internal
  * \brief Update two actions according to an ordering between them
  *
  * Given information about an ordering of two clone or bundle actions, update
  * the actions' flags (and runnable_before members if appropriate) as
  * appropriate for the ordering. Effects may cascade to other orderings
  * involving the actions as well.
  *
  * \param[in,out] first     'First' action in an ordering
  * \param[in,out] then      'Then' action in an ordering
  * \param[in]     node      If not NULL, limit scope of ordering to this node
  *                          (only used when interleaving instances)
  * \param[in]     flags     Action flags for \p first for ordering purposes
  * \param[in]     filter    Action flags to limit scope of certain updates (may
  *                          include pe_action_optional to affect only mandatory
  *                          actions, and pe_action_runnable to affect only
  *                          runnable actions)
  * \param[in]     type      Group of enum pe_ordering flags to apply
  * \param[in,out] data_set  Cluster working set
  *
  * \return Group of enum pcmk__updated flags indicating what was updated
  */
 uint32_t
 pcmk__instance_update_ordered_actions(pe_action_t *first, pe_action_t *then,
                                       const pe_node_t *node, uint32_t flags,
                                       uint32_t filter, uint32_t type,
                                       pe_working_set_t *data_set)
 {
     CRM_ASSERT((first != NULL) && (then != NULL) && (data_set != NULL));
 
     if (then->rsc == NULL) {
         return pcmk__updated_none;
 
     } else if (can_interleave_actions(first, then)) {
         return update_interleaved_actions(first, then, node, filter, type);
 
     } else {
         uint32_t changed = pcmk__updated_none;
         GList *instances = get_instance_list(then->rsc);
 
         // Update actions for the clone or bundle resource itself
         changed |= pcmk__update_ordered_actions(first, then, node, flags,
                                                 filter, type, data_set);
 
         // Update the 'then' clone instances or bundle containers individually
         for (GList *iter = instances; iter != NULL; iter = iter->next) {
             pe_resource_t *instance = iter->data;
 
             changed |= update_noninterleaved_actions(instance, first, then,
                                                      node, flags, filter, type);
         }
         free_instance_list(then->rsc, instances);
         return changed;
     }
 }
 
 #define pe__clear_action_summary_flags(flags, action, flag) do {        \
         flags = pcmk__clear_flags_as(__func__, __LINE__, LOG_TRACE,     \
                                      "Action summary", action->rsc->id, \
                                      flags, flag, #flag);               \
     } while (0)
 
 /*!
  * \internal
  * \brief Return action flags for a given clone or bundle action
  *
  * \param[in,out] action     Action for a clone or bundle
  * \param[in]     instances  Clone instances or bundle containers
  * \param[in]     node       If not NULL, limit effects to this node
  *
  * \return Flags appropriate to \p action on \p node
  */
 uint32_t
 pcmk__collective_action_flags(pe_action_t *action, const GList *instances,
                               const pe_node_t *node)
 {
     bool any_runnable = false;
     const char *action_name = orig_action_name(action);
 
     // Set original assumptions (optional and runnable may be cleared below)
     uint32_t flags = pe_action_optional|pe_action_runnable|pe_action_pseudo;
 
     for (const GList *iter = instances; iter != NULL; iter = iter->next) {
         const pe_resource_t *instance = iter->data;
         const pe_node_t *instance_node = NULL;
         pe_action_t *instance_action = NULL;
         uint32_t instance_flags;
 
         // Node is relevant only to primitive instances
         if (instance->variant == pe_native) {
             instance_node = node;
         }
 
         instance_action = find_first_action(instance->actions, NULL,
                                             action_name, instance_node);
         if (instance_action == NULL) {
             pe_rsc_trace(action->rsc, "%s has no %s action on %s",
                          instance->id, action_name, pe__node_name(node));
             continue;
         }
 
         pe_rsc_trace(action->rsc, "%s has %s for %s on %s",
                      instance->id, instance_action->uuid, action_name,
                      pe__node_name(node));
 
         instance_flags = instance->cmds->action_flags(instance_action, node);
 
         // If any instance action is mandatory, so is the collective action
         if (pcmk_is_set(flags, pe_action_optional)
             && !pcmk_is_set(instance_flags, pe_action_optional)) {
             pe_rsc_trace(instance, "%s is mandatory because %s is",
                          action->uuid, instance_action->uuid);
             pe__clear_action_summary_flags(flags, action, pe_action_optional);
             pe__clear_action_flags(action, pe_action_optional);
         }
 
         // If any instance action is runnable, so is the collective action
         if (pcmk_is_set(instance_flags, pe_action_runnable)) {
             any_runnable = true;
         }
     }
 
     if (!any_runnable) {
         pe_rsc_trace(action->rsc,
                      "%s is not runnable because no instance can run %s",
                      action->uuid, action_name);
         pe__clear_action_summary_flags(flags, action, pe_action_runnable);
         if (node == NULL) {
             pe__clear_action_flags(action, pe_action_runnable);
         }
     }
 
     return flags;
 }
 
 /*!
  * \internal
  * \brief Add a collective resource's colocations to a list for an instance
  *
  * \param[in,out] list        Colocation list to add to
  * \param[in]     instance    Clone or bundle instance or instance group member
  * \param[in]     collective  Clone or bundle resource with colocations to add
  * \param[in]     with_this   If true, add collective's "with this" colocations,
  *                            otherwise add its "this with" colocations
  */
 void
 pcmk__add_collective_constraints(GList **list, const pe_resource_t *instance,
                                  const pe_resource_t *collective,
                                  bool with_this)
 {
     const GList *colocations = NULL;
     bool everywhere = false;
 
     CRM_CHECK((list != NULL) && (instance != NULL), return);
 
     if (collective == NULL) {
         return;
     }
     switch (collective->variant) {
         case pe_clone:
         case pe_container:
             break;
         default:
             return;
     }
 
     everywhere = can_run_everywhere(collective);
 
     if (with_this) {
         colocations = collective->rsc_cons_lhs;
     } else {
         colocations = collective->rsc_cons;
     }
 
     for (const GList *iter = colocations; iter != NULL; iter = iter->next) {
         const pcmk__colocation_t *colocation = iter->data;
 
         if (with_this
             && !pcmk__colocation_has_influence(colocation, instance)) {
             continue;
         }
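         /* When the collective can run everywhere, only negative colocations
          * and mandatory "this with" colocations can affect the instances
          */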
         if (!everywhere || (colocation->score < 0)
             || (!with_this && (colocation->score == INFINITY))) {
 
             if (with_this) {
                 pcmk__add_with_this(list, colocation, instance);
             } else {
                 pcmk__add_this_with(list, colocation, instance);
             }
         }
     }
 }
diff --git a/lib/pacemaker/pcmk_sched_nodes.c b/lib/pacemaker/pcmk_sched_nodes.c
index 4645d4bcb7..92230d1e32 100644
--- a/lib/pacemaker/pcmk_sched_nodes.c
+++ b/lib/pacemaker/pcmk_sched_nodes.c
@@ -1,354 +1,430 @@
 /*
  * Copyright 2004-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 #include <crm/msg_xml.h>
 #include <crm/lrmd.h>       // lrmd_event_data_t
 #include <crm/common/xml_internal.h>
 #include <pacemaker-internal.h>
 #include <pacemaker.h>
 #include "libpacemaker_private.h"
 
 /*!
  * \internal
  * \brief Check whether a node is available to run resources
  *
  * \param[in] node            Node to check
  * \param[in] consider_score  If true, consider a negative score unavailable
  * \param[in] consider_guest  If true, consider a guest node unavailable whose
  *                            resource will not be active
  *
  * \return true if node is online and not shutting down, unclean, or in standby
  *         or maintenance mode, otherwise false
  */
 bool
 pcmk__node_available(const pe_node_t *node, bool consider_score,
                      bool consider_guest)
 {
     if ((node == NULL) || (node->details == NULL) || !node->details->online
             || node->details->shutdown || node->details->unclean
             || node->details->standby || node->details->maintenance) {
         return false;
     }
 
     if (consider_score && (node->weight < 0)) {
         return false;
     }
 
     // @TODO Go through all callers to see which should set consider_guest
     if (consider_guest && pe__is_guest_node(node)) {
         pe_resource_t *guest = node->details->remote_rsc->container;
 
         if (guest->fns->location(guest, NULL, FALSE) == NULL) {
             return false;
         }
     }
 
     return true;
 }
 
 /*!
  * \internal
  * \brief Copy a hash table of node objects
  *
  * \param[in] nodes  Hash table to copy
  *
  * \return New copy of nodes (or NULL if nodes is NULL)
  */
 GHashTable *
 pcmk__copy_node_table(GHashTable *nodes)
 {
     GHashTable *new_table = NULL;
     GHashTableIter iter;
     pe_node_t *node = NULL;
 
     if (nodes == NULL) {
         return NULL;
     }
     new_table = pcmk__strkey_table(NULL, free);
     g_hash_table_iter_init(&iter, nodes);
     while (g_hash_table_iter_next(&iter, NULL, (gpointer *) &node)) {
         pe_node_t *new_node = pe__copy_node(node);
 
         g_hash_table_insert(new_table, (gpointer) new_node->details->id,
                             new_node);
     }
     return new_table;
 }
 
+/*!
+ * \internal
+ * \brief Free a node table
+ *
+ * \param[in,out] data  Node table to free
+ *
+ * \note This is a \c GDestroyNotify wrapper for \c g_hash_table_destroy(),
+ *       suitable for a hash table whose values are node tables.
+ */
+static void
+destroy_node_tables(gpointer data)
+{
+    g_hash_table_destroy((GHashTable *) data);
+}
+
+/*!
+ * \internal
+ * \brief Recursively copy the node tables of a resource
+ *
+ * Build a hash table containing copies of the allowed nodes tables of \p rsc
+ * and its entire tree of descendants. The key is the resource ID, and the value
+ * is a copy of the resource's node table.
+ *
+ * \param[in]     rsc   Resource whose node tables to copy
+ * \param[in,out] copy  Where to store the copied node tables
+ *
+ * \note \p *copy should be \c NULL for the top-level call.
+ * \note The caller is responsible for freeing \p copy using
+ *       \c g_hash_table_destroy().
+ */
+void
+pcmk__copy_node_tables(const pe_resource_t *rsc, GHashTable **copy)
+{
+    CRM_ASSERT((rsc != NULL) && (copy != NULL));
+
+    if (*copy == NULL) {
+        *copy = pcmk__strkey_table(NULL, destroy_node_tables);
+    }
+
+    g_hash_table_insert(*copy, rsc->id,
+                        pcmk__copy_node_table(rsc->allowed_nodes));
+
+    for (const GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+        pcmk__copy_node_tables((const pe_resource_t *) iter->data, copy);
+    }
+}
+
+/*!
+ * \internal
+ * \brief Recursively restore the node tables of a resource from backup
+ *
+ * Given a hash table containing backup copies of the allowed nodes tables of
+ * \p rsc and its entire tree of descendants, replace the resources' current
+ * node tables with the backed-up copies.
+ *
+ * \param[in,out] rsc     Resource whose node tables to restore
+ * \param[in]     backup  Table of backup node tables (created by
+ *                        \c pcmk__copy_node_tables())
+ *
+ * \note This function frees the resources' current node tables.
+ */
+void
+pcmk__restore_node_tables(pe_resource_t *rsc, GHashTable *backup)
+{
+    CRM_ASSERT((rsc != NULL) && (backup != NULL));
+
+    g_hash_table_destroy(rsc->allowed_nodes);
+
+    // Restore a copy, so the backup remains intact for any later restore
+    rsc->allowed_nodes = pcmk__copy_node_table(g_hash_table_lookup(backup,
+                                                                   rsc->id));
+
+    for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+        pcmk__restore_node_tables((pe_resource_t *) iter->data, backup);
+    }
+}
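+
+/* A minimal usage sketch (hypothetical caller), showing how the two functions
+ * above pair up: back up a resource's node tables before a tentative change,
+ * then either keep the result or roll back:
+ *
+ *     GHashTable *backup = NULL;
+ *
+ *     pcmk__copy_node_tables(rsc, &backup);
+ *     // ... tentatively modify rsc->allowed_nodes (or a descendant's) ...
+ *     pcmk__restore_node_tables(rsc, backup);   // only if reverting
+ *     g_hash_table_destroy(backup);             // free the backup either way
+ */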
+
 /*!
  * \internal
  * \brief Copy a list of node objects
  *
  * \param[in] list   List to copy
  * \param[in] reset  Set copies' scores to 0
  *
  * \return New list of shallow copies of nodes in original list
  */
 GList *
 pcmk__copy_node_list(const GList *list, bool reset)
 {
     GList *result = NULL;
 
     for (const GList *iter = list; iter != NULL; iter = iter->next) {
         pe_node_t *new_node = NULL;
         pe_node_t *this_node = iter->data;
 
         new_node = pe__copy_node(this_node);
         if (reset) {
             new_node->weight = 0;
         }
         result = g_list_prepend(result, new_node);
     }
     return result;
 }
 
 /*!
  * \internal
  * \brief Compare two nodes for assignment preference
  *
  * Given two nodes, check which one is more preferred by assignment criteria
  * such as node score and utilization.
  *
  * \param[in] a     First node to compare
  * \param[in] b     Second node to compare
  * \param[in] data  Node that resource being assigned is active on, if any
  *
  * \return -1 if \p a is preferred, +1 if \p b is preferred, or 0 if they are
  *         equally preferred
  */
 static gint
 compare_nodes(gconstpointer a, gconstpointer b, gpointer data)
 {
     const pe_node_t *node1 = (const pe_node_t *) a;
     const pe_node_t *node2 = (const pe_node_t *) b;
     const pe_node_t *active = (const pe_node_t *) data;
 
     int node1_score = -INFINITY;
     int node2_score = -INFINITY;
 
     int result = 0;
 
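     // A NULL node sorts after any real node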
     if (a == NULL) {
         return 1;
     }
     if (b == NULL) {
         return -1;
     }
 
     // Compare node scores
 
     if (pcmk__node_available(node1, false, false)) {
         node1_score = node1->weight;
     }
     if (pcmk__node_available(node2, false, false)) {
         node2_score = node2->weight;
     }
 
     if (node1_score > node2_score) {
         crm_trace("%s (%d) > %s (%d) : score",
                   pe__node_name(node1), node1_score, pe__node_name(node2),
                   node2_score);
         return -1;
     }
 
     if (node1_score < node2_score) {
         crm_trace("%s (%d) < %s (%d) : score",
                   pe__node_name(node1), node1_score, pe__node_name(node2),
                   node2_score);
         return 1;
     }
 
     crm_trace("%s (%d) == %s (%d) : score",
               pe__node_name(node1), node1_score, pe__node_name(node2),
               node2_score);
 
     // If appropriate, compare node utilization
 
     if (pcmk__str_eq(node1->details->data_set->placement_strategy, "minimal",
                      pcmk__str_casei)) {
         goto equal;
     }
 
     if (pcmk__str_eq(node1->details->data_set->placement_strategy, "balanced",
                      pcmk__str_casei)) {
         result = pcmk__compare_node_capacities(node1, node2);
         if (result < 0) {
             crm_trace("%s > %s : capacity (%d)",
                       pe__node_name(node1), pe__node_name(node2), result);
             return -1;
         } else if (result > 0) {
             crm_trace("%s < %s : capacity (%d)",
                       pe__node_name(node1), pe__node_name(node2), result);
             return 1;
         }
     }
 
     // Compare number of resources already assigned to node
 
     if (node1->details->num_resources < node2->details->num_resources) {
         crm_trace("%s (%d) > %s (%d) : resources",
                   pe__node_name(node1), node1->details->num_resources,
                   pe__node_name(node2), node2->details->num_resources);
         return -1;
 
     } else if (node1->details->num_resources > node2->details->num_resources) {
         crm_trace("%s (%d) < %s (%d) : resources",
                   pe__node_name(node1), node1->details->num_resources,
                   pe__node_name(node2), node2->details->num_resources);
         return 1;
     }
 
     // Check whether one node is already running desired resource
 
     if (active != NULL) {
         if (pe__same_node(active, node1)) {
             crm_trace("%s (%d) > %s (%d) : active",
                       pe__node_name(node1), node1->details->num_resources,
                       pe__node_name(node2), node2->details->num_resources);
             return -1;
         } else if (pe__same_node(active, node2)) {
             crm_trace("%s (%d) < %s (%d) : active",
                       pe__node_name(node1), node1->details->num_resources,
                       pe__node_name(node2), node2->details->num_resources);
             return 1;
         }
     }
 
     // If all else is equal, prefer node with lowest-sorting name
 equal:
     crm_trace("%s = %s", pe__node_name(node1), pe__node_name(node2));
     return strcmp(node1->details->uname, node2->details->uname);
 }
 
 /*!
  * \internal
 * \brief Sort a list of nodes by assignment preference
  *
  * \param[in,out] nodes        Node list to sort
  * \param[in]     active_node  Node where resource being assigned is active
  *
  * \return New head of sorted list
  */
 GList *
 pcmk__sort_nodes(GList *nodes, pe_node_t *active_node)
 {
     return g_list_sort_with_data(nodes, compare_nodes, active_node);
 }
 
 /*!
  * \internal
  * \brief Check whether any node is available to run resources
  *
  * \param[in] nodes  Nodes to check
  *
  * \return true if any node in \p nodes is available to run resources,
  *         otherwise false
  */
 bool
 pcmk__any_node_available(GHashTable *nodes)
 {
     GHashTableIter iter;
     const pe_node_t *node = NULL;
 
     if (nodes == NULL) {
         return false;
     }
     g_hash_table_iter_init(&iter, nodes);
     while (g_hash_table_iter_next(&iter, NULL, (void **) &node)) {
         if (pcmk__node_available(node, true, false)) {
             return true;
         }
     }
     return false;
 }
 
 /*!
  * \internal
  * \brief Apply node health values for all nodes in cluster
  *
  * \param[in,out] data_set  Cluster working set
  */
 void
 pcmk__apply_node_health(pe_working_set_t *data_set)
 {
     int base_health = 0;
     enum pcmk__health_strategy strategy;
     const char *strategy_str = pe_pref(data_set->config_hash,
                                        PCMK__OPT_NODE_HEALTH_STRATEGY);
 
     strategy = pcmk__parse_health_strategy(strategy_str);
     if (strategy == pcmk__health_strategy_none) {
         return;
     }
     crm_info("Applying node health strategy '%s'", strategy_str);
 
     // The progressive strategy can use a base health score
     if (strategy == pcmk__health_strategy_progressive) {
         base_health = pe__health_score(PCMK__OPT_NODE_HEALTH_BASE, data_set);
     }
 
     for (GList *iter = data_set->nodes; iter != NULL; iter = iter->next) {
         pe_node_t *node = (pe_node_t *) iter->data;
         int health = pe__sum_node_health_scores(node, base_health);
 
         // An overall health score of 0 has no effect
         if (health == 0) {
             continue;
         }
         crm_info("Overall system health of %s is %d",
                  pe__node_name(node), health);
 
         // Use node health as a location score for each resource on the node
         for (GList *r = data_set->resources; r != NULL; r = r->next) {
             pe_resource_t *rsc = (pe_resource_t *) r->data;
 
             bool constrain = true;
 
             if (health < 0) {
                 /* Negative health scores do not apply to resources with
                  * allow-unhealthy-nodes=true.
                  */
                 constrain = !crm_is_true(g_hash_table_lookup(rsc->meta,
                                          PCMK__META_ALLOW_UNHEALTHY_NODES));
             }
             if (constrain) {
                 pcmk__new_location(strategy_str, rsc, health, NULL, node);
             } else {
                 pe_rsc_trace(rsc, "%s is immune from health ban on %s",
                              rsc->id, pe__node_name(node));
             }
         }
     }
 }
 
 /*!
  * \internal
  * \brief Check for a node in a resource's parent's allowed nodes
  *
  * \param[in] rsc   Resource whose parent should be checked
  * \param[in] node  Node to check for
  *
 * \return Equivalent of \p node from the allowed nodes of \p rsc's parent
 *         (or of \p rsc itself if it has no parent) if any, otherwise NULL
  */
 pe_node_t *
 pcmk__top_allowed_node(const pe_resource_t *rsc, const pe_node_t *node)
 {
     GHashTable *allowed_nodes = NULL;
 
     if ((rsc == NULL) || (node == NULL)) {
         return NULL;
     } else if (rsc->parent == NULL) {
         allowed_nodes = rsc->allowed_nodes;
     } else {
         allowed_nodes = rsc->parent->allowed_nodes;
     }
     return pe_hash_table_lookup(allowed_nodes, node->details->id);
 }
diff --git a/lib/pacemaker/pcmk_sched_resource.c b/lib/pacemaker/pcmk_sched_resource.c
index 0b301a811b..10c14ac428 100644
--- a/lib/pacemaker/pcmk_sched_resource.c
+++ b/lib/pacemaker/pcmk_sched_resource.c
@@ -1,736 +1,749 @@
 /*
  * Copyright 2014-2023 the Pacemaker project contributors
  *
  * The version control history for this file may have further details.
  *
  * This source code is licensed under the GNU General Public License version 2
  * or later (GPLv2+) WITHOUT ANY WARRANTY.
  */
 
 #include <crm_internal.h>
 
 #include <stdlib.h>
 #include <string.h>
 #include <crm/msg_xml.h>
 #include <pacemaker-internal.h>
 
 #include "libpacemaker_private.h"
 
 // Resource assignment methods by resource variant
 static resource_alloc_functions_t assignment_methods[] = {
     {
         pcmk__primitive_assign,
         pcmk__primitive_create_actions,
         pcmk__probe_rsc_on_node,
         pcmk__primitive_internal_constraints,
         pcmk__primitive_apply_coloc_score,
         pcmk__colocated_resources,
         pcmk__with_primitive_colocations,
         pcmk__primitive_with_colocations,
         pcmk__add_colocated_node_scores,
         pcmk__apply_location,
         pcmk__primitive_action_flags,
         pcmk__update_ordered_actions,
         pcmk__output_resource_actions,
         pcmk__add_rsc_actions_to_graph,
         pcmk__primitive_add_graph_meta,
         pcmk__primitive_add_utilization,
         pcmk__primitive_shutdown_lock,
     },
     {
         pcmk__group_assign,
         pcmk__group_create_actions,
         pcmk__probe_rsc_on_node,
         pcmk__group_internal_constraints,
         pcmk__group_apply_coloc_score,
         pcmk__group_colocated_resources,
         pcmk__with_group_colocations,
         pcmk__group_with_colocations,
         pcmk__group_add_colocated_node_scores,
         pcmk__group_apply_location,
         pcmk__group_action_flags,
         pcmk__group_update_ordered_actions,
         pcmk__output_resource_actions,
         pcmk__add_rsc_actions_to_graph,
         pcmk__noop_add_graph_meta,
         pcmk__group_add_utilization,
         pcmk__group_shutdown_lock,
     },
     {
         pcmk__clone_assign,
         pcmk__clone_create_actions,
         pcmk__clone_create_probe,
         pcmk__clone_internal_constraints,
         pcmk__clone_apply_coloc_score,
         pcmk__colocated_resources,
         pcmk__with_clone_colocations,
         pcmk__clone_with_colocations,
         pcmk__add_colocated_node_scores,
         pcmk__clone_apply_location,
         pcmk__clone_action_flags,
         pcmk__instance_update_ordered_actions,
         pcmk__output_resource_actions,
         pcmk__clone_add_actions_to_graph,
         pcmk__clone_add_graph_meta,
         pcmk__clone_add_utilization,
         pcmk__clone_shutdown_lock,
     },
     {
         pcmk__bundle_assign,
         pcmk__bundle_create_actions,
         pcmk__bundle_create_probe,
         pcmk__bundle_internal_constraints,
         pcmk__bundle_apply_coloc_score,
         pcmk__colocated_resources,
         pcmk__with_bundle_colocations,
         pcmk__bundle_with_colocations,
         pcmk__add_colocated_node_scores,
         pcmk__bundle_apply_location,
         pcmk__bundle_action_flags,
         pcmk__instance_update_ordered_actions,
         pcmk__output_bundle_actions,
         pcmk__bundle_add_actions_to_graph,
         pcmk__noop_add_graph_meta,
         pcmk__bundle_add_utilization,
         pcmk__bundle_shutdown_lock,
     }
 };
 
 /*!
  * \internal
  * \brief Check whether a resource's agent standard, provider, or type changed
  *
  * \param[in,out] rsc             Resource to check
  * \param[in,out] node            Node needing unfencing if agent changed
  * \param[in]     rsc_entry       XML with previously known agent information
  * \param[in]     active_on_node  Whether \p rsc is active on \p node
  *
  * \return true if agent for \p rsc changed, otherwise false
  */
 bool
 pcmk__rsc_agent_changed(pe_resource_t *rsc, pe_node_t *node,
                         const xmlNode *rsc_entry, bool active_on_node)
 {
     bool changed = false;
     const char *attr_list[] = {
         XML_ATTR_TYPE,
         XML_AGENT_ATTR_CLASS,
         XML_AGENT_ATTR_PROVIDER
     };
 
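     // Compare each agent attribute against the previously recorded value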
     for (int i = 0; i < PCMK__NELEM(attr_list); i++) {
         const char *value = crm_element_value(rsc->xml, attr_list[i]);
         const char *old_value = crm_element_value(rsc_entry, attr_list[i]);
 
         if (!pcmk__str_eq(value, old_value, pcmk__str_none)) {
             changed = true;
             trigger_unfencing(rsc, node, "Device definition changed", NULL,
                               rsc->cluster);
             if (active_on_node) {
                 crm_notice("Forcing restart of %s on %s "
                            "because %s changed from '%s' to '%s'",
                            rsc->id, pe__node_name(node), attr_list[i],
                            pcmk__s(old_value, ""), pcmk__s(value, ""));
             }
         }
     }
     if (changed && active_on_node) {
         // Make sure the resource is restarted
         custom_action(rsc, stop_key(rsc), CRMD_ACTION_STOP, node, FALSE, TRUE,
                       rsc->cluster);
         pe__set_resource_flags(rsc, pe_rsc_start_pending);
     }
     return changed;
 }
 
 /*!
  * \internal
  * \brief Add resource (and any matching children) to list if it matches ID
  *
  * \param[in] result  List to add resource to
  * \param[in] rsc     Resource to check
  * \param[in] id      ID to match
  *
  * \return (Possibly new) head of list
  */
 static GList *
 add_rsc_if_matching(GList *result, pe_resource_t *rsc, const char *id)
 {
     if ((strcmp(rsc->id, id) == 0)
         || ((rsc->clone_name != NULL) && (strcmp(rsc->clone_name, id) == 0))) {
         result = g_list_prepend(result, rsc);
     }
     for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
         pe_resource_t *child = (pe_resource_t *) iter->data;
 
         result = add_rsc_if_matching(result, child, id);
     }
     return result;
 }
 
 /*!
  * \internal
  * \brief Find all resources matching a given ID by either ID or clone name
  *
  * \param[in] id        Resource ID to check
  * \param[in] data_set  Cluster working set
  *
  * \return List of all resources that match \p id
  * \note The caller is responsible for freeing the return value with
  *       g_list_free().
  */
 GList *
 pcmk__rscs_matching_id(const char *id, const pe_working_set_t *data_set)
 {
     GList *result = NULL;
 
     CRM_CHECK((id != NULL) && (data_set != NULL), return NULL);
     for (GList *iter = data_set->resources; iter != NULL; iter = iter->next) {
         result = add_rsc_if_matching(result, (pe_resource_t *) iter->data, id);
     }
     return result;
 }
 
 /*!
  * \internal
  * \brief Set the variant-appropriate assignment methods for a resource
  *
  * \param[in,out] data       Resource to set assignment methods for
  * \param[in]     user_data  Ignored
  */
 static void
 set_assignment_methods_for_rsc(gpointer data, gpointer user_data)
 {
     pe_resource_t *rsc = data;
 
     rsc->cmds = &assignment_methods[rsc->variant];
     g_list_foreach(rsc->children, set_assignment_methods_for_rsc, NULL);
 }
 
 /*!
  * \internal
  * \brief Set the variant-appropriate assignment methods for all resources
  *
  * \param[in,out] data_set  Cluster working set
  */
 void
 pcmk__set_assignment_methods(pe_working_set_t *data_set)
 {
     g_list_foreach(data_set->resources, set_assignment_methods_for_rsc, NULL);
 }
 
 /*!
  * \internal
  * \brief Wrapper for colocated_resources() method for readability
  *
  * \param[in]      rsc       Resource to add to colocated list
  * \param[in]      orig_rsc  Resource originally requested
  * \param[in,out]  list      Pointer to list to add to
 */
 static inline void
 add_colocated_resources(const pe_resource_t *rsc, const pe_resource_t *orig_rsc,
                         GList **list)
 {
     *list = rsc->cmds->colocated_resources(rsc, orig_rsc, *list);
 }
 
 // Shared implementation of resource_alloc_functions_t:colocated_resources()
 GList *
 pcmk__colocated_resources(const pe_resource_t *rsc,
                           const pe_resource_t *orig_rsc, GList *colocated_rscs)
 {
     const GList *iter = NULL;
     GList *colocations = NULL;
 
     if (orig_rsc == NULL) {
         orig_rsc = rsc;
     }
 
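     // Stop recursion if resource is unspecified or already in the list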
     if ((rsc == NULL) || (g_list_find(colocated_rscs, rsc) != NULL)) {
         return colocated_rscs;
     }
 
     pe_rsc_trace(orig_rsc, "%s is in colocation chain with %s",
                  rsc->id, orig_rsc->id);
     colocated_rscs = g_list_prepend(colocated_rscs, (gpointer) rsc);
 
     // Follow colocations where this resource is the dependent resource
     colocations = pcmk__this_with_colocations(rsc);
     for (iter = colocations; iter != NULL; iter = iter->next) {
         const pcmk__colocation_t *constraint = iter->data;
         const pe_resource_t *primary = constraint->primary;
 
         if (primary == orig_rsc) {
             continue; // Break colocation loop
         }
 
         if ((constraint->score == INFINITY) &&
             (pcmk__colocation_affects(rsc, primary, constraint,
                                       true) == pcmk__coloc_affects_location)) {
             add_colocated_resources(primary, orig_rsc, &colocated_rscs);
         }
     }
     g_list_free(colocations);
 
     // Follow colocations where this resource is the primary resource
     colocations = pcmk__with_this_colocations(rsc);
     for (iter = colocations; iter != NULL; iter = iter->next) {
         const pcmk__colocation_t *constraint = iter->data;
         const pe_resource_t *dependent = constraint->dependent;
 
         if (dependent == orig_rsc) {
             continue; // Break colocation loop
         }
 
         if (pe_rsc_is_clone(rsc) && !pe_rsc_is_clone(dependent)) {
             continue; // We can't be sure whether dependent will be colocated
         }
 
         if ((constraint->score == INFINITY) &&
             (pcmk__colocation_affects(dependent, rsc, constraint,
                                       true) == pcmk__coloc_affects_location)) {
             add_colocated_resources(dependent, orig_rsc, &colocated_rscs);
         }
     }
     g_list_free(colocations);
 
     return colocated_rscs;
 }
 
 // No-op function for variants that don't need to implement add_graph_meta()
 void
 pcmk__noop_add_graph_meta(const pe_resource_t *rsc, xmlNode *xml)
 {
 }
 
 /*!
  * \internal
  * \brief Output a summary of scheduled actions for a resource
  *
  * \param[in,out] rsc  Resource to output actions for
  */
 void
 pcmk__output_resource_actions(pe_resource_t *rsc)
 {
     pe_node_t *next = NULL;
     pe_node_t *current = NULL;
     pcmk__output_t *out = NULL;
 
     CRM_ASSERT(rsc != NULL);
 
     out = rsc->cluster->priv;
     if (rsc->children != NULL) {
         for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
             pe_resource_t *child = (pe_resource_t *) iter->data;
 
             child->cmds->output_actions(child);
         }
         return;
     }
 
     next = rsc->allocated_to;
     if (rsc->running_on) {
         current = pe__current_node(rsc);
         if (rsc->role == RSC_ROLE_STOPPED) {
             /* This can occur when resources are being recovered because
              * the current role can change in pcmk__primitive_create_actions()
              */
             rsc->role = RSC_ROLE_STARTED;
         }
     }
 
     if ((current == NULL) && pcmk_is_set(rsc->flags, pe_rsc_orphan)) {
         /* Don't log stopped orphans */
         return;
     }
 
     out->message(out, "rsc-action", rsc, current, next);
 }
 
 /*!
  * \internal
  * \brief Add a resource to a node's list of assigned resources
  *
  * \param[in,out] node  Node to add resource to
  * \param[in]     rsc   Resource to add
  */
 static inline void
 add_assigned_resource(pe_node_t *node, pe_resource_t *rsc)
 {
     node->details->allocated_rsc = g_list_prepend(node->details->allocated_rsc,
                                                   rsc);
 }
 
 /*!
  * \internal
  * \brief Assign a specified resource (of any variant) to a node
  *
  * Assign a specified resource and its children (if any) to a specified node, if
  * the node can run the resource (or unconditionally, if \p force is true). Mark
  * the resources as no longer provisional. If a resource can't be assigned (or
  * \p node is \c NULL), unassign any previous assignment, set next role to
  * stopped, and update any existing actions scheduled for it.
  *
  * \param[in,out] rsc    Resource to assign
  * \param[in,out] node   Node to assign \p rsc to
  * \param[in]     force  If true, assign to \p node even if unavailable
  *
  * \return \c true if the assignment of \p rsc changed, or \c false otherwise
  *
  * \note Assigning a resource to the NULL node using this function is different
  *       from calling pcmk__unassign_resource(), in that it will also update any
  *       actions created for the resource.
  * \note The \c resource_alloc_functions_t:assign() method is preferred, unless
  *       a resource should be assigned to the \c NULL node or every resource in
  *       a tree should be assigned to the same node.
  */
 bool
 pcmk__assign_resource(pe_resource_t *rsc, pe_node_t *node, bool force)
 {
     bool changed = false;
 
     CRM_ASSERT(rsc != NULL);
 
     if (rsc->children != NULL) {
         for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
             pe_resource_t *child_rsc = iter->data;
 
             changed |= pcmk__assign_resource(child_rsc, node, force);
         }
         return changed;
     }
 
     // Assigning a primitive
 
     if (!force && (node != NULL)
         && ((node->weight < 0)
             // Allow graph to assume that guest node connections will come up
             || (!pcmk__node_available(node, true, false)
                 && !pe__is_guest_node(node)))) {
 
         pe_rsc_debug(rsc,
                      "All nodes for resource %s are unavailable, unclean or "
                      "shutting down (%s can%s run resources, with score %s)",
                      rsc->id, pe__node_name(node),
                      (pcmk__node_available(node, true, false)? "" : "not"),
                      pcmk_readable_score(node->weight));
         pe__set_next_role(rsc, RSC_ROLE_STOPPED, "node availability");
         node = NULL;
     }
 
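     // Note whether the assignment differs from any previous one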
     if (rsc->allocated_to != NULL) {
         changed = !pe__same_node(rsc->allocated_to, node);
     } else {
         changed = (node != NULL);
     }
     pcmk__unassign_resource(rsc);
     pe__clear_resource_flags(rsc, pe_rsc_provisional);
 
     if (node == NULL) {
         char *rc_stopped = NULL;
 
         pe_rsc_debug(rsc, "Could not assign %s to a node", rsc->id);
         pe__set_next_role(rsc, RSC_ROLE_STOPPED, "unable to assign");
 
         for (GList *iter = rsc->actions; iter != NULL; iter = iter->next) {
             pe_action_t *op = (pe_action_t *) iter->data;
 
             pe_rsc_debug(rsc, "Updating %s for %s assignment failure",
                          op->uuid, rsc->id);
 
             if (pcmk__str_eq(op->task, RSC_STOP, pcmk__str_none)) {
                 pe__clear_action_flags(op, pe_action_optional);
 
             } else if (pcmk__str_eq(op->task, RSC_START, pcmk__str_none)) {
                 pe__clear_action_flags(op, pe_action_runnable);
 
             } else {
                 // Cancel recurring actions, unless for stopped state
                 const char *interval_ms_s = NULL;
                 const char *target_rc_s = NULL;
 
                 interval_ms_s = g_hash_table_lookup(op->meta,
                                                     XML_LRM_ATTR_INTERVAL_MS);
                 target_rc_s = g_hash_table_lookup(op->meta,
                                                   XML_ATTR_TE_TARGET_RC);
                 if (rc_stopped == NULL) {
                     rc_stopped = pcmk__itoa(PCMK_OCF_NOT_RUNNING);
                 }
 
                 if (!pcmk__str_eq(interval_ms_s, "0", pcmk__str_null_matches)
                     && !pcmk__str_eq(rc_stopped, target_rc_s, pcmk__str_none)) {
 
                     pe__clear_action_flags(op, pe_action_runnable);
                 }
             }
         }
         free(rc_stopped);
         return changed;
     }
 
     pe_rsc_debug(rsc, "Assigning %s to %s", rsc->id, pe__node_name(node));
     rsc->allocated_to = pe__copy_node(node);
 
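     // Update the node's bookkeeping to reflect the new assignment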
     add_assigned_resource(node, rsc);
     node->details->num_resources++;
     node->count++;
     pcmk__consume_node_capacity(node->details->utilization, rsc);
 
     if (pcmk_is_set(rsc->cluster->flags, pe_flag_show_utilization)) {
         pcmk__output_t *out = rsc->cluster->priv;
 
         out->message(out, "resource-util", rsc, node, __func__);
     }
     return changed;
 }
 
 /*!
  * \internal
- * \brief Remove any assignment of a specified resource to a node
+ * \brief Remove any node assignment from a specified resource and its children
  *
  * If a specified resource has been assigned to a node, remove that assignment
- * and mark the resource as provisional again. This is not done recursively for
- * children, so it should be called only for primitives.
+ * and mark the resource as provisional again.
  *
  * \param[in,out] rsc  Resource to unassign
+ *
+ * \note This function is called recursively on \p rsc and its children.
  */
 void
 pcmk__unassign_resource(pe_resource_t *rsc)
 {
     pe_node_t *old = rsc->allocated_to;
 
     if (old == NULL) {
-        return;
+        crm_info("Unassigning %s", rsc->id);
+    } else {
+        crm_info("Unassigning %s from %s", rsc->id, pe__node_name(old));
     }
 
-    crm_info("Unassigning %s from %s", rsc->id, pe__node_name(old));
     pe__set_resource_flags(rsc, pe_rsc_provisional);
-    rsc->allocated_to = NULL;
 
-    /* We're going to free the pe_node_t, but its details member is shared and
-     * will remain, so update that appropriately first.
-     */
-    old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc,
-                                                rsc);
-    old->details->num_resources--;
-    pcmk__release_node_capacity(old->details->utilization, rsc);
-    free(old);
+    if (rsc->children == NULL) {
+        if (old == NULL) {
+            return;
+        }
+        rsc->allocated_to = NULL;
+
+        /* We're going to free the pe_node_t, but its details member is shared
+         * and will remain, so update that appropriately first.
+         */
+        old->details->allocated_rsc = g_list_remove(old->details->allocated_rsc,
+                                                    rsc);
+        old->details->num_resources--;
+        pcmk__release_node_capacity(old->details->utilization, rsc);
+        free(old);
+        return;
+    }
+
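+    // For collective resources, unassign each child recursively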
+    for (GList *iter = rsc->children; iter != NULL; iter = iter->next) {
+        pcmk__unassign_resource((pe_resource_t *) iter->data);
+    }
 }
 
 /*!
  * \internal
  * \brief Check whether a resource has reached its migration threshold on a node
  *
  * \param[in,out] rsc       Resource to check
  * \param[in]     node      Node to check
 * \param[out]    failed    If threshold has been reached, this will be set to
 *                          the resource that failed (possibly a parent of
 *                          \p rsc)
  *
  * \return true if the migration threshold has been reached, false otherwise
  */
 bool
 pcmk__threshold_reached(pe_resource_t *rsc, const pe_node_t *node,
                         pe_resource_t **failed)
 {
     int fail_count, remaining_tries;
     pe_resource_t *rsc_to_ban = rsc;
 
     // Migration threshold of 0 means never force away
     if (rsc->migration_threshold == 0) {
         return false;
     }
 
     // If we're ignoring failures, also ignore the migration threshold
     if (pcmk_is_set(rsc->flags, pe_rsc_failure_ignored)) {
         return false;
     }
 
     // If there are no failures, there's no need to force away
     fail_count = pe_get_failcount(node, rsc, NULL,
                                   pe_fc_effective|pe_fc_fillers, NULL);
     if (fail_count <= 0) {
         return false;
     }
 
     // If failed resource is anonymous clone instance, we'll force clone away
     if (!pcmk_is_set(rsc->flags, pe_rsc_unique)) {
         rsc_to_ban = uber_parent(rsc);
     }
 
     // How many more times recovery will be tried on this node
     remaining_tries = rsc->migration_threshold - fail_count;
 
     if (remaining_tries <= 0) {
         crm_warn("%s cannot run on %s due to reaching migration threshold "
                  "(clean up resource to allow again)"
                  CRM_XS " failures=%d migration-threshold=%d",
                  rsc_to_ban->id, pe__node_name(node), fail_count,
                  rsc->migration_threshold);
         if (failed != NULL) {
             *failed = rsc_to_ban;
         }
         return true;
     }
 
     crm_info("%s can fail %d more time%s on "
              "%s before reaching migration threshold (%d)",
              rsc_to_ban->id, remaining_tries, pcmk__plural_s(remaining_tries),
              pe__node_name(node), rsc->migration_threshold);
     return false;
 }
 
 /*!
  * \internal
  * \brief Get a node's score
  *
 * \param[in] node     Node whose score to get
 * \param[in] nodes    Table of nodes (keyed by node ID) to look up score in
  *
  * \return Node's score, or -INFINITY if not found
  */
 static int
 get_node_score(const pe_node_t *node, GHashTable *nodes)
 {
     pe_node_t *found_node = NULL;
 
     if ((node != NULL) && (nodes != NULL)) {
         found_node = g_hash_table_lookup(nodes, node->details->id);
     }
     return (found_node == NULL)? -INFINITY : found_node->weight;
 }
 
 /*!
  * \internal
  * \brief Compare two resources according to which should be assigned first
  *
  * \param[in] a     First resource to compare
  * \param[in] b     Second resource to compare
  * \param[in] data  Sorted list of all nodes in cluster
  *
 * \return -1 if \p a should be assigned before \p b, 0 if they are equal,
 *         or +1 if \p a should be assigned after \p b
  */
 static gint
 cmp_resources(gconstpointer a, gconstpointer b, gpointer data)
 {
     /* GLib insists that this function require gconstpointer arguments, but we
      * make a small, temporary change to each argument (setting the
      * pe_rsc_merging flag) during comparison
      */
     pe_resource_t *resource1 = (pe_resource_t *) a;
     pe_resource_t *resource2 = (pe_resource_t *) b;
     const GList *nodes = data;
 
     int rc = 0;
     int r1_score = -INFINITY;
     int r2_score = -INFINITY;
     pe_node_t *r1_node = NULL;
     pe_node_t *r2_node = NULL;
     GHashTable *r1_nodes = NULL;
     GHashTable *r2_nodes = NULL;
     const char *reason = NULL;
 
     // Resources with highest priority should be assigned first
     reason = "priority";
     r1_score = resource1->priority;
     r2_score = resource2->priority;
     if (r1_score > r2_score) {
         rc = -1;
         goto done;
     }
     if (r1_score < r2_score) {
         rc = 1;
         goto done;
     }
 
     // We need nodes to make any other useful comparisons
     reason = "no node list";
     if (nodes == NULL) {
         goto done;
     }
 
     // Calculate and log node scores
     resource1->cmds->add_colocated_node_scores(resource1, resource1->id,
                                                &r1_nodes, NULL, 1,
                                                pcmk__coloc_select_this_with);
     resource2->cmds->add_colocated_node_scores(resource2, resource2->id,
                                                &r2_nodes, NULL, 1,
                                                pcmk__coloc_select_this_with);
     pe__show_node_scores(true, NULL, resource1->id, r1_nodes,
                          resource1->cluster);
     pe__show_node_scores(true, NULL, resource2->id, r2_nodes,
                          resource2->cluster);
 
     // The resource with highest score on its current node goes first
     reason = "current location";
     if (resource1->running_on != NULL) {
         r1_node = pe__current_node(resource1);
     }
     if (resource2->running_on != NULL) {
         r2_node = pe__current_node(resource2);
     }
     r1_score = get_node_score(r1_node, r1_nodes);
     r2_score = get_node_score(r2_node, r2_nodes);
     if (r1_score > r2_score) {
         rc = -1;
         goto done;
     }
     if (r1_score < r2_score) {
         rc = 1;
         goto done;
     }
 
     // Otherwise a higher score on any node will do
     reason = "score";
     for (const GList *iter = nodes; iter != NULL; iter = iter->next) {
         const pe_node_t *node = (const pe_node_t *) iter->data;
 
         r1_score = get_node_score(node, r1_nodes);
         r2_score = get_node_score(node, r2_nodes);
         if (r1_score > r2_score) {
             rc = -1;
             goto done;
         }
         if (r1_score < r2_score) {
             rc = 1;
             goto done;
         }
     }
 
 done:
     crm_trace("%s (%d)%s%s %c %s (%d)%s%s: %s",
               resource1->id, r1_score,
               ((r1_node == NULL)? "" : " on "),
               ((r1_node == NULL)? "" : r1_node->details->id),
               ((rc < 0)? '>' : ((rc > 0)? '<' : '=')),
               resource2->id, r2_score,
               ((r2_node == NULL)? "" : " on "),
               ((r2_node == NULL)? "" : r2_node->details->id),
               reason);
     if (r1_nodes != NULL) {
         g_hash_table_destroy(r1_nodes);
     }
     if (r2_nodes != NULL) {
         g_hash_table_destroy(r2_nodes);
     }
     return rc;
 }
 
 /*!
  * \internal
  * \brief Sort resources in the order they should be assigned to nodes
  *
  * \param[in,out] data_set  Cluster working set
  */
 void
 pcmk__sort_resources(pe_working_set_t *data_set)
 {
     GList *nodes = g_list_copy(data_set->nodes);
 
     nodes = pcmk__sort_nodes(nodes, NULL);
     data_set->resources = g_list_sort_with_data(data_set->resources,
                                                 cmp_resources, nodes);
     g_list_free(nodes);
 }